diff --git a/cmd/headscale/headscale_test.go b/cmd/headscale/headscale_test.go
index 6ce3ce88..01eb09b2 100644
--- a/cmd/headscale/headscale_test.go
+++ b/cmd/headscale/headscale_test.go
@@ -14,10 +14,7 @@ import (
 )
 
 func TestConfigFileLoading(t *testing.T) {
-	tmpDir, err := os.MkdirTemp("", "headscale")
-	require.NoError(t, err)
-
-	defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
 
 	path, err := os.Getwd()
 	require.NoError(t, err)
@@ -49,10 +46,7 @@ func TestConfigFileLoading(t *testing.T) {
 }
 
 func TestConfigLoading(t *testing.T) {
-	tmpDir, err := os.MkdirTemp("", "headscale")
-	require.NoError(t, err)
-
-	defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
 
 	path, err := os.Getwd()
 	require.NoError(t, err)
diff --git a/cmd/hi/docker.go b/cmd/hi/docker.go
index c402f578..0bf20fc1 100644
--- a/cmd/hi/docker.go
+++ b/cmd/hi/docker.go
@@ -32,6 +32,8 @@ var (
 )
 
 // runTestContainer executes integration tests in a Docker container.
+//
+//nolint:gocyclo // complex test orchestration function
 func runTestContainer(ctx context.Context, config *RunConfig) error {
 	cli, err := createDockerClient(ctx)
 	if err != nil {
diff --git a/hscontrol/app.go b/hscontrol/app.go
index 4a8e5658..6308aac7 100644
--- a/hscontrol/app.go
+++ b/hscontrol/app.go
@@ -504,6 +504,8 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
 }
 
 // Serve launches the HTTP and gRPC server service Headscale and the API.
+//
+//nolint:gocyclo // complex server startup function
 func (h *Headscale) Serve() error {
 	var err error
 
diff --git a/hscontrol/auth_test.go b/hscontrol/auth_test.go
index 4c3c9bf7..6bbddfb4 100644
--- a/hscontrol/auth_test.go
+++ b/hscontrol/auth_test.go
@@ -34,6 +34,7 @@ type interactiveStep struct {
 	callAuthPath bool // Real call to HandleNodeFromAuthPath, not mocked
 }
 
+//nolint:gocyclo // comprehensive test function with many scenarios
func TestAuthenticationFlows(t *testing.T) {
 	// Shared test keys for consistent behavior across test cases
 	machineKey1 := key.NewMachine()
@@ -3109,7 +3110,7 @@ func TestWebFlowReauthDifferentUser(t *testing.T) {
 	user1Nodes := 0
 	user2Nodes := 0
 
-	for i := 0; i < allNodesSlice.Len(); i++ {
+	for i := range allNodesSlice.Len() {
 		n := allNodesSlice.At(i)
 		if n.UserID().Get() == user1.ID {
 			user1Nodes++
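The auth_test.go hunk above uses the range-over-int form added in Go 1.22. A minimal sketch of the two equivalent loops (the slice and its contents are invented for illustration):

```go
package main

import "fmt"

func main() {
	names := []string{"node-a", "node-b", "node-c"}

	// Pre-Go 1.22: explicit three-clause index loop.
	for i := 0; i < len(names); i++ {
		fmt.Println(i, names[i])
	}

	// Go 1.22+: ranging over an integer yields 0, 1, ..., n-1.
	// Same behavior with less off-by-one surface, which is all the
	// patch changes in TestWebFlowReauthDifferentUser and RenameNode.
	for i := range len(names) {
		fmt.Println(i, names[i])
	}
}
```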
diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go
index ca8daecb..d82f8503 100644
--- a/hscontrol/db/db.go
+++ b/hscontrol/db/db.go
@@ -53,6 +53,8 @@ type HSDatabase struct {
 
 // NewHeadscaleDatabase creates a new database connection and runs migrations.
 // It accepts the full configuration to allow migrations access to policy settings.
+//
+//nolint:gocyclo // complex database initialization with many migrations
 func NewHeadscaleDatabase(
 	cfg *types.Config,
 	regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
@@ -995,6 +997,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
 			if err != nil {
 				return err
 			}
+			defer rows.Close()
 
 			for rows.Next() {
 				var violation constraintViolation
@@ -1007,7 +1010,9 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
 				violatedConstraints = append(violatedConstraints, violation)
 			}
 
-			_ = rows.Close()
+			if err := rows.Err(); err != nil { //nolint:noinlineerr
+				return err
+			}
 
 			if len(violatedConstraints) > 0 {
 				for _, violation := range violatedConstraints {
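For context, the shape the runMigrations hunk moves to is the canonical database/sql iteration pattern: close the rows via defer, then check rows.Err() after the loop. A sketch under assumed names (the nodes table and name column are hypothetical):

```go
package db

import "database/sql"

// listNames sketches the rows-handling pattern from the hunk above:
// defer rows.Close() guarantees cleanup on every return path, and the
// rows.Err() check after the loop catches an iteration that aborted
// early -- rows.Next() returning false can mean "done" or "failed".
func listNames(db *sql.DB) ([]string, error) {
	rows, err := db.Query(`SELECT name FROM nodes`) // hypothetical query
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}

	// The discarded `_ = rows.Close()` the patch removes would have
	// swallowed exactly the error this check surfaces.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return names, nil
}
```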
diff --git a/hscontrol/derp/server/derp_server.go b/hscontrol/derp/server/derp_server.go
index f965ee6b..c4766661 100644
--- a/hscontrol/derp/server/derp_server.go
+++ b/hscontrol/derp/server/derp_server.go
@@ -324,8 +324,8 @@ func DERPBootstrapDNSHandler(
 
 	var resolver net.Resolver
 
-	for _, region := range derpMap.Regions().All() {
-		for _, node := range region.Nodes().All() { // we don't care if we override some nodes
+	for _, region := range derpMap.Regions().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
+		for _, node := range region.Nodes().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName())
 			if err != nil {
 				log.Trace().
diff --git a/hscontrol/mapper/batcher_test.go b/hscontrol/mapper/batcher_test.go
index c26385bc..53f0bd75 100644
--- a/hscontrol/mapper/batcher_test.go
+++ b/hscontrol/mapper/batcher_test.go
@@ -235,8 +235,8 @@ func setupBatcherWithTestData(
 	}
 
 	derpMap, err := derp.GetDERPMap(cfg.DERP)
-	assert.NoError(t, err)
-	assert.NotNil(t, derpMap)
+	require.NoError(t, err)
+	require.NotNil(t, derpMap)
 
 	state.SetDERPMap(derpMap)
 
@@ -1124,6 +1124,7 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
 // even when real node updates are being processed, ensuring no race conditions
 // occur during channel replacement with actual workload.
 func XTestBatcherChannelClosingRace(t *testing.T) {
+	t.Helper()
 	for _, batcherFunc := range allBatcherFunctions {
 		t.Run(batcherFunc.name, func(t *testing.T) {
 			// Create test environment with real database and nodes
@@ -1345,6 +1346,8 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
 // real node data. The test validates that stable clients continue to function
 // normally and receive proper updates despite the connection churn from other clients,
 // ensuring system stability under concurrent load.
+//
+//nolint:gocyclo // complex concurrent test scenario
 func TestBatcherConcurrentClients(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping concurrent client test in short mode")
@@ -1629,6 +1632,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 // It validates that the system remains stable with no deadlocks, panics, or
 // missed updates under sustained high load. The test uses real node data to
 // generate authentic update scenarios and tracks comprehensive statistics.
+//
+//nolint:gocyclo // complex scalability test scenario
 func XTestBatcherScalability(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping scalability test in short mode")
@@ -2422,6 +2427,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
 	}
 }
 
+//nolint:gocyclo // complex multi-connection test scenario
 func TestBatcherMultiConnection(t *testing.T) {
 	for _, batcherFunc := range allBatcherFunctions {
 		t.Run(batcherFunc.name, func(t *testing.T) {
diff --git a/hscontrol/mapper/builder_test.go b/hscontrol/mapper/builder_test.go
index 978b2c0e..3de60c97 100644
--- a/hscontrol/mapper/builder_test.go
+++ b/hscontrol/mapper/builder_test.go
@@ -339,8 +339,8 @@ func TestMapResponseBuilder_MultipleErrors(t *testing.T) {
 	// Build should return a multierr
 	data, err := result.Build()
 
-	assert.Nil(t, data)
-	assert.Error(t, err)
+	require.Nil(t, data)
+	require.Error(t, err)
 
 	// The error should contain information about multiple errors
 	assert.Contains(t, err.Error(), "multiple errors")
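The assert-to-require promotions above (and in cli_test.go further down) follow the usual testify split: require aborts the test on failure, assert records the failure and continues. A minimal sketch with a hypothetical loader standing in for derp.GetDERPMap:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type derpMap struct{ Regions map[int]string }

// loadDERPMap is an illustrative stand-in for derp.GetDERPMap.
func loadDERPMap() (*derpMap, error) {
	return &derpMap{Regions: map[int]string{1: "fra"}}, nil
}

func TestSetup(t *testing.T) {
	dm, err := loadDERPMap()

	// require.* fails the test immediately: correct for preconditions,
	// since every line below would dereference a nil dm.
	require.NoError(t, err)
	require.NotNil(t, dm)

	// assert.* records the failure and keeps going: fine for independent
	// checks where seeing every mismatch in one run is useful.
	assert.NotEmpty(t, dm.Regions)
	assert.Contains(t, dm.Regions, 1)
}
```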
diff --git a/hscontrol/mapper/tail_test.go b/hscontrol/mapper/tail_test.go
index 4189353d..70572f5a 100644
--- a/hscontrol/mapper/tail_test.go
+++ b/hscontrol/mapper/tail_test.go
@@ -258,7 +258,7 @@ func TestNodeExpiry(t *testing.T) {
 		},
 		{
 			name:         "localtime",
-			exp:          tp(time.Time{}.Local()),
+			exp:          tp(time.Time{}.Local()), //nolint:gosmopolitan
 			wantTimeZero: true,
 		},
 	}
diff --git a/hscontrol/policy/v2/filter.go b/hscontrol/policy/v2/filter.go
index 2bce5bfc..df47d3f1 100644
--- a/hscontrol/policy/v2/filter.go
+++ b/hscontrol/policy/v2/filter.go
@@ -146,6 +146,8 @@ func (pol *Policy) compileFilterRulesForNode(
 // It returns a slice of filter rules because when an ACL has both autogroup:self
 // and other destinations, they need to be split into separate rules with different
 // source filtering logic.
+//
+//nolint:gocyclo // complex ACL compilation logic
 func (pol *Policy) compileACLWithAutogroupSelf(
 	acl ACL,
 	users types.Users,
@@ -328,6 +330,7 @@ func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
 	}
 }
 
+//nolint:gocyclo // complex SSH policy compilation logic
 func (pol *Policy) compileSSHPolicy(
 	users types.Users,
 	node types.NodeView,
diff --git a/hscontrol/policy/v2/policy_test.go b/hscontrol/policy/v2/policy_test.go
index 062bcc04..5e22f57e 100644
--- a/hscontrol/policy/v2/policy_test.go
+++ b/hscontrol/policy/v2/policy_test.go
@@ -77,6 +77,7 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
 		{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
 	}
 
+	//nolint:goconst // test-specific inline policy for clarity
 	policy := `{
 		"acls": [
 			{
diff --git a/hscontrol/policy/v2/types.go b/hscontrol/policy/v2/types.go
index c8b4f4e5..e508bb11 100644
--- a/hscontrol/policy/v2/types.go
+++ b/hscontrol/policy/v2/types.go
@@ -518,7 +518,7 @@ func (p *Prefix) UnmarshalJSON(b []byte) error {
 		return err
 	}
 
-	if err := p.Validate(); err != nil {
+	if err := p.Validate(); err != nil { //nolint:noinlineerr
 		return err
 	}
 
@@ -715,7 +715,7 @@ func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
 		return err
 	}
 
-	if err := ve.Validate(); err != nil {
+	if err := ve.Validate(); err != nil { //nolint:noinlineerr
 		return err
 	}
 
@@ -1585,7 +1585,7 @@ type ACL struct {
 func (a *ACL) UnmarshalJSON(b []byte) error {
 	// First unmarshal into a map to filter out comment fields
 	var raw map[string]any
-	if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil {
+	if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil { //nolint:noinlineerr
 		return err
 	}
 
@@ -1850,6 +1850,8 @@ func validateACLSrcDstCombination(sources Aliases, destinations []AliasWithPorts
 // the unmarshaling process.
 // It runs through all rules and checks if there are any inconsistencies
 // in the policy that needs to be addressed before it can be used.
+//
+//nolint:gocyclo // comprehensive policy validation
 func (p *Policy) validate() error {
 	if p == nil {
 		panic("passed nil policy")
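A note on the many //nolint additions in this patch: golangci-lint parses these as machine-readable directives, which is why their formatting is rigid, and the bare `+//` line each hunk adds keeps the directive visually separated from the human doc comment. A sketch (the function is invented):

```go
package example

// branchy reports the sign of n.
//
// The directive below must have no space after //, name a specific
// linter after the colon, and (under the nolintlint checker) carry a
// reason after a second //. Placed on the line before a declaration,
// it silences that linter for the whole declaration -- the shape used
// throughout this patch for gocyclo, goconst, and gosmopolitan.
//
//nolint:gocyclo // illustrative: pretend this function is too branchy
func branchy(n int) string {
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "positive"
	}
}
```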
diff --git a/hscontrol/policy/v2/types_test.go b/hscontrol/policy/v2/types_test.go
index 4cfd0805..16105ecb 100644
--- a/hscontrol/policy/v2/types_test.go
+++ b/hscontrol/policy/v2/types_test.go
@@ -2106,7 +2106,7 @@ func TestResolvePolicy(t *testing.T) {
 		},
 		{
 			name:      "autogroup-member-comprehensive",
-			toResolve: ptr.To(AutoGroup(AutoGroupMember)),
+			toResolve: ptr.To(AutoGroupMember),
 			nodes: types.Nodes{
 				// Node with no tags (should be included - is a member)
 				{
@@ -2156,7 +2156,7 @@ func TestResolvePolicy(t *testing.T) {
 		},
 		{
 			name:      "autogroup-tagged",
-			toResolve: ptr.To(AutoGroup(AutoGroupTagged)),
+			toResolve: ptr.To(AutoGroupTagged),
 			nodes: types.Nodes{
 				// Node with no tags (should be excluded - not tagged)
 				{
diff --git a/hscontrol/state/node_store.go b/hscontrol/state/node_store.go
index 5d8d6e85..1c921d6d 100644
--- a/hscontrol/state/node_store.go
+++ b/hscontrol/state/node_store.go
@@ -55,8 +55,8 @@ var (
 	})
 	nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
 		Namespace: prometheusNamespace,
-		Name:      "nodestore_nodes_total",
-		Help:      "Total number of nodes in the NodeStore",
+		Name:      "nodestore_nodes",
+		Help:      "Number of nodes in the NodeStore",
 	})
 	nodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{
 		Namespace: prometheusNamespace,
diff --git a/hscontrol/state/state.go b/hscontrol/state/state.go
index 013cf56d..a9cc4cd7 100644
--- a/hscontrol/state/state.go
+++ b/hscontrol/state/state.go
@@ -766,7 +766,7 @@ func (s *State) RenameNode(nodeID types.NodeID, newName string) (types.NodeView,
 	// Check name uniqueness against NodeStore
 	allNodes := s.nodeStore.ListNodes()
 
-	for i := 0; i < allNodes.Len(); i++ {
+	for i := range allNodes.Len() {
 		node := allNodes.At(i)
 		if node.ID() != nodeID && node.AsStruct().GivenName == newName {
 			return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %s", ErrNodeNameNotUnique, newName)
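The node_store.go rename follows the Prometheus naming convention that the _total suffix is reserved for counters, while this metric is a gauge that moves in both directions. A sketch of the distinction (the counter and its name are illustrative, not part of the patch):

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	// Counter: monotonically increasing, so _total is appropriate.
	nodesRegisteredTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "headscale",
		Name:      "nodestore_nodes_registered_total",
		Help:      "Total number of node registrations processed",
	})

	// Gauge: rises and falls with the current population, so it gets a
	// plain noun -- the same shape as the renamed nodestore_nodes.
	nodesCurrent = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "headscale",
		Name:      "nodestore_nodes",
		Help:      "Number of nodes in the NodeStore",
	})
)
```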
"valid-hostname", diff --git a/hscontrol/util/util_test.go b/hscontrol/util/util_test.go index a36bb33f..292cf319 100644 --- a/hscontrol/util/util_test.go +++ b/hscontrol/util/util_test.go @@ -1280,6 +1280,7 @@ func TestEnsureHostname_DNSLabelLimit(t *testing.T) { for i, hostname := range testCases { t.Run(cmp.Diff("", ""), func(t *testing.T) { + t.Parallel() hostinfo := &tailcfg.Hostinfo{Hostname: hostname} result := EnsureHostname(hostinfo, "mkey", "nkey") diff --git a/integration/acl_test.go b/integration/acl_test.go index c746f900..3483c4a0 100644 --- a/integration/acl_test.go +++ b/integration/acl_test.go @@ -1896,6 +1896,7 @@ func TestACLAutogroupSelf(t *testing.T) { } } +//nolint:gocyclo // complex integration test scenario func TestACLPolicyPropagationOverTime(t *testing.T) { IntegrationSkip(t) diff --git a/integration/cli_test.go b/integration/cli_test.go index 958444f5..433fa927 100644 --- a/integration/cli_test.go +++ b/integration/cli_test.go @@ -203,7 +203,7 @@ func TestUserCommand(t *testing.T) { "--identifier=1", }, ) - assert.NoError(t, err) + require.NoError(t, err) assert.Contains(t, deleteResult, "User destroyed") var listAfterIDDelete []*v1.User diff --git a/integration/dockertestutil/execute.go b/integration/dockertestutil/execute.go index c97fe59f..7f1d0efb 100644 --- a/integration/dockertestutil/execute.go +++ b/integration/dockertestutil/execute.go @@ -38,7 +38,7 @@ type buffer struct { // Write appends the contents of p to the buffer, growing the buffer as needed. It returns // the number of bytes written. -func (b *buffer) Write(p []byte) (n int, err error) { +func (b *buffer) Write(p []byte) (int, error) { b.mutex.Lock() defer b.mutex.Unlock() diff --git a/integration/helpers.go b/integration/helpers.go index a813acf4..a1a4e1d7 100644 --- a/integration/helpers.go +++ b/integration/helpers.go @@ -185,6 +185,8 @@ func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNode } // requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state. +// +//nolint:gocyclo // complex validation with multiple node states func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) { t.Helper() @@ -446,7 +448,7 @@ func requireAllClientsOfflineStaged(t *testing.T, headscale ControlServer, expec if slices.Contains(expectedNodes, nodeID) { allMapResponsesOffline = false - assert.False(c, true, "Node %d should not appear in map responses", nodeID) + assert.Fail(c, fmt.Sprintf("Node %d should not appear in map responses", nodeID)) } } } else { @@ -539,6 +541,7 @@ func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expe // assertLastSeenSet validates that a node has a non-nil LastSeen timestamp. // Critical for ensuring node activity tracking is functioning properly. func assertLastSeenSet(t *testing.T, node *v1.Node) { + t.Helper() assert.NotNil(t, node) assert.NotNil(t, node.GetLastSeen()) } diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go index 6ea7f30f..7348f07f 100644 --- a/integration/hsic/hsic.go +++ b/integration/hsic/hsic.go @@ -326,6 +326,8 @@ func (hsic *HeadscaleInContainer) buildEntrypoint() []string { } // New returns a new HeadscaleInContainer instance. 
diff --git a/integration/helpers.go b/integration/helpers.go
index a813acf4..a1a4e1d7 100644
--- a/integration/helpers.go
+++ b/integration/helpers.go
@@ -185,6 +185,8 @@ func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNode
 }
 
 // requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state.
+//
+//nolint:gocyclo // complex validation with multiple node states
 func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
 	t.Helper()
 
@@ -446,7 +448,7 @@ func requireAllClientsOfflineStaged(t *testing.T, headscale ControlServer, expec
 			if slices.Contains(expectedNodes, nodeID) {
 				allMapResponsesOffline = false
 
-				assert.False(c, true, "Node %d should not appear in map responses", nodeID)
+				assert.Fail(c, fmt.Sprintf("Node %d should not appear in map responses", nodeID))
 			}
 		}
 	} else {
@@ -539,6 +541,7 @@ func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expe
 // assertLastSeenSet validates that a node has a non-nil LastSeen timestamp.
 // Critical for ensuring node activity tracking is functioning properly.
 func assertLastSeenSet(t *testing.T, node *v1.Node) {
+	t.Helper()
 	assert.NotNil(t, node)
 	assert.NotNil(t, node.GetLastSeen())
 }
diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go
index 6ea7f30f..7348f07f 100644
--- a/integration/hsic/hsic.go
+++ b/integration/hsic/hsic.go
@@ -326,6 +326,8 @@ func (hsic *HeadscaleInContainer) buildEntrypoint() []string {
 }
 
 // New returns a new HeadscaleInContainer instance.
+//
+//nolint:gocyclo // complex container setup with many options
 func New(
 	pool *dockertest.Pool,
 	networks []*dockertest.Network,
diff --git a/integration/route_test.go b/integration/route_test.go
index 0d9cd1bb..df1b36e3 100644
--- a/integration/route_test.go
+++ b/integration/route_test.go
@@ -220,6 +220,7 @@ func TestEnablingRoutes(t *testing.T) {
 	}
 }
 
+//nolint:gocyclo // complex HA failover test scenario
 func TestHASubnetRouterFailover(t *testing.T) {
 	IntegrationSkip(t)
 
@@ -1601,7 +1602,7 @@ func TestSubnetRouteACL(t *testing.T) {
 func TestEnablingExitRoutes(t *testing.T) {
 	IntegrationSkip(t)
 
-	user := "user2"
+	user := "user2" //nolint:goconst // test-specific value, not related to userToDelete constant
 
 	spec := ScenarioSpec{
 		NodesPerUser: 2,
@@ -2031,6 +2032,8 @@ func MustFindNode(hostname string, nodes []*v1.Node) *v1.Node {
 // - Verify that peers can no longer use node
 // - Policy is changed back to auto approve route, check that routes already existing is approved.
 // - Verify that routes can now be seen by peers.
+//
+//nolint:gocyclo // complex multi-network auto-approve test scenario
 func TestAutoApproveMultiNetwork(t *testing.T) {
 	IntegrationSkip(t)
 
diff --git a/integration/scenario.go b/integration/scenario.go
index faef9e00..ae69a848 100644
--- a/integration/scenario.go
+++ b/integration/scenario.go
@@ -314,6 +314,7 @@ func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
 }
 
 func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
+	t.Helper()
 	defer func() { _ = dockertestutil.CleanUnreferencedNetworks(s.pool) }()
 	defer func() { _ = dockertestutil.CleanImagesInCI(s.pool) }()
 
@@ -1162,7 +1163,7 @@ var errParseAuthPage = errors.New("parsing auth page")
 
 func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
 	// see api.go HTML template
-	codeSep := strings.Split(string(body), "</code>")
+	codeSep := strings.Split(body, "</code>")
 	if len(codeSep) != 2 {
 		return errParseAuthPage
 	}
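The t.Helper() additions above (assertLastSeenSet, ShutdownAssertNoPanics) change only failure attribution. A sketch of the effect:

```go
package example

import "testing"

// requireNonEmpty mirrors helpers like assertLastSeenSet: t.Helper()
// tells the testing package to skip this frame when reporting, so a
// failure's file:line points at the calling test, not at the helper.
func requireNonEmpty(t *testing.T, name, value string) {
	t.Helper()
	if value == "" {
		t.Fatalf("%s must not be empty", name)
	}
}

func TestNodeHostname(t *testing.T) {
	requireNonEmpty(t, "hostname", "node-1") // a failure reports this line
}
```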
diff --git a/integration/tags_test.go b/integration/tags_test.go
index 16105ea2..f9cd394b 100644
--- a/integration/tags_test.go
+++ b/integration/tags_test.go
@@ -2502,7 +2502,7 @@ func assertNetmapSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClie
 
 	var actualTagsSlice []string
 	if nm.SelfNode.Valid() {
-		for _, tag := range nm.SelfNode.Tags().All() {
+		for _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			actualTagsSlice = append(actualTagsSlice, tag)
 		}
 	}
@@ -2647,7 +2647,7 @@ func TestTagsIssue2978ReproTagReplacement(t *testing.T) {
 
 	var netmapTagsAfterFirstCall []string
 	if nmErr == nil && nm != nil && nm.SelfNode.Valid() {
-		for _, tag := range nm.SelfNode.Tags().All() {
+		for _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			netmapTagsAfterFirstCall = append(netmapTagsAfterFirstCall, tag)
 		}
 	}
diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go
index dbdb8e5f..f660f5e3 100644
--- a/integration/tsic/tsic.go
+++ b/integration/tsic/tsic.go
@@ -295,6 +295,8 @@ func (t *TailscaleInContainer) buildEntrypoint() []string {
 }
 
 // New returns a new TailscaleInContainer instance.
+//
+//nolint:gocyclo // complex container setup with many options
 func New(
 	pool *dockertest.Pool,
 	version string,
@@ -687,7 +689,7 @@ func (t *TailscaleInContainer) Login(
 // This login mechanism uses web + command line flow for authentication.
 func (t *TailscaleInContainer) LoginWithURL(
 	loginServer string,
-) (loginURL *url.URL, err error) {
+) (*url.URL, error) {
 	command := t.buildLoginCommand(loginServer, "")
 
 	stdout, stderr, err := t.Execute(command)
@@ -701,7 +703,7 @@ func (t *TailscaleInContainer) LoginWithURL(
 		}
 	}()
 
-	loginURL, err = util.ParseLoginURLFromCLILogin(stdout + stderr)
+	loginURL, err := util.ParseLoginURLFromCLILogin(stdout + stderr)
 	if err != nil {
 		return nil, err
 	}
@@ -711,12 +713,12 @@ func (t *TailscaleInContainer) LoginWithURL(
 
 // Logout runs the logout routine on the given Tailscale instance.
 func (t *TailscaleInContainer) Logout() error {
-	stdout, stderr, err := t.Execute([]string{"tailscale", "logout"})
+	_, _, err := t.Execute([]string{"tailscale", "logout"})
 	if err != nil {
 		return err
 	}
 
-	stdout, stderr, _ = t.Execute([]string{"tailscale", "status"})
+	stdout, stderr, _ := t.Execute([]string{"tailscale", "status"})
 	if !strings.Contains(stdout+stderr, "Logged out.") {
 		return fmt.Errorf("logging out, stdout: %s, stderr: %s", stdout, stderr) //nolint:err113
 	}
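Finally, the tsic.go signature change drops the named results, which is what forces the `=` to become `:=` in the body. A sketch of the hazard being removed (the names and helper are invented):

```go
package example

import (
	"errors"
	"fmt"
)

var errEmpty = errors.New("empty input")

// parse shows the shape the patch moves to: unnamed results and explicit
// returns. With named results, a deferred closure reading the result or
// an accidental `:=` shadowing err inside a block can silently change
// what a bare `return` hands back; plain results keep data flow visible.
func parse(s string) (string, error) {
	if s == "" {
		return "", errEmpty
	}

	out, err := transform(s) // fresh variables, nothing to shadow
	if err != nil {
		return "", err
	}

	return out, nil
}

// transform is an illustrative stand-in for util.ParseLoginURLFromCLILogin.
func transform(s string) (string, error) {
	return fmt.Sprintf("parsed(%s)", s), nil
}
```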