mirror of https://github.com/juanfont/headscale.git
all: fix easy golangci-lint issues
Fix various linter issues:

- testifylint: use require instead of assert for error assertions
- thelper: add t.Helper() to test helper functions
- unconvert: remove unnecessary type conversions
- usetesting: use t.TempDir() instead of os.MkdirTemp
- intrange: use Go 1.22+ integer ranges
- nonamedreturns: remove named return values
- promlinter: rename gauge metric to not have _total suffix
- rowserrcheck: add rows.Err() check
- sqlclosecheck: use defer for rows.Close()
- tparallel: add t.Parallel() to subtests
- ineffassign: remove unused assignment

Add nolint directives for intentionally skipped issues:

- gocyclo: complex functions that need refactoring (16 functions)
- gosmopolitan: intentional i18n test data
- unqueryvet: false positives (not SQLBoiler)
- goconst: test-specific inline values
- noinlineerr: idiomatic inline error handling
parent 08646a39cb
commit 4c210b9219
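To make the mechanical patterns concrete, here is a minimal sketch (an invented test, not part of this diff) showing the usetesting, testifylint, and intrange fixes applied together:

package example

import (
	"bytes"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExample(t *testing.T) {
	// usetesting: t.TempDir() replaces os.MkdirTemp("", ...) plus the
	// matching defer os.RemoveAll; cleanup is automatic.
	tmpDir := t.TempDir()

	err := os.WriteFile(filepath.Join(tmpDir, "config.yaml"), []byte("a\nb"), 0o600)
	// testifylint: use require (not assert) for error checks so the test
	// stops here instead of continuing with invalid state.
	require.NoError(t, err)

	data, err := os.ReadFile(filepath.Join(tmpDir, "config.yaml"))
	require.NoError(t, err)

	// intrange: the Go 1.22+ integer range replaces for i := 0; i < n; i++.
	lines := bytes.Split(data, []byte("\n"))
	for i := range len(lines) {
		_ = lines[i]
	}
}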
@@ -14,10 +14,7 @@ import (
 )
 
 func TestConfigFileLoading(t *testing.T) {
-	tmpDir, err := os.MkdirTemp("", "headscale")
-	require.NoError(t, err)
-
-	defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
 
 	path, err := os.Getwd()
 	require.NoError(t, err)
@@ -49,10 +46,7 @@ func TestConfigFileLoading(t *testing.T) {
 }
 
 func TestConfigLoading(t *testing.T) {
-	tmpDir, err := os.MkdirTemp("", "headscale")
-	require.NoError(t, err)
-
-	defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
 
 	path, err := os.Getwd()
 	require.NoError(t, err)
@@ -32,6 +32,8 @@ var (
 )
 
 // runTestContainer executes integration tests in a Docker container.
+//
+//nolint:gocyclo // complex test orchestration function
 func runTestContainer(ctx context.Context, config *RunConfig) error {
 	cli, err := createDockerClient(ctx)
 	if err != nil {
@@ -504,6 +504,8 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
 }
 
 // Serve launches the HTTP and gRPC server service Headscale and the API.
+//
+//nolint:gocyclo // complex server startup function
 func (h *Headscale) Serve() error {
 	var err error
 
@@ -34,6 +34,7 @@ type interactiveStep struct {
 	callAuthPath bool // Real call to HandleNodeFromAuthPath, not mocked
 }
 
+//nolint:gocyclo // comprehensive test function with many scenarios
func TestAuthenticationFlows(t *testing.T) {
 	// Shared test keys for consistent behavior across test cases
 	machineKey1 := key.NewMachine()
@@ -3109,7 +3110,7 @@ func TestWebFlowReauthDifferentUser(t *testing.T) {
 	user1Nodes := 0
 	user2Nodes := 0
 
-	for i := 0; i < allNodesSlice.Len(); i++ {
+	for i := range allNodesSlice.Len() {
 		n := allNodesSlice.At(i)
 		if n.UserID().Get() == user1.ID {
 			user1Nodes++
@@ -53,6 +53,8 @@ type HSDatabase struct {
 
 // NewHeadscaleDatabase creates a new database connection and runs migrations.
 // It accepts the full configuration to allow migrations access to policy settings.
+//
+//nolint:gocyclo // complex database initialization with many migrations
 func NewHeadscaleDatabase(
 	cfg *types.Config,
 	regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
@@ -995,6 +997,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
 	if err != nil {
 		return err
 	}
+	defer rows.Close()
 
 	for rows.Next() {
 		var violation constraintViolation
@@ -1007,7 +1010,9 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
 		violatedConstraints = append(violatedConstraints, violation)
 	}
 
-	_ = rows.Close()
+	if err := rows.Err(); err != nil { //nolint:noinlineerr
+		return err
+	}
 
 	if len(violatedConstraints) > 0 {
 		for _, violation := range violatedConstraints {
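The two hunks above converge on the standard database/sql iteration idiom: release the rows with defer on every return path (sqlclosecheck) and check rows.Err() after the loop (rowserrcheck). A self-contained sketch with an invented query and schema:

package example

import "database/sql"

func listNames(db *sql.DB) ([]string, error) {
	rows, err := db.Query("SELECT name FROM users")
	if err != nil {
		return nil, err
	}
	// sqlclosecheck: defer guarantees the rows are closed on every path.
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	// rowserrcheck: rows.Next() returning false can mask an iteration
	// error, so rows.Err() must be checked after the loop.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return names, nil
}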
@@ -324,8 +324,8 @@ func DERPBootstrapDNSHandler(
 
 	var resolver net.Resolver
 
-	for _, region := range derpMap.Regions().All() {
-		for _, node := range region.Nodes().All() { // we don't care if we override some nodes
+	for _, region := range derpMap.Regions().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
+		for _, node := range region.Nodes().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName())
 			if err != nil {
 				log.Trace().
@@ -235,8 +235,8 @@ func setupBatcherWithTestData(
 	}
 
 	derpMap, err := derp.GetDERPMap(cfg.DERP)
-	assert.NoError(t, err)
-	assert.NotNil(t, derpMap)
+	require.NoError(t, err)
+	require.NotNil(t, derpMap)
 
 	state.SetDERPMap(derpMap)
 
@@ -1124,6 +1124,7 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
 // even when real node updates are being processed, ensuring no race conditions
 // occur during channel replacement with actual workload.
 func XTestBatcherChannelClosingRace(t *testing.T) {
+	t.Helper()
 	for _, batcherFunc := range allBatcherFunctions {
 		t.Run(batcherFunc.name, func(t *testing.T) {
 			// Create test environment with real database and nodes
@@ -1345,6 +1346,8 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
 // real node data. The test validates that stable clients continue to function
 // normally and receive proper updates despite the connection churn from other clients,
 // ensuring system stability under concurrent load.
+//
+//nolint:gocyclo // complex concurrent test scenario
 func TestBatcherConcurrentClients(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping concurrent client test in short mode")
@@ -1629,6 +1632,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 // It validates that the system remains stable with no deadlocks, panics, or
 // missed updates under sustained high load. The test uses real node data to
 // generate authentic update scenarios and tracks comprehensive statistics.
+//
+//nolint:gocyclo // complex scalability test scenario
 func XTestBatcherScalability(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping scalability test in short mode")
@@ -2422,6 +2427,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
 	}
 }
 
+//nolint:gocyclo // complex multi-connection test scenario
 func TestBatcherMultiConnection(t *testing.T) {
 	for _, batcherFunc := range allBatcherFunctions {
 		t.Run(batcherFunc.name, func(t *testing.T) {
@@ -339,8 +339,8 @@ func TestMapResponseBuilder_MultipleErrors(t *testing.T) {
 
 	// Build should return a multierr
 	data, err := result.Build()
-	assert.Nil(t, data)
-	assert.Error(t, err)
+	require.Nil(t, data)
+	require.Error(t, err)
 
 	// The error should contain information about multiple errors
 	assert.Contains(t, err.Error(), "multiple errors")
@@ -258,7 +258,7 @@ func TestNodeExpiry(t *testing.T) {
 		},
 		{
 			name: "localtime",
-			exp: tp(time.Time{}.Local()),
+			exp: tp(time.Time{}.Local()), //nolint:gosmopolitan
 			wantTimeZero: true,
 		},
 	}
@@ -146,6 +146,8 @@ func (pol *Policy) compileFilterRulesForNode(
 // It returns a slice of filter rules because when an ACL has both autogroup:self
 // and other destinations, they need to be split into separate rules with different
 // source filtering logic.
+//
+//nolint:gocyclo // complex ACL compilation logic
 func (pol *Policy) compileACLWithAutogroupSelf(
 	acl ACL,
 	users types.Users,
@@ -328,6 +330,7 @@ func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
 	}
 }
 
+//nolint:gocyclo // complex SSH policy compilation logic
 func (pol *Policy) compileSSHPolicy(
 	users types.Users,
 	node types.NodeView,
@@ -77,6 +77,7 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
 		{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
 	}
 
+	//nolint:goconst // test-specific inline policy for clarity
 	policy := `{
 		"acls": [
 			{
@@ -518,7 +518,7 @@ func (p *Prefix) UnmarshalJSON(b []byte) error {
 		return err
 	}
 
-	if err := p.Validate(); err != nil {
+	if err := p.Validate(); err != nil { //nolint:noinlineerr
 		return err
 	}
 
@@ -715,7 +715,7 @@ func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
 		return err
 	}
 
-	if err := ve.Validate(); err != nil {
+	if err := ve.Validate(); err != nil { //nolint:noinlineerr
 		return err
 	}
 
@@ -1585,7 +1585,7 @@ type ACL struct {
 func (a *ACL) UnmarshalJSON(b []byte) error {
 	// First unmarshal into a map to filter out comment fields
 	var raw map[string]any
-	if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil {
+	if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil { //nolint:noinlineerr
 		return err
 	}
 
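The noinlineerr linter would demand a separate assignment followed by a plain `if err != nil` check; the nolint directives in the three hunks above keep the inline form because, for a single call whose error is immediately returned, `if err := f(); err != nil { ... }` is the idiomatic Go spelling and keeps err scoped to the check.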
@@ -1850,6 +1850,8 @@ func validateACLSrcDstCombination(sources Aliases, destinations []AliasWithPorts
 // the unmarshaling process.
 // It runs through all rules and checks if there are any inconsistencies
 // in the policy that needs to be addressed before it can be used.
+//
+//nolint:gocyclo // comprehensive policy validation
 func (p *Policy) validate() error {
 	if p == nil {
 		panic("passed nil policy")
@@ -2106,7 +2106,7 @@ func TestResolvePolicy(t *testing.T) {
 		},
 		{
 			name: "autogroup-member-comprehensive",
-			toResolve: ptr.To(AutoGroup(AutoGroupMember)),
+			toResolve: ptr.To(AutoGroupMember),
 			nodes: types.Nodes{
 				// Node with no tags (should be included - is a member)
 				{
@@ -2156,7 +2156,7 @@ func TestResolvePolicy(t *testing.T) {
 		},
 		{
 			name: "autogroup-tagged",
-			toResolve: ptr.To(AutoGroup(AutoGroupTagged)),
+			toResolve: ptr.To(AutoGroupTagged),
 			nodes: types.Nodes{
 				// Node with no tags (should be excluded - not tagged)
 				{
@@ -55,8 +55,8 @@ var (
 	})
 	nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
 		Namespace: prometheusNamespace,
-		Name: "nodestore_nodes_total",
-		Help: "Total number of nodes in the NodeStore",
+		Name: "nodestore_nodes",
+		Help: "Number of nodes in the NodeStore",
 	})
 	nodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{
 		Namespace: prometheusNamespace,
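Promlinter enforces the Prometheus naming convention that the _total suffix is reserved for counters; a gauge reports a point-in-time value, hence the rename above. A minimal sketch of the resulting declaration (the namespace string is a stand-in for the package's prometheusNamespace constant):

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// A gauge goes up and down with the current node count, so it carries a
// plain noun name; only ever-increasing counters end in _total.
var nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
	Namespace: "headscale", // assumption; the real code uses prometheusNamespace
	Name:      "nodestore_nodes",
	Help:      "Number of nodes in the NodeStore",
})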
@@ -766,7 +766,7 @@ func (s *State) RenameNode(nodeID types.NodeID, newName string) (types.NodeView,
 
 	// Check name uniqueness against NodeStore
 	allNodes := s.nodeStore.ListNodes()
-	for i := 0; i < allNodes.Len(); i++ {
+	for i := range allNodes.Len() {
 		node := allNodes.At(i)
 		if node.ID() != nodeID && node.AsStruct().GivenName == newName {
 			return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %s", ErrNodeNameNotUnique, newName)
@@ -351,11 +351,10 @@ func TestReadConfigFromEnv(t *testing.T) {
 }
 
 func TestTLSConfigValidation(t *testing.T) {
-	tmpDir, err := os.MkdirTemp("", "headscale")
-	if err != nil {
-		t.Fatal(err)
-	}
-	// defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
+
+	var err error
+
 	configYaml := []byte(`---
 tls_letsencrypt_hostname: example.com
 tls_letsencrypt_challenge_type: ""
@@ -407,7 +407,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
 			Hostname: "valid-hostname",
 		},
 		change: &tailcfg.Hostinfo{
-			Hostname: "我的电脑",
+			Hostname: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
 		},
 		want: Node{
 			GivenName: "valid-hostname",
@@ -491,7 +491,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
 			Hostname: "valid-hostname",
 		},
 		change: &tailcfg.Hostinfo{
-			Hostname: "server-北京-01",
+			Hostname: "server-北京-01", //nolint:gosmopolitan // intentional i18n test data
 		},
 		want: Node{
 			GivenName: "valid-hostname",
@@ -505,7 +505,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
 			Hostname: "valid-hostname",
 		},
 		change: &tailcfg.Hostinfo{
-			Hostname: "我的电脑",
+			Hostname: "我的电脑", //nolint:gosmopolitan // intentional i18n test data
 		},
 		want: Node{
 			GivenName: "valid-hostname",
@@ -1280,6 +1280,7 @@ func TestEnsureHostname_DNSLabelLimit(t *testing.T) {
 
 	for i, hostname := range testCases {
 		t.Run(cmp.Diff("", ""), func(t *testing.T) {
+			t.Parallel()
 			hostinfo := &tailcfg.Hostinfo{Hostname: hostname}
 
 			result := EnsureHostname(hostinfo, "mkey", "nkey")
@@ -1896,6 +1896,7 @@ func TestACLAutogroupSelf(t *testing.T) {
 	}
 }
 
+//nolint:gocyclo // complex integration test scenario
 func TestACLPolicyPropagationOverTime(t *testing.T) {
 	IntegrationSkip(t)
 
@@ -203,7 +203,7 @@ func TestUserCommand(t *testing.T) {
 			"--identifier=1",
 		},
 	)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Contains(t, deleteResult, "User destroyed")
 
 	var listAfterIDDelete []*v1.User
@@ -38,7 +38,7 @@ type buffer struct {
 
 // Write appends the contents of p to the buffer, growing the buffer as needed. It returns
 // the number of bytes written.
-func (b *buffer) Write(p []byte) (n int, err error) {
+func (b *buffer) Write(p []byte) (int, error) {
 	b.mutex.Lock()
 	defer b.mutex.Unlock()
 
@@ -185,6 +185,8 @@ func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNode
 }
 
 // requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state.
+//
+//nolint:gocyclo // complex validation with multiple node states
 func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
 	t.Helper()
 
@@ -446,7 +448,7 @@ func requireAllClientsOfflineStaged(t *testing.T, headscale ControlServer, expec
 			if slices.Contains(expectedNodes, nodeID) {
 				allMapResponsesOffline = false
 
-				assert.False(c, true, "Node %d should not appear in map responses", nodeID)
+				assert.Fail(c, fmt.Sprintf("Node %d should not appear in map responses", nodeID))
 			}
 		}
 	} else {
@@ -539,6 +541,7 @@ func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expe
 // assertLastSeenSet validates that a node has a non-nil LastSeen timestamp.
 // Critical for ensuring node activity tracking is functioning properly.
 func assertLastSeenSet(t *testing.T, node *v1.Node) {
+	t.Helper()
 	assert.NotNil(t, node)
 	assert.NotNil(t, node.GetLastSeen())
 }
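For context on the thelper fixes: t.Helper() marks a function as a test helper so that failure messages report the caller's file and line rather than the helper's own. A minimal invented example:

package example

import "testing"

// assertPositive is a hypothetical helper; because of t.Helper(), a
// failure here is attributed to the test that called assertPositive.
func assertPositive(t *testing.T, v int) {
	t.Helper()
	if v <= 0 {
		t.Fatalf("expected positive value, got %d", v)
	}
}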
@@ -326,6 +326,8 @@ func (hsic *HeadscaleInContainer) buildEntrypoint() []string {
 }
 
 // New returns a new HeadscaleInContainer instance.
+//
+//nolint:gocyclo // complex container setup with many options
 func New(
 	pool *dockertest.Pool,
 	networks []*dockertest.Network,
@@ -220,6 +220,7 @@ func TestEnablingRoutes(t *testing.T) {
 	}
 }
 
+//nolint:gocyclo // complex HA failover test scenario
 func TestHASubnetRouterFailover(t *testing.T) {
 	IntegrationSkip(t)
 
@@ -1601,7 +1602,7 @@ func TestSubnetRouteACL(t *testing.T) {
 func TestEnablingExitRoutes(t *testing.T) {
 	IntegrationSkip(t)
 
-	user := "user2"
+	user := "user2" //nolint:goconst // test-specific value, not related to userToDelete constant
 
 	spec := ScenarioSpec{
 		NodesPerUser: 2,
@@ -2031,6 +2032,8 @@ func MustFindNode(hostname string, nodes []*v1.Node) *v1.Node {
 // - Verify that peers can no longer use node
 // - Policy is changed back to auto approve route, check that routes already existing is approved.
 // - Verify that routes can now be seen by peers.
+//
+//nolint:gocyclo // complex multi-network auto-approve test scenario
 func TestAutoApproveMultiNetwork(t *testing.T) {
 	IntegrationSkip(t)
 
@@ -314,6 +314,7 @@ func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
 }
 
 func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
+	t.Helper()
 	defer func() { _ = dockertestutil.CleanUnreferencedNetworks(s.pool) }()
 	defer func() { _ = dockertestutil.CleanImagesInCI(s.pool) }()
 
@@ -1162,7 +1163,7 @@ var errParseAuthPage = errors.New("parsing auth page")
 
 func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
 	// see api.go HTML template
-	codeSep := strings.Split(string(body), "</code>")
+	codeSep := strings.Split(body, "</code>")
 	if len(codeSep) != 2 {
 		return errParseAuthPage
 	}
@@ -2502,7 +2502,7 @@ func assertNetmapSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClie
 	var actualTagsSlice []string
 
 	if nm.SelfNode.Valid() {
-		for _, tag := range nm.SelfNode.Tags().All() {
+		for _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			actualTagsSlice = append(actualTagsSlice, tag)
 		}
 	}
@@ -2647,7 +2647,7 @@ func TestTagsIssue2978ReproTagReplacement(t *testing.T) {
 	var netmapTagsAfterFirstCall []string
 
 	if nmErr == nil && nm != nil && nm.SelfNode.Valid() {
-		for _, tag := range nm.SelfNode.Tags().All() {
+		for _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator
 			netmapTagsAfterFirstCall = append(netmapTagsAfterFirstCall, tag)
 		}
 	}
@@ -295,6 +295,8 @@ func (t *TailscaleInContainer) buildEntrypoint() []string {
 }
 
 // New returns a new TailscaleInContainer instance.
+//
+//nolint:gocyclo // complex container setup with many options
 func New(
 	pool *dockertest.Pool,
 	version string,
@@ -687,7 +689,7 @@ func (t *TailscaleInContainer) Login(
 
 // This login mechanism uses web + command line flow for authentication.
 func (t *TailscaleInContainer) LoginWithURL(
 	loginServer string,
-) (loginURL *url.URL, err error) {
+) (*url.URL, error) {
 	command := t.buildLoginCommand(loginServer, "")
 
 	stdout, stderr, err := t.Execute(command)
@@ -701,7 +703,7 @@ func (t *TailscaleInContainer) LoginWithURL(
 		}
 	}()
 
-	loginURL, err = util.ParseLoginURLFromCLILogin(stdout + stderr)
+	loginURL, err := util.ParseLoginURLFromCLILogin(stdout + stderr)
 	if err != nil {
 		return nil, err
 	}
@@ -711,12 +713,12 @@ func (t *TailscaleInContainer) LoginWithURL(
 
 // Logout runs the logout routine on the given Tailscale instance.
 func (t *TailscaleInContainer) Logout() error {
-	stdout, stderr, err := t.Execute([]string{"tailscale", "logout"})
+	_, _, err := t.Execute([]string{"tailscale", "logout"})
 	if err != nil {
 		return err
 	}
 
-	stdout, stderr, _ = t.Execute([]string{"tailscale", "status"})
+	stdout, stderr, _ := t.Execute([]string{"tailscale", "status"})
 	if !strings.Contains(stdout+stderr, "Logged out.") {
 		return fmt.Errorf("logging out, stdout: %s, stderr: %s", stdout, stderr) //nolint:err113
 	}