
hscontrol/mapper: fix remaining copylocks issues in tests

Convert all range loops iterating over node slices to use index-based
iteration with pointer access to avoid copying structs containing
atomic fields.
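For illustration, a minimal sketch of the pattern being fixed (the node/updates
names are hypothetical, not taken from this repository): a struct containing a
sync/atomic value must not be copied, and go vet's copylocks check flags range
variables that copy such structs. Indexing the slice and taking a pointer keeps
every update on the element itself rather than on a throwaway copy.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type node struct {
        id      int
        updates atomic.Int64 // atomic field: copying the struct trips copylocks
    }

    func main() {
        nodes := make([]node, 3)

        // Before: `for _, n := range nodes { n.updates.Add(1) }` copies each
        // element, so vet complains and the increments land on the copies.

        // After: index-based iteration with pointer access, as in this commit.
        for i := range nodes {
            n := &nodes[i]
            n.updates.Add(1)
            fmt.Println(n.id, n.updates.Load())
        }
    }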

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Kristoffer Dalby 2026-02-06 08:55:07 +00:00
parent 698ef4272c
commit d29a47adaa


@@ -1429,7 +1429,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 // Connection churn cycles - rapidly connect/disconnect to test concurrency safety
 for i := range numCycles {
-for _, node := range churningNodes {
+for j := range churningNodes {
+node := &churningNodes[j]
 wg.Add(2)
 // Connect churning node
@@ -1525,7 +1526,7 @@ func TestBatcherConcurrentClients(t *testing.T) {
 if i%7 == 0 && len(allNodes) > 0 {
 // Node-specific changes using real nodes
-node := allNodes[i%len(allNodes)]
+node := &allNodes[i%len(allNodes)]
 // Use a valid expiry time for testing since test nodes don't have expiry set
 testExpiry := time.Now().Add(24 * time.Hour)
 batcher.AddWork(change.KeyExpiryFor(node.n.ID, testExpiry))
@@ -1573,7 +1574,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 t.Logf("Work generated: %d DERP + %d Full + %d KeyExpiry = %d total AddWork calls",
 expectedDerpUpdates, expectedFullUpdates, expectedKeyUpdates, totalGeneratedWork)
-for _, node := range stableNodes {
+for i := range stableNodes {
+node := &stableNodes[i]
 if stats, exists := allStats[node.n.ID]; exists {
 stableUpdateCount += stats.TotalUpdates
 t.Logf("Stable node %d: %d updates",
@@ -1586,7 +1588,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 }
 }
-for _, node := range churningNodes {
+for i := range churningNodes {
+node := &churningNodes[i]
 if stats, exists := allStats[node.n.ID]; exists {
 churningUpdateCount += stats.TotalUpdates
 }
@@ -1611,7 +1614,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 }
 // Verify all stable clients are still functional
-for _, node := range stableNodes {
+for i := range stableNodes {
+node := &stableNodes[i]
 if !batcher.IsConnected(node.n.ID) {
 t.Errorf("Stable node %d lost connection during racing", node.n.ID)
 }
@@ -1830,7 +1834,8 @@ func XTestBatcherScalability(t *testing.T) {
 }
 // Connection/disconnection cycles for subset of nodes
-for i, node := range chaosNodes {
+for i := range chaosNodes {
+node := &chaosNodes[i]
 // Only add work if this is connection chaos or mixed
 if tc.chaosType == "connection" || tc.chaosType == "mixed" {
 wg.Add(2)
@@ -2145,7 +2150,8 @@ func TestBatcherFullPeerUpdates(t *testing.T) {
 t.Logf("Created %d nodes in database", len(allNodes))
 // Connect nodes one at a time and wait for each to be connected
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 _ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
 t.Logf("Connected node %d (ID: %d)", i, node.n.ID)
@@ -2164,7 +2170,8 @@ func TestBatcherFullPeerUpdates(t *testing.T) {
 }, 5*time.Second, 50*time.Millisecond, "waiting for all nodes to connect")
 // Check how many peers each node should see
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 peers := testData.State.ListPeers(node.n.ID)
 t.Logf("Node %d should see %d peers from state", i, peers.Len())
 }
@@ -2294,7 +2301,8 @@ func TestBatcherRapidReconnection(t *testing.T) {
 // Phase 1: Connect all nodes initially
 t.Logf("Phase 1: Connecting all nodes...")
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 err := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
 if err != nil {
 t.Fatalf("Failed to add node %d: %v", i, err)
@@ -2311,7 +2319,8 @@ func TestBatcherRapidReconnection(t *testing.T) {
 // Phase 2: Rapid disconnect ALL nodes (simulating nodes going down)
 t.Logf("Phase 2: Rapid disconnect all nodes...")
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 removed := batcher.RemoveNode(node.n.ID, node.ch)
 t.Logf("Node %d RemoveNode result: %t", i, removed)
 }
@@ -2320,7 +2329,8 @@ func TestBatcherRapidReconnection(t *testing.T) {
 t.Logf("Phase 3: Rapid reconnect with new channels...")
 newChannels := make([]chan *tailcfg.MapResponse, len(allNodes))
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 newChannels[i] = make(chan *tailcfg.MapResponse, 10)
 err := batcher.AddNode(node.n.ID, newChannels[i], tailcfg.CapabilityVersion(100))
@@ -2345,7 +2355,8 @@ func TestBatcherRapidReconnection(t *testing.T) {
 debugInfo := debugBatcher.Debug()
 disconnectedCount := 0
-for i, node := range allNodes {
+for i := range allNodes {
+node := &allNodes[i]
 if info, exists := debugInfo[node.n.ID]; exists {
 t.Logf("Node %d (ID %d): debug info = %+v", i, node.n.ID, info)
@@ -2420,8 +2431,8 @@ func TestBatcherMultiConnection(t *testing.T) {
 defer cleanup()
 batcher := testData.Batcher
-node1 := testData.Nodes[0]
-node2 := testData.Nodes[1]
+node1 := &testData.Nodes[0]
+node2 := &testData.Nodes[1]
 t.Logf("=== MULTI-CONNECTION TEST ===")