Mirror of https://github.com/juanfont/headscale.git, synced 2025-10-23 11:19:19 +02:00
This PR addresses some consistency issues that were introduced or discovered with the nodestore.

nodestore: now returns the node that was put or updated once the write has finished. This closes a race condition where reading the node back did not necessarily return it with the given change applied, and it ensures we see all the other updates from that batched write.

auth: the authentication paths have been unified and simplified. This removes a lot of bad branches and ensures we only do the minimal work. A comprehensive auth test set has been created so we no longer have to run integration tests to validate auth, and it has allowed us to generate test cases for all the branches we currently know of.

integration: added a lot more tooling and checks to validate that nodes reach the expected state when they come up and go down, and standardised behaviour between the different auth models. Much of this is there to support or detect issues in the changes to the nodestore (races) and auth (inconsistencies after login and reaching the correct state).

This PR was assisted, particularly for the tests, by Claude Code.
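As illustration of the nodestore change above, here is a minimal sketch of the pattern where a write returns the node as it exists after the batched update, so callers never have to read it back and race with other writers. This is not Headscale's actual API; `NodeStore`, `Node`, and `PutNode` are hypothetical names used only to show the shape of the change.

```go
package nodestore

import "sync"

// Node is a simplified stand-in for Headscale's node type.
type Node struct {
	ID       uint64
	Hostname string
	Expired  bool
}

// NodeStore is an illustrative in-memory store. The point shown here is that
// PutNode returns the node as stored, including any other changes applied in
// the same (conceptually batched) write.
type NodeStore struct {
	mu    sync.Mutex
	nodes map[uint64]Node
}

func New() *NodeStore {
	return &NodeStore{nodes: make(map[uint64]Node)}
}

// PutNode applies fn to the stored node and returns the resulting value.
// Returning the updated node closes the read-after-write race: the caller
// gets exactly what was written instead of re-reading a possibly newer state.
func (s *NodeStore) PutNode(id uint64, fn func(*Node)) Node {
	s.mu.Lock()
	defer s.mu.Unlock()

	n := s.nodes[id]
	n.ID = id
	fn(&n)
	s.nodes[id] = n

	return n
}
```

A caller can then use the returned value directly, for example `expired := store.PutNode(id, func(n *Node) { n.Expired = true })`, rather than issuing a separate read that may observe later writes.

The auth test set mentioned above lends itself to Go's table-driven style, where every known branch becomes one case. The sketch below assumes a hypothetical `authenticate` function; the real entry points and branches live in Headscale's auth package.

```go
package auth

import "testing"

// authenticate is a hypothetical stand-in for a unified auth entry point.
func authenticate(preAuthKey string, interactive bool) (loggedIn bool, err error) {
	if preAuthKey != "" {
		return true, nil
	}

	return interactive, nil
}

func TestAuthBranches(t *testing.T) {
	tests := []struct {
		name        string
		preAuthKey  string
		interactive bool
		wantLogin   bool
	}{
		{name: "pre-auth key", preAuthKey: "key", wantLogin: true},
		{name: "interactive login", interactive: true, wantLogin: true},
		{name: "no credentials", wantLogin: false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := authenticate(tt.preAuthKey, tt.interactive)
			if err != nil {
				t.Fatalf("authenticate: %v", err)
			}
			if got != tt.wantLogin {
				t.Errorf("loggedIn = %v, want %v", got, tt.wantLogin)
			}
		})
	}
}
```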
209 lines · 6.0 KiB · Go
package integration

import (
	"testing"
	"time"

	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

type ClientsSpec struct {
	Plain         int
	WebsocketDERP int
}

func TestDERPServerScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
			"usernet3": {"user3"},
		},
	}

	derpServerScenario(t, spec, false, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)
		t.Logf("checking %d clients for websocket connections", len(allClients))

		for _, client := range allClients {
			if didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q used a websocket connection, but was not expected to",
					client.Hostname(),
				)
				t.Fail()
			}
		}

		hsServer, err := scenario.Headscale()
		requireNoErrGetHeadscale(t, err)

		// Build a DERP region that points at Headscale's embedded DERP server.
		derpRegion := tailcfg.DERPRegion{
			RegionCode: "test-derpverify",
			RegionName: "TestDerpVerify",
			Nodes: []*tailcfg.DERPNode{
				{
					Name:             "TestDerpVerify",
					RegionID:         900,
					HostName:         hsServer.GetHostname(),
					STUNPort:         3478,
					STUNOnly:         false,
					DERPPort:         443,
					InsecureForTests: true,
				},
			},
		}

		// Exercise DERP client verification with a node key Headscale has never seen.
		fakeKey := key.NewNode()
		DERPVerify(t, fakeKey, derpRegion, false)
	})
}

func TestDERPServerWebsocketScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
			"usernet3": {"user3"},
		},
	}

	derpServerScenario(t, spec, true, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)
		t.Logf("checking %d clients for websocket connections", len(allClients))

		for _, client := range allClients {
			if !didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q does not seem to have used a websocket connection, even though it was expected to do so",
					client.Hostname(),
				)
				t.Fail()
			}
		}
	})
}

// derpServerScenario implements the common parts of a DERP scenario.
// We *want* it to show up in stack traces,
// so marking it as a test helper would be counterproductive.
//
//nolint:thelper
func derpServerScenario(
	t *testing.T,
	spec ScenarioSpec,
	websocket bool,
	furtherAssertions ...func(*Scenario),
) {
	IntegrationSkip(t)

	scenario, err := NewScenario(spec)
	require.NoError(t, err)

	defer scenario.ShutdownAssertNoPanics(t)

	err = scenario.CreateHeadscaleEnv(
		[]tsic.Option{
			tsic.WithWebsocketDERP(websocket),
		},
		hsic.WithTestName("derpserver"),
		hsic.WithExtraPorts([]string{"3478/udp"}),
		hsic.WithEmbeddedDERPServerOnly(),
		hsic.WithPort(443),
		hsic.WithTLS(),
		hsic.WithConfigEnv(map[string]string{
			"HEADSCALE_DERP_AUTO_UPDATE_ENABLED":   "true",
			"HEADSCALE_DERP_UPDATE_FREQUENCY":      "10s",
			"HEADSCALE_LISTEN_ADDR":                "0.0.0.0:443",
			"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS": "true",
		}),
	)
	requireNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	requireNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	requireNoErrSync(t, err)

	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
	requireNoErrListFQDN(t, err)

	// Wait for every client to report a healthy DERP connection before pinging.
	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP", client.Hostname())
			}
		}, 30*time.Second, 2*time.Second)
	}

	success := pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.FailNow()

		return
	}

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after first run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after first run", client.Hostname())
			}
		}, 30*time.Second, 2*time.Second)
	}

	t.Logf("Run 1: %d successful pings out of %d", success, len(allClients)*len(allHostnames))

	// Let the DERP updater run a couple of times to ensure it does not
	// break the DERPMap.
	time.Sleep(30 * time.Second)

	success = pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.Fail()
	}

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())

			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after second run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after second run", client.Hostname())
			}
		}, 30*time.Second, 2*time.Second)
	}

	t.Logf("Run 2: %d successful pings out of %d", success, len(allClients)*len(allHostnames))

	for _, check := range furtherAssertions {
		check(scenario)
	}
}