mirror of https://github.com/juanfont/headscale.git synced 2025-05-23 01:15:27 +02:00

switch to new spec format

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2025-03-05 17:01:54 +01:00
parent f7f7b13faa
commit 9001707d44
13 changed files with 218 additions and 195 deletions
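
This commit replaces the ad-hoc map[string]int specs used across the integration tests with a structured ScenarioSpec. A minimal before/after sketch, distilled from the hunks below (ScenarioSpec itself is defined in scenario.go further down; this is not part of the diff):

// Old format: usernames mapped to per-user node counts; Go randomizes
// map iteration order, so setup order varied between test runs.
oldSpec := map[string]int{
	"user1": 2,
	"user2": 2,
}

// New format: one shared per-user node count plus an explicit,
// sortable list of users, with optional network topology (see below).
newSpec := ScenarioSpec{
	NodesPerUser: 2,
	Users:        []string{"user1", "user2"},
}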

View File

@ -57,6 +57,15 @@ func GenerateRandomStringDNSSafe(size int) (string, error) {
return str[:size], nil
}
func MustGenerateRandomStringDNSSafe(size int) string {
hash, err := GenerateRandomStringDNSSafe(size)
if err != nil {
panic(err)
}
return hash
}
func TailNodesToString(nodes []*tailcfg.Node) string {
temp := make([]string, len(nodes))
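
The Must prefix on the new wrapper follows the common Go convention of turning a fallible function into one that panics on error, which allows it to be used in package-level variable initializers where no error can be returned. This commit relies on exactly that in scenario.go, shown later in the diff:

// A package-level initializer cannot handle an error, so the Must
// variant is used; failing to generate the random suffix aborts the
// test binary at startup, which is the desired behavior here.
var TestHashPrefix = "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
var TestDefaultNetwork = TestHashPrefix + "-default"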

View File

@ -57,9 +57,9 @@ func aclScenario(
scenario, err := NewScenario(dockertestMaxWait())
require.NoError(t, err)
spec := map[string]int{
"user1": clientsPerUser,
"user2": clientsPerUser,
spec := ScenarioSpec{
NodesPerUser: clientsPerUser,
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,
@ -96,22 +96,24 @@ func aclScenario(
func TestACLHostsInNetMapTable(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}
// NOTE: All want cases currently check the
// total count of expected peers; this would
// typically be the client count of the users
// they can access, minus one (themselves).
tests := map[string]struct {
users map[string]int
users ScenarioSpec
policy policyv1.ACLPolicy
want map[string]int
}{
// Test that when we have no ACL, each client netmap has
// a number of peers equal to the total number of clients.
"base-acls": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -129,10 +131,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
// each other, each node has only the number of peers from
// their own user.
"two-isolated-users": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -155,10 +154,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
// are restricted to a single port, nodes are still present
// in the netmap.
"two-restricted-present-in-netmap": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -192,10 +188,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
// of peers. This will still result in all the peers as we
// need them present on the other side for the "return path".
"two-ns-one-isolated": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -220,10 +213,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
},
},
"very-large-destination-prefix-1372": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -248,10 +238,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
},
},
"ipv6-acls-1470": {
users: map[string]int{
"user1": 2,
"user2": 2,
},
users: spec,
policy: policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
@ -1026,9 +1013,9 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 1,
"user2": 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,

View File

@ -23,9 +23,9 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
opts := []hsic.Option{hsic.WithTestName("pingallbyip")}
@ -84,7 +84,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
time.Sleep(5 * time.Minute)
}
for userName := range spec {
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userName, true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
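
The loop rewrites from range spec to range spec.Users throughout this file are not purely mechanical: Go randomizes map iteration order, so the old spec visited users in a different order on every run, while the slice in ScenarioSpec iterates deterministically (and CreateHeadscaleEnv sorts it). A minimal, self-contained illustration of the difference, not taken from the diff:

package main

import "fmt"

func main() {
	// Old spec shape: map iteration order is randomized per run.
	oldSpec := map[string]int{"user1": 1, "user2": 1}
	for userName := range oldSpec {
		fmt.Println(userName) // order varies between runs
	}

	// New spec shape: a slice iterates in declaration order, every run.
	users := []string{"user1", "user2"}
	for _, userName := range users {
		fmt.Println(userName) // always user1, then user2
	}
}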
@ -156,9 +156,9 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{},
@ -203,7 +203,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
// Log in all clients as user1, iterating over the spec only returns the
// clients, not the usernames.
for userName := range spec {
for _, userName := range spec.Users {
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
@ -239,9 +239,9 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
opts := []hsic.Option{hsic.WithTestName("pingallbyip")}
@ -300,7 +300,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
time.Sleep(5 * time.Minute)
}
for userName := range spec {
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userName, true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)

View File

@ -25,6 +25,7 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/oauth2-proxy/mockoidc"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
@ -512,7 +513,7 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
assertNoErr(t, err)
assert.Len(t, listUsers, 0)
ts, err := scenario.CreateTailscaleNode("unstable")
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[TestDefaultNetwork]))
assertNoErr(t, err)
u, err := ts.LoginWithURL(headscale.GetEndpoint())
@ -743,7 +744,7 @@ func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration, users []mockoidc
PortBindings: map[docker.Port][]docker.PortBinding{
docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},
},
Networks: s.Scenario.networks,
Networks: s.Scenario.Networks(),
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
fmt.Sprintf("MOCKOIDC_PORT=%d", port),

View File

@ -52,9 +52,8 @@ func TestUserCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 0,
"user2": 0,
spec := ScenarioSpec{
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -251,8 +250,8 @@ func TestPreAuthKeyCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 0,
spec := ScenarioSpec{
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
@ -393,8 +392,8 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 0,
spec := ScenarioSpec{
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp"))
@ -456,8 +455,8 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 0,
spec := ScenarioSpec{
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph"))
@ -534,9 +533,8 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user1: 1,
user2: 0,
spec := ScenarioSpec{
Users: []string{user1, user2},
}
err = scenario.CreateHeadscaleEnv(
@ -624,9 +622,8 @@ func TestApiKeyCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 0,
"user2": 0,
spec := ScenarioSpec{
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -792,8 +789,8 @@ func TestNodeTagCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 0,
spec := ScenarioSpec{
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -981,8 +978,9 @@ func TestNodeAdvertiseTagCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec,
@ -996,7 +994,7 @@ func TestNodeAdvertiseTagCommand(t *testing.T) {
assertNoErr(t, err)
// Test listing all nodes after they have been added
resultMachines := make([]*v1.Node, spec["user1"])
resultMachines := make([]*v1.Node, spec.NodesPerUser)
err = executeAndUnmarshal(
headscale,
[]string{
@ -1033,9 +1031,8 @@ func TestNodeCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"node-user": 0,
"other-user": 0,
spec := ScenarioSpec{
Users: []string{"node-user", "other-user"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -1273,8 +1270,8 @@ func TestNodeExpireCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"node-expire-user": 0,
spec := ScenarioSpec{
Users: []string{"node-expire-user"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -1399,8 +1396,8 @@ func TestNodeRenameCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"node-rename-command": 0,
spec := ScenarioSpec{
Users: []string{"node-rename-command"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -1564,9 +1561,8 @@ func TestNodeMoveCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"old-user": 0,
"new-user": 0,
spec := ScenarioSpec{
Users: []string{"old-user", "new-user"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
@ -1725,8 +1721,8 @@ func TestPolicyCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 0,
spec := ScenarioSpec{
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(
@ -1812,8 +1808,9 @@ func TestPolicyBrokenConfigCommand(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(

View File

@ -35,8 +35,9 @@ func TestDERPVerifyEndpoint(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
derper, err := scenario.CreateDERPServer("head",

View File

@ -21,9 +21,9 @@ func TestResolveMagicDNS(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"magicdns1": len(MustTestVersions),
"magicdns2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
@ -91,9 +91,9 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"magicdns1": 1,
"magicdns2": 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
const erPath = "/tmp/extra_records.json"
@ -368,9 +368,9 @@ func TestValidateResolvConf(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"resolvconf1": 3,
"resolvconf2": 3,
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("resolvconf"), hsic.WithConfigEnv(tt.conf))

View File

@ -315,7 +315,6 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser(
tsClient, err := tsic.New(
s.pool,
version,
network,
opts...,
)
if err != nil {

View File

@ -32,11 +32,9 @@ func TestPingAllByIP(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// TODO(kradalby): it does not look like the user thing works, only second
// get created? maybe only when many?
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,
@ -75,9 +73,9 @@ func TestPingAllByIPPublicDERP(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,
@ -312,9 +310,9 @@ func TestPingAllByHostname(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user3": len(MustTestVersions),
"user4": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
@ -361,8 +359,9 @@ func TestTaildrop(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"taildrop": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{},
@ -522,8 +521,6 @@ func TestUpdateHostnameFromClient(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
user := "update-hostname-from-client"
hostnames := map[string]string{
"1": "user1-host",
"2": "User2-Host",
@ -534,8 +531,9 @@ func TestUpdateHostnameFromClient(t *testing.T) {
assertNoErrf(t, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 3,
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("updatehostname"))
@ -654,8 +652,9 @@ func TestExpireNode(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode"))
@ -684,7 +683,7 @@ func TestExpireNode(t *testing.T) {
assertNoErr(t, err)
// Assert that we have the original count - self
assert.Len(t, status.Peers(), spec["user1"]-1)
assert.Len(t, status.Peers(), spec.NodesPerUser-1)
}
headscale, err := scenario.Headscale()
@ -780,8 +779,9 @@ func TestNodeOnlineStatus(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
"user1": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online"))
@ -895,11 +895,9 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// TODO(kradalby): it does not look like the user thing works, only second
// get created? maybe only when many?
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,
@ -977,11 +975,9 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// TODO(kradalby): it does not look like the user thing works, only second
// get created? maybe only when many?
spec := map[string]int{
"user1": 1,
"user2": 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,

View File

@ -29,14 +29,13 @@ func TestEnablingRoutes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
user := "user6"
scenario, err := NewScenario(dockertestMaxWait())
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 3,
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"))
@ -203,14 +202,13 @@ func TestHASubnetRouterFailover(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
user := "user9"
scenario, err := NewScenario(dockertestMaxWait())
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 4,
spec := ScenarioSpec{
NodesPerUser: 4,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{},
@ -534,8 +532,9 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) {
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1"},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
@ -631,8 +630,9 @@ func TestAutoApprovedSubRoute2068(t *testing.T) {
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 1,
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})},
@ -702,8 +702,9 @@ func TestSubnetRouteACL(t *testing.T) {
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 2,
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
@ -924,8 +925,9 @@ func TestEnablingExitRoutes(t *testing.T) {
assertNoErrf(t, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
spec := map[string]int{
user: 2,
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{user},
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{

View File

@ -26,6 +26,7 @@ import (
xmaps "golang.org/x/exp/maps"
"golang.org/x/sync/errgroup"
"tailscale.com/envknob"
"tailscale.com/util/mak"
)
const (
@ -87,19 +88,17 @@ type Scenario struct {
users map[string]*User
pool *dockertest.Pool
networks []*dockertest.Network
networks map[string]*dockertest.Network
mu sync.Mutex
}
var TestHashPrefix = "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
var TestDefaultNetwork = TestHashPrefix + "-default"
// NewScenario creates a test Scenario which can be used to bootstrap a ControlServer with
// a set of Users and TailscaleClients.
func NewScenario(maxWait time.Duration) (*Scenario, error) {
hash, err := util.GenerateRandomStringDNSSafe(scenarioHashLength)
if err != nil {
return nil, err
}
pool, err := dockertest.NewPool("")
if err != nil {
return nil, fmt.Errorf("could not connect to docker: %w", err)
@ -107,12 +106,16 @@ func NewScenario(maxWait time.Duration) (*Scenario, error) {
pool.MaxWait = maxWait
networkName := fmt.Sprintf("hs-%s", hash)
if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" {
networkName = overrideNetworkName
return &Scenario{
controlServers: xsync.NewMapOf[string, ControlServer](),
users: make(map[string]*User),
pool: pool,
}, nil
}
network, err := dockertestutil.GetFirstOrCreateNetwork(pool, networkName)
func (s *Scenario) AddNetwork(name string) (*dockertest.Network, error) {
network, err := dockertestutil.GetFirstOrCreateNetwork(s.pool, name)
if err != nil {
return nil, fmt.Errorf("failed to create or get network: %w", err)
}
@ -120,18 +123,19 @@ func NewScenario(maxWait time.Duration) (*Scenario, error) {
// We run the test suite in a docker container that calls a couple of endpoints for
// readiness checks; this ensures that we can run the tests with individual networks
// and have the client reach the different containers.
err = dockertestutil.AddContainerToNetwork(pool, network, "headscale-test-suite")
// TODO(kradalby): Can the test-suite be renamed so we can have multiple?
err = dockertestutil.AddContainerToNetwork(s.pool, network, "headscale-test-suite")
if err != nil {
return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
}
return &Scenario{
controlServers: xsync.NewMapOf[string, ControlServer](),
users: make(map[string]*User),
mak.Set(&s.networks, name, network)
pool: pool,
networks: []*dockertest.Network{network},
}, nil
return network, nil
}
func (s *Scenario) Networks() []*dockertest.Network {
return xmaps.Values(s.networks)
}
func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
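
The networks field changes from a slice to a map keyed by network name, so networks can be looked up directly (for example s.networks[TestDefaultNetwork]). mak.Set, from tailscale.com/util/mak, assigns into a possibly-nil map and allocates it on first use; xmaps.Values flattens the map back into the slice shape that hsic.New and dsic.New still expect, though in unspecified order. A small self-contained sketch of the two helpers, with string values standing in for *dockertest.Network:

package main

import (
	"fmt"

	xmaps "golang.org/x/exp/maps"
	"tailscale.com/util/mak"
)

func main() {
	// mak.Set allocates the map on first use, so a zero-value
	// Scenario with a nil networks map stays usable.
	var networks map[string]string
	mak.Set(&networks, "hs-abc123-default", "default network")
	mak.Set(&networks, "hs-abc123-extra", "extra network")

	// xmaps.Values recovers a slice of the map's values; the order
	// is unspecified, just like map iteration.
	fmt.Println(xmaps.Values(networks))
}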
@ -232,7 +236,7 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
opts = append(opts, hsic.WithPolicyV2())
}
headscale, err := hsic.New(s.pool, s.networks, opts...)
headscale, err := hsic.New(s.pool, s.Networks(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to create headscale container: %w", err)
}
@ -309,7 +313,6 @@ func (s *Scenario) CreateTailscaleNode(
tsClient, err := tsic.New(
s.pool,
version,
s.networks[0],
opts...,
)
if err != nil {
@ -369,7 +372,6 @@ func (s *Scenario) CreateTailscaleNodesInUser(
tsClient, err := tsic.New(
s.pool,
version,
s.networks[0],
opts...,
)
s.mu.Unlock()
@ -489,39 +491,86 @@ func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int) error {
return nil
}
// ScenarioSpec describes the users, nodes, and network topology to
// set up for a given scenario.
type ScenarioSpec struct {
// Users is a list of usernames that will be created.
// Each created user will get NodesPerUser nodes attached.
Users []string
// NodesPerUser is how many nodes should be attached to each user.
NodesPerUser int
// Networks, if set, maps each separate Docker network that should be
// created to the list of users whose nodes should be placed in it.
// If not set, a single network will be created and all users and nodes
// will be added there.
// Please note that Docker networks are not necessarily routable and
// connections between them might fall back to DERP.
Networks map[string][]string
}
// CreateHeadscaleEnv is a convenience method returning a complete Headscale
// test environment with nodes of all versions, joined to the server as the
// given users.
func (s *Scenario) CreateHeadscaleEnv(
users map[string]int,
spec ScenarioSpec,
tsOpts []tsic.Option,
opts ...hsic.Option,
) error {
var userToNetwork map[string]*dockertest.Network
if len(spec.Networks) != 0 {
for name, users := range spec.Networks {
networkName := TestHashPrefix + "-" + name
network, err := s.AddNetwork(networkName)
if err != nil {
return err
}
for _, user := range users {
if n2, ok := userToNetwork[user]; ok {
return fmt.Errorf("users can only have nodes placed in one network: %s into %s but already in %s", user, network.Network.Name, n2.Network.Name)
}
mak.Set(&userToNetwork, user, network)
}
}
} else {
_, err := s.AddNetwork(TestDefaultNetwork)
if err != nil {
return err
}
}
headscale, err := s.Headscale(opts...)
if err != nil {
return err
}
usernames := xmaps.Keys(users)
sort.Strings(usernames)
for _, username := range usernames {
clientCount := users[username]
err = s.CreateUser(username)
sort.Strings(spec.Users)
for _, user := range spec.Users {
err = s.CreateUser(user)
if err != nil {
return err
}
err = s.CreateTailscaleNodesInUser(username, "all", clientCount, tsOpts...)
var opts []tsic.Option
if userToNetwork != nil {
opts = append(tsOpts, tsic.WithNetwork(userToNetwork[user]))
} else {
opts = append(tsOpts, tsic.WithNetwork(s.networks[TestDefaultNetwork]))
}
err = s.CreateTailscaleNodesInUser(user, "all", spec.NodesPerUser, opts...)
if err != nil {
return err
}
key, err := s.CreatePreAuthKey(username, true, false)
key, err := s.CreatePreAuthKey(user, true, false)
if err != nil {
return err
}
err = s.RunTailscaleUp(username, headscale.GetEndpoint(), key.GetKey())
err = s.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())
if err != nil {
return err
}
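
None of the tests in this commit set the new Networks field, so here is a hedged, hypothetical example of a multi-network spec under the definitions above (network names are illustrative and appear nowhere in the diff):

// Hypothetical topology: user1's nodes land in one Docker network and
// user2's in another; CreateHeadscaleEnv prefixes each name with
// TestHashPrefix before creating the network, and connections between
// the networks may fall back to DERP.
spec := ScenarioSpec{
	NodesPerUser: 2,
	Users:        []string{"user1", "user2"},
	Networks: map[string][]string{
		"net-a": {"user1"},
		"net-b": {"user2"},
	},
}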
@ -667,7 +716,7 @@ func (s *Scenario) WaitForTailscaleLogout() error {
// CreateDERPServer creates a new DERP server in a container.
func (s *Scenario) CreateDERPServer(version string, opts ...dsic.Option) (*dsic.DERPServerInContainer, error) {
derp, err := dsic.New(s.pool, version, s.networks, opts...)
derp, err := dsic.New(s.pool, version, s.Networks(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to create DERP server: %w", err)
}

View File

@ -53,9 +53,9 @@ func sshScenario(t *testing.T, policy *policyv1.ACLPolicy, clientsPerUser int) *
scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
spec := map[string]int{
"user1": clientsPerUser,
"user2": clientsPerUser,
spec := ScenarioSpec{
NodesPerUser: clientsPerUser,
Users: []string{"user1", "user2"},
}
err = scenario.CreateHeadscaleEnv(spec,

View File

@ -101,26 +101,10 @@ func WithCACert(cert []byte) Option {
}
}
// WithOrCreateNetwork sets the Docker container network to use with
// the Tailscale instance, if the parameter is nil, a new network,
// isolating the TailscaleClient, will be created. If a network is
// passed, the Tailscale instance will join the given network.
func WithOrCreateNetwork(network *dockertest.Network) Option {
// WithNetwork sets the Docker container network to use with
// the Tailscale instance.
func WithNetwork(network *dockertest.Network) Option {
return func(tsic *TailscaleInContainer) {
if network != nil {
tsic.network = network
return
}
network, err := dockertestutil.GetFirstOrCreateNetwork(
tsic.pool,
fmt.Sprintf("%s-network", tsic.hostname),
)
if err != nil {
log.Fatalf("failed to create network: %s", err)
}
tsic.network = network
}
}
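
With WithOrCreateNetwork gone and the positional network parameter removed from tsic.New (next hunk), callers must pass the network explicitly, as the OIDC test earlier in this commit already does. A sketch of the new call shape, assuming a scenario with a populated networks map:

// The network is injected via a functional option rather than a
// positional parameter; omitting the option leaves the field unset.
ts, err := tsic.New(pool, "unstable",
	tsic.WithNetwork(scenario.networks[TestDefaultNetwork]),
)
if err != nil {
	return err
}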
@ -216,7 +200,6 @@ func WithExtraLoginArgs(args []string) Option {
func New(
pool *dockertest.Pool,
version string,
network *dockertest.Network,
opts ...Option,
) (*TailscaleInContainer, error) {
hash, err := util.GenerateRandomStringDNSSafe(tsicHashLength)
@ -231,7 +214,6 @@ func New(
hostname: hostname,
pool: pool,
network: network,
withEntrypoint: []string{
"/bin/sh",