mirror of https://github.com/juanfont/headscale.git (synced 2024-12-20 19:09:07 +01:00)

commit 61a2915f17 (parent a16f0c9f60)
Kristoffer Dalby <kristoffer@tailscale.com>
2023-05-10 12:10:33 +02:00, committed by Juan Font

    port remainder of integrationv1 tests to v2

    Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

46 changed files with 1022 additions and 2274 deletions


@@ -1,35 +0,0 @@
name: Integration Test CLI
on: [pull_request]
jobs:
integration-test-cli:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli


@@ -43,7 +43,7 @@ jobs:
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go test ./... \
go run gotest.tools/gotestsum@latest -- ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
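
(For reference: gotestsum wraps go test, and arguments after the "--" separator are passed through to go test unchanged. The equivalent local invocation of the command above is therefore roughly:

    go run gotest.tools/gotestsum@latest -- ./... -tags ts2019 -failfast -timeout 120m

assuming a Go toolchain new enough to run a module at a version suffix.)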

(This identical hunk, replacing go test with go run gotest.tools/gotestsum@latest, is repeated verbatim in each of the remaining integration-test workflow files changed by this commit.)

@@ -24,21 +24,9 @@ build:
dev: lint test build
test:
@go test $(TAGS) -short -coverprofile=coverage.out ./...
gotestsum -- $(TAGS) -short -coverprofile=coverage.out ./...
test_integration: test_integration_cli test_integration_v2_general
test_integration_cli:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...
test_integration_v2_general:
test_integration:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
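
(Note: the rewritten test target calls a bare gotestsum binary, so running make test locally assumes gotestsum is already on PATH, whereas the CI jobs use the self-bootstrapping go run form. If it is missing, it can be installed with:

    go install gotest.tools/gotestsum@latest

before invoking make test.)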


@@ -64,7 +64,7 @@ jobs:
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go test ./... \
go run gotest.tools/gotestsum@latest -- ./... \
-tags ts2019 \
-failfast \
-timeout 120m \


@@ -532,3 +532,989 @@ func TestEnablingRoutes(t *testing.T) {
}
}
}
func TestApiKeyCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
count := 5
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"user1": 0,
"user2": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
keys := make([]string, count)
for idx := 0; idx < count; idx++ {
apiResult, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
"--output",
"json",
},
)
assert.Nil(t, err)
assert.NotEmpty(t, apiResult)
keys[idx] = apiResult
}
assert.Len(t, keys, 5)
var listedAPIKeys []v1.ApiKey
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"apikeys",
"list",
"--output",
"json",
},
&listedAPIKeys,
)
assert.Nil(t, err)
assert.Len(t, listedAPIKeys, 5)
assert.Equal(t, uint64(1), listedAPIKeys[0].Id)
assert.Equal(t, uint64(2), listedAPIKeys[1].Id)
assert.Equal(t, uint64(3), listedAPIKeys[2].Id)
assert.Equal(t, uint64(4), listedAPIKeys[3].Id)
assert.Equal(t, uint64(5), listedAPIKeys[4].Id)
assert.NotEmpty(t, listedAPIKeys[0].Prefix)
assert.NotEmpty(t, listedAPIKeys[1].Prefix)
assert.NotEmpty(t, listedAPIKeys[2].Prefix)
assert.NotEmpty(t, listedAPIKeys[3].Prefix)
assert.NotEmpty(t, listedAPIKeys[4].Prefix)
assert.True(t, listedAPIKeys[0].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[3].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[4].Expiration.AsTime().After(time.Now()))
assert.True(
t,
listedAPIKeys[0].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[4].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
expiredPrefixes := make(map[string]bool)
// Expire three keys
for idx := 0; idx < 3; idx++ {
_, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"expire",
"--prefix",
listedAPIKeys[idx].Prefix,
},
)
assert.Nil(t, err)
expiredPrefixes[listedAPIKeys[idx].Prefix] = true
}
var listedAfterExpireAPIKeys []v1.ApiKey
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"apikeys",
"list",
"--output",
"json",
},
&listedAfterExpireAPIKeys,
)
assert.Nil(t, err)
for index := range listedAfterExpireAPIKeys {
if _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].Prefix]; ok {
// Expired
assert.True(
t,
listedAfterExpireAPIKeys[index].Expiration.AsTime().Before(time.Now()),
)
} else {
// Not expired
assert.False(
t,
listedAfterExpireAPIKeys[index].Expiration.AsTime().Before(time.Now()),
)
}
}
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestNodeTagCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"user1": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(t, err)
for index, machineKey := range machineKeys {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("machine-%d", index+1),
"--user",
"user1",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"user1",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
machines[index] = &machine
}
assert.Len(t, machines, len(machineKeys))
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"tag",
"-i", "1",
"-t", "tag:test",
"--output", "json",
},
&machine,
)
assert.Nil(t, err)
assert.Equal(t, []string{"tag:test"}, machine.ForcedTags)
// try to set a wrong tag and retrieve the error
type errOutput struct {
Error string `json:"error"`
}
var errorOutput errOutput
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"tag",
"-i", "2",
"-t", "wrong-tag",
"--output", "json",
},
&errorOutput,
)
assert.Nil(t, err)
assert.Contains(t, errorOutput.Error, "tag must start with the string 'tag:'")
// Test list all nodes after the machines have been added
resultMachines := make([]*v1.Machine, len(machineKeys))
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output", "json",
},
&resultMachines,
)
assert.Nil(t, err)
found := false
for _, machine := range resultMachines {
if machine.ForcedTags != nil {
for _, tag := range machine.ForcedTags {
if tag == "tag:test" {
found = true
}
}
}
}
assert.Equal(
t,
true,
found,
"should find a machine with the tag 'tag:test' in the list of machines",
)
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestNodeCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"machine-user": 0,
"other-user": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
// Randomly generated machine keys
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(t, err)
for index, machineKey := range machineKeys {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("machine-%d", index+1),
"--user",
"machine-user",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"machine-user",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
machines[index] = &machine
}
assert.Len(t, machines, len(machineKeys))
// Test list all nodes after the machines have been added
var listAll []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.Nil(t, err)
assert.Len(t, listAll, 5)
assert.Equal(t, uint64(1), listAll[0].Id)
assert.Equal(t, uint64(2), listAll[1].Id)
assert.Equal(t, uint64(3), listAll[2].Id)
assert.Equal(t, uint64(4), listAll[3].Id)
assert.Equal(t, uint64(5), listAll[4].Id)
assert.Equal(t, "machine-1", listAll[0].Name)
assert.Equal(t, "machine-2", listAll[1].Name)
assert.Equal(t, "machine-3", listAll[2].Name)
assert.Equal(t, "machine-4", listAll[3].Name)
assert.Equal(t, "machine-5", listAll[4].Name)
otherUserMachineKeys := []string{
"nodekey:b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
"nodekey:dc721977ac7415aafa87f7d4574cbe07c6b171834a6d37375782bdc1fb6b3584",
}
otherUserMachines := make([]*v1.Machine, len(otherUserMachineKeys))
assert.Nil(t, err)
for index, machineKey := range otherUserMachineKeys {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("otherUser-machine-%d", index+1),
"--user",
"other-user",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"other-user",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
otherUserMachines[index] = &machine
}
assert.Len(t, otherUserMachines, len(otherUserMachineKeys))
// Test list all nodes after adding the other-user machines
var listAllWithotherUser []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllWithotherUser,
)
assert.Nil(t, err)
// All nodes, machines + otherUser
assert.Len(t, listAllWithotherUser, 7)
assert.Equal(t, uint64(6), listAllWithotherUser[5].Id)
assert.Equal(t, uint64(7), listAllWithotherUser[6].Id)
assert.Equal(t, "otherUser-machine-1", listAllWithotherUser[5].Name)
assert.Equal(t, "otherUser-machine-2", listAllWithotherUser[6].Name)
// Test listing only the other-user machines
var listOnlyotherUserMachineUser []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--user",
"other-user",
"--output",
"json",
},
&listOnlyotherUserMachineUser,
)
assert.Nil(t, err)
assert.Len(t, listOnlyotherUserMachineUser, 2)
assert.Equal(t, uint64(6), listOnlyotherUserMachineUser[0].Id)
assert.Equal(t, uint64(7), listOnlyotherUserMachineUser[1].Id)
assert.Equal(
t,
"otherUser-machine-1",
listOnlyotherUserMachineUser[0].Name,
)
assert.Equal(
t,
"otherUser-machine-2",
listOnlyotherUserMachineUser[1].Name,
)
// Delete a machine
_, err = headscale.Execute(
[]string{
"headscale",
"nodes",
"delete",
"--identifier",
// Delete the last added machine
"4",
"--output",
"json",
"--force",
},
)
assert.Nil(t, err)
// Test: list main user after machine is deleted
var listOnlyMachineUserAfterDelete []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--user",
"machine-user",
"--output",
"json",
},
&listOnlyMachineUserAfterDelete,
)
assert.Nil(t, err)
assert.Len(t, listOnlyMachineUserAfterDelete, 4)
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestNodeExpireCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"machine-expire-user": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
// Randomly generated machine keys
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
}
machines := make([]*v1.Machine, len(machineKeys))
for index, machineKey := range machineKeys {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("machine-%d", index+1),
"--user",
"machine-expire-user",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"machine-expire-user",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
machines[index] = &machine
}
assert.Len(t, machines, len(machineKeys))
var listAll []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.Nil(t, err)
assert.Len(t, listAll, 5)
assert.True(t, listAll[0].Expiry.AsTime().IsZero())
assert.True(t, listAll[1].Expiry.AsTime().IsZero())
assert.True(t, listAll[2].Expiry.AsTime().IsZero())
assert.True(t, listAll[3].Expiry.AsTime().IsZero())
assert.True(t, listAll[4].Expiry.AsTime().IsZero())
for idx := 0; idx < 3; idx++ {
_, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"expire",
"--identifier",
fmt.Sprintf("%d", listAll[idx].Id),
},
)
assert.Nil(t, err)
}
var listAllAfterExpiry []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllAfterExpiry,
)
assert.Nil(t, err)
assert.Len(t, listAllAfterExpiry, 5)
assert.True(t, listAllAfterExpiry[0].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[1].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[2].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[3].Expiry.AsTime().IsZero())
assert.True(t, listAllAfterExpiry[4].Expiry.AsTime().IsZero())
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestNodeRenameCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"machine-rename-command": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
// Randomly generated machine keys
machineKeys := []string{
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(t, err)
for index, machineKey := range machineKeys {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("machine-%d", index+1),
"--user",
"machine-rename-command",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"machine-rename-command",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
machines[index] = &machine
}
assert.Len(t, machines, len(machineKeys))
var listAll []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.Nil(t, err)
assert.Len(t, listAll, 5)
assert.Contains(t, listAll[0].GetGivenName(), "machine-1")
assert.Contains(t, listAll[1].GetGivenName(), "machine-2")
assert.Contains(t, listAll[2].GetGivenName(), "machine-3")
assert.Contains(t, listAll[3].GetGivenName(), "machine-4")
assert.Contains(t, listAll[4].GetGivenName(), "machine-5")
for idx := 0; idx < 3; idx++ {
_, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"rename",
"--identifier",
fmt.Sprintf("%d", listAll[idx].Id),
fmt.Sprintf("newmachine-%d", idx+1),
},
)
assert.Nil(t, err)
}
var listAllAfterRename []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllAfterRename,
)
assert.Nil(t, err)
assert.Len(t, listAllAfterRename, 5)
assert.Equal(t, "newmachine-1", listAllAfterRename[0].GetGivenName())
assert.Equal(t, "newmachine-2", listAllAfterRename[1].GetGivenName())
assert.Equal(t, "newmachine-3", listAllAfterRename[2].GetGivenName())
assert.Contains(t, listAllAfterRename[3].GetGivenName(), "machine-4")
assert.Contains(t, listAllAfterRename[4].GetGivenName(), "machine-5")
// Test failure for too long names
result, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"rename",
"--identifier",
fmt.Sprintf("%d", listAll[4].Id),
"testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine12345678901234567890",
},
)
assert.Nil(t, err)
assert.Contains(t, result, "not be over 63 chars")
var listAllAfterRenameAttempt []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllAfterRenameAttempt,
)
assert.Nil(t, err)
assert.Len(t, listAllAfterRenameAttempt, 5)
assert.Equal(t, "newmachine-1", listAllAfterRenameAttempt[0].GetGivenName())
assert.Equal(t, "newmachine-2", listAllAfterRenameAttempt[1].GetGivenName())
assert.Equal(t, "newmachine-3", listAllAfterRenameAttempt[2].GetGivenName())
assert.Contains(t, listAllAfterRenameAttempt[3].GetGivenName(), "machine-4")
assert.Contains(t, listAllAfterRenameAttempt[4].GetGivenName(), "machine-5")
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestNodeMoveCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"old-user": 0,
"new-user": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
// Randomly generated machine key
machineKey := "nodekey:688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
_, err = headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
"nomad-machine",
"--user",
"old-user",
"--key",
machineKey,
"--output",
"json",
},
)
assert.Nil(t, err)
var machine v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"old-user",
"register",
"--key",
machineKey,
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
assert.Equal(t, uint64(1), machine.Id)
assert.Equal(t, "nomad-machine", machine.Name)
assert.Equal(t, machine.User.Name, "old-user")
machineID := fmt.Sprintf("%d", machine.Id)
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"move",
"--identifier",
machineID,
"--user",
"new-user",
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
assert.Equal(t, machine.User.Name, "new-user")
var allNodes []v1.Machine
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&allNodes,
)
assert.Nil(t, err)
assert.Len(t, allNodes, 1)
assert.Equal(t, allNodes[0].Id, machine.Id)
assert.Equal(t, allNodes[0].User, machine.User)
assert.Equal(t, allNodes[0].User.Name, "new-user")
moveToNonExistingNSResult, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"move",
"--identifier",
machineID,
"--user",
"non-existing-user",
"--output",
"json",
},
)
assert.Nil(t, err)
assert.Contains(
t,
moveToNonExistingNSResult,
"User not found",
)
assert.Equal(t, machine.User.Name, "new-user")
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"move",
"--identifier",
machineID,
"--user",
"old-user",
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
assert.Equal(t, machine.User.Name, "old-user")
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"move",
"--identifier",
machineID,
"--user",
"old-user",
"--output",
"json",
},
&machine,
)
assert.Nil(t, err)
assert.Equal(t, machine.User.Name, "old-user")
err = scenario.Shutdown()
assert.NoError(t, err)
}
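
(The tests above rely on an executeAndUnmarshal helper that is not part of this hunk. A minimal sketch of what it presumably does, inferred from the call sites; the ControlServer interface name and the exact error wrapping are assumptions:

    // executeAndUnmarshal runs a headscale CLI command inside the container
    // and decodes its JSON output into result.
    // Assumes encoding/json and fmt are imported.
    func executeAndUnmarshal(headscale ControlServer, command []string, result interface{}) error {
        str, err := headscale.Execute(command)
        if err != nil {
            return err
        }
        // The CLI is invoked with --output json above, so stdout is a JSON document.
        if err := json.Unmarshal([]byte(str), result); err != nil {
            return fmt.Errorf("failed to unmarshal: %w", err)
        }
        return nil
    }

where ControlServer is assumed to be the interface returned by scenario.Headscale(), exposing Execute(command []string) (string, error).)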

File diff suppressed because it is too large.


@@ -1,271 +0,0 @@
// nolint
package headscale
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/netip"
"os"
"strconv"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
const (
headscaleNetwork = "headscale-test"
headscaleHostname = "headscale"
DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
)
var (
errEnvVarEmpty = errors.New("getenv: environment variable empty")
IpPrefix4 = netip.MustParsePrefix("100.64.0.0/10")
IpPrefix6 = netip.MustParsePrefix("fd7a:115c:a1e0::/48")
tailscaleVersions = []string{
"head",
"unstable",
"1.38.4",
"1.36.2",
"1.34.2",
"1.32.3",
"1.30.2",
"1.28.0",
"1.26.2",
"1.24.2",
"1.22.2",
"1.20.4",
"1.18.2",
"1.16.2",
"1.14.3",
"1.12.3",
}
)
type ExecuteCommandConfig struct {
timeout time.Duration
}
type ExecuteCommandOption func(*ExecuteCommandConfig) error
func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
conf.timeout = timeout
return nil
})
}
func ExecuteCommand(
resource *dockertest.Resource,
cmd []string,
env []string,
options ...ExecuteCommandOption,
) (string, string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
execConfig := ExecuteCommandConfig{
timeout: DOCKER_EXECUTE_TIMEOUT,
}
for _, opt := range options {
if err := opt(&execConfig); err != nil {
return "", "", fmt.Errorf("execute-command/options: %w", err)
}
}
type result struct {
exitCode int
err error
}
resultChan := make(chan result, 1)
// Run the long-running function in its own goroutine and pass its
// response back into our channel.
go func() {
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: append(env, "HEADSCALE_LOG_LEVEL=disabled"),
StdOut: &stdout,
StdErr: &stderr,
},
)
resultChan <- result{exitCode, err}
}()
// Listen on our channel AND a timeout channel - whichever happens first.
select {
case res := <-resultChan:
if res.err != nil {
return stdout.String(), stderr.String(), res.err
}
if res.exitCode != 0 {
fmt.Println("Command: ", cmd)
fmt.Println("stdout: ", stdout.String())
fmt.Println("stderr: ", stderr.String())
return stdout.String(), stderr.String(), fmt.Errorf(
"command failed with: %s",
stderr.String(),
)
}
return stdout.String(), stderr.String(), nil
case <-time.After(execConfig.timeout):
return stdout.String(), stderr.String(), fmt.Errorf(
"command timed out after %s",
execConfig.timeout,
)
}
}
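
(For illustration: a caller combines ExecuteCommand with the option constructor above roughly like this, where resource is an existing *dockertest.Resource:

    stdout, stderr, err := ExecuteCommand(
        resource,
        []string{"headscale", "nodes", "list", "--output", "json"},
        []string{},
        ExecuteCommandTimeout(30*time.Second),
    )

which overrides the 10-second DOCKER_EXECUTE_TIMEOUT default.)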
func DockerRestartPolicy(config *docker.HostConfig) {
// Set AutoRemove to true so that a stopped container goes away by itself *immediately* on error.
// When set to false, containers remain until the end of the integration test.
config.AutoRemove = false
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
func DockerAllowLocalIPv6(config *docker.HostConfig) {
if config.Sysctls == nil {
config.Sysctls = make(map[string]string, 1)
}
config.Sysctls["net.ipv6.conf.all.disable_ipv6"] = "0"
}
func DockerAllowNetworkAdministration(config *docker.HostConfig) {
config.CapAdd = append(config.CapAdd, "NET_ADMIN")
config.Mounts = append(config.Mounts, docker.HostMount{
Type: "bind",
Source: "/dev/net/tun",
Target: "/dev/net/tun",
})
}
func getDockerBuildOptions(version string) *dockertest.BuildOptions {
var tailscaleBuildOptions *dockertest.BuildOptions
switch version {
case "head":
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale-HEAD",
ContextDir: ".",
BuildArgs: []docker.BuildArg{},
}
case "unstable":
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: "*", // Installs the latest version https://askubuntu.com/a/824926
},
{
Name: "TAILSCALE_CHANNEL",
Value: "unstable",
},
},
}
default:
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: version,
},
{
Name: "TAILSCALE_CHANNEL",
Value: "stable",
},
},
}
}
return tailscaleBuildOptions
}
func getDNSNames(
headscale *dockertest.Resource,
) ([]string, error) {
listAllResult, _, err := ExecuteCommand(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
[]string{},
)
if err != nil {
return nil, err
}
var listAll []v1.Machine
err = json.Unmarshal([]byte(listAllResult), &listAll)
if err != nil {
return nil, err
}
hostnames := make([]string, len(listAll))
for index := range listAll {
hostnames[index] = listAll[index].GetGivenName()
}
return hostnames, nil
}
func GetEnvStr(key string) (string, error) {
v := os.Getenv(key)
if v == "" {
return v, errEnvVarEmpty
}
return v, nil
}
func GetEnvBool(key string) (bool, error) {
s, err := GetEnvStr(key)
if err != nil {
return false, err
}
v, err := strconv.ParseBool(s)
if err != nil {
return false, err
}
return v, nil
}
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (dockertest.Network, error) {
networks, err := pool.NetworksByName(name)
if err != nil || len(networks) == 0 {
if _, err := pool.CreateNetwork(name); err == nil {
// Create does not give us an updated version of the resource, so we need to
// get it again.
networks, err := pool.NetworksByName(name)
if err != nil {
return dockertest.Network{}, err
}
return networks[0], nil
}
}
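// Note: if CreateNetwork fails above, control falls through to here with an
// empty networks slice, and the index below panics rather than returning an error.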
return networks[0], nil
}


@@ -1,3 +0,0 @@
derp.yaml
*.sqlite
*.sqlite3


@@ -1,56 +0,0 @@
acl_policy_path: ""
cli:
insecure: false
timeout: 5s
db_path: /tmp/integration_test_db.sqlite3
db_ssl: false
db_type: sqlite3
derp:
auto_update_enabled: false
server:
enabled: false
stun:
enabled: true
update_frequency: 1m
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
nameservers:
- 127.0.0.11
- 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
level: disabled
format: text
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:19090
oidc:
expiry: 180d
only_start_if_oidc_is_available: true
scope:
- openid
- profile
- email
strip_email_domain: true
use_expiry_from_token: false
private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:18080
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0o770"
randomize_client_port: false


@@ -1,31 +0,0 @@
log:
level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
override_local_dns: true
base_domain: headscale.net
magic_dns: true
domains: []
nameservers:
- 127.0.0.11
- 1.1.1.1
db_path: /tmp/integration_test_db.sqlite3
db_ssl: false
private_key_path: private.key
noise:
private_key_path: noise_private.key
listen_addr: 0.0.0.0:18080
metrics_listen_addr: 127.0.0.1:19090
server_url: http://headscale:18080
derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: false
update_frequency: 1m


@@ -1,55 +0,0 @@
acl_policy_path: ""
cli:
insecure: false
timeout: 5s
db_path: /tmp/integration_test_db.sqlite3
db_ssl: false
db_type: sqlite3
derp:
auto_update_enabled: false
server:
enabled: false
stun:
enabled: true
update_frequency: 1m
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
nameservers:
- 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 30s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
level: disabled
format: text
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:19090
oidc:
expiry: 180d
only_start_if_oidc_is_available: true
scope:
- openid
- profile
- email
strip_email_domain: true
use_expiry_from_token: false
private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:18080
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0o770"
randomize_client_port: false


@@ -1,30 +0,0 @@
log:
level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 30s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
override_local_dns: true
base_domain: headscale.net
magic_dns: true
domains: []
nameservers:
- 1.1.1.1
db_path: /tmp/integration_test_db.sqlite3
db_ssl: false
private_key_path: private.key
noise:
private_key_path: noise_private.key
listen_addr: 0.0.0.0:18080
metrics_listen_addr: 127.0.0.1:19090
server_url: http://headscale:18080
derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: false
update_frequency: 1m


@@ -1,56 +0,0 @@
acl_policy_path: ""
cli:
insecure: false
timeout: 5s
db_path: /tmp/integration_test_db.sqlite3
db_ssl: false
db_type: sqlite3
derp:
auto_update_enabled: false
server:
enabled: false
stun:
enabled: true
update_frequency: 1m
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
nameservers:
- 127.0.0.11
- 1.1.1.1
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:8080
log:
format: text
level: disabled
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:9090
oidc:
expiry: 180d
only_start_if_oidc_is_available: true
scope:
- openid
- profile
- email
strip_email_domain: true
use_expiry_from_token: false
private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:8080
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0o770"
randomize_client_port: false


@@ -1,54 +0,0 @@
acl_policy_path: ""
cli:
insecure: false
timeout: 5s
db_path: /tmp/integration_test_db.sqlite3
db_type: sqlite3
derp:
auto_update_enabled: false
server:
enabled: false
stun:
enabled: true
update_frequency: 1m
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
base_domain: headscale.net
domains: []
magic_dns: true
nameservers:
- 127.0.0.11
- 1.1.1.1
override_local_dns: true
ephemeral_node_inactivity_timeout: 30m
grpc_allow_insecure: false
grpc_listen_addr: :50443
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
format: text
level: disabled
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:19090
node_update_check_interval: 10s
noise:
private_key_path: noise_private.key
oidc:
only_start_if_oidc_is_available: true
scope:
- openid
- profile
- email
strip_email_domain: true
private_key_path: private.key
randomize_client_port: false
server_url: http://headscale:18080
tls_client_auth_mode: relaxed
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0o770"