diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 7f715ca7..37c55a04 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -49,6 +49,7 @@ jobs:
             ${{ secrets.DOCKERHUB_USERNAME }}/headscale
             ghcr.io/${{ github.repository_owner }}/headscale
           tags: |
+            type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
             type=semver,pattern={{major}}
             type=sha
diff --git a/Dockerfile b/Dockerfile
index 9499af22..20bb7dae 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:latest AS build
+FROM golang:1.17.1-bullseye AS build
 
 ENV GOPATH /go
 COPY go.mod go.sum /go/src/headscale/
diff --git a/Dockerfile.tailscale b/Dockerfile.tailscale
index c6830f8a..df8cdcb1 100644
--- a/Dockerfile.tailscale
+++ b/Dockerfile.tailscale
@@ -1,9 +1,11 @@
 FROM ubuntu:latest
 
+ARG TAILSCALE_VERSION
+
 RUN apt-get update \
     && apt-get install -y gnupg curl \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
     && apt-get update \
-    && apt-get install -y tailscale \
+    && apt-get install -y tailscale=${TAILSCALE_VERSION} \
     && rm -rf /var/lib/apt/lists/*
diff --git a/README.md b/README.md
index c4ff7bc2..a57900a0 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ Headscale implements this coordination server.
 
 - [x] Base functionality (nodes can communicate with each other)
 - [x] Node registration through the web flow
-- [x] Network changes are relied to the nodes
+- [x] Network changes are relayed to the nodes
 - [x] Namespace support (~equivalent to multi-user in Tailscale.com)
 - [x] Routing (advertise & accept, including exit nodes)
 - [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
@@ -26,7 +26,7 @@ Headscale implements this coordination server.
 - [X] ACLs
 - [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
 - [X] DNS (passing DNS servers to nodes)
-- [X] Share nodes between ~~users~~ namespaces 
+- [X] Share nodes between ~~users~~ namespaces
 - [ ] MagicDNS / Smart DNS
 
@@ -40,74 +40,87 @@ Suggestions/PRs welcomed!
 
 1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH or use the docker container
 
-   ```shell
-   docker pull headscale/headscale:x.x.x
-   ```
-
+   ```shell
+   docker pull headscale/headscale:x.x.x
+   ```
+
 2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running
 
-   ```shell
-   docker run --name headscale -e POSTGRES_DB=headscale -e \
-   POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
-   ```
+   ```shell
+   docker run --name headscale -e POSTGRES_DB=headscale -e \
+   POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
+   ```
 
-3. Set some stuff up (headscale Wireguard keys & the config.json file)
+3. Set up the configuration (headscale WireGuard keys & the config.json file)
 
-   ```shell
-   wg genkey > private.key
-   wg pubkey < private.key > public.key # not needed
+   ```shell
+   wg genkey > private.key
+   wg pubkey < private.key > public.key # not needed
 
-   # Postgres
-   cp config.json.postgres.example config.json
-   # or
-   # SQLite
-   cp config.json.sqlite.example config.json
-   ```
+   # Postgres
+   cp config.json.postgres.example config.json
+   # or
+   # SQLite
+   cp config.json.sqlite.example config.json
+   ```
 
4. 
Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)
 
-   ```shell
-   headscale namespaces create myfirstnamespace
-   ```
-   or docker:
-   ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale namespace create myfirstnamespace
-   ```
+   ```shell
+   headscale namespaces create myfirstnamespace
+   ```
+   or docker:
+
+   The `db.sqlite` mount is only needed if you use SQLite.
+   ```shell
+   touch db.sqlite
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale namespaces create myfirstnamespace
+   ```
+   or if your server is already running in docker:
+   ```shell
+   docker exec headscale headscale namespaces create myfirstnamespace
+   ```
 
5. Run the server
 
-   ```shell
-   headscale serve
-   ```
-   or docker:
-   ```shell
-   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derb.yaml:/derb.yaml -p 127.0.0.1:8080:8080 headscale/headscale:x.x.x headscale serve
-   ```
+   ```shell
+   headscale serve
+   ```
+   or docker:
 
-6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder
-   ```shell
-   systemctl stop tailscaled
-   rm -fr /var/lib/tailscale
-   systemctl start tailscaled
-   ```
+   The `db.sqlite` mount is only needed if you use SQLite.
+   ```shell
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale serve
+   ```
+
+6. If your nodes were previously connected to tailscale.com, make sure you clear the tailscaled data folder
+   ```shell
+   systemctl stop tailscaled
+   rm -fr /var/lib/tailscale
+   systemctl start tailscaled
+   ```
 
7. Add your first machine
 
-   ```shell
-   tailscale up -login-server YOUR_HEADSCALE_URL
-   ```
+   ```shell
+   tailscale up -login-server YOUR_HEADSCALE_URL
+   ```
 
8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.
 
9. 
In the server, register your machine to a namespace with the CLI
-   ```shell
-   headscale -n myfirstnamespace node register YOURMACHINEKEY
-   ```
-   or docker:
-   ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
-   ```
+   ```shell
+   headscale -n myfirstnamespace node register YOURMACHINEKEY
+   ```
+   or docker:
+   ```shell
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
+   ```
+   or if your server is already running in docker:
+   ```shell
+   docker exec headscale headscale -n myfirstnamespace node register YOURMACHINEKEY
+   ```
 
Alternatively, you can use Auth Keys to register your machines:

@@ -115,15 +128,19 @@ Alternatively, you can use Auth Keys to register your machines:
 
   ```shell
   headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
   ```
-   or docker:
-   ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
-   ```
+   or docker:
+   ```shell
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+   ```
+   or if your server is already running in docker:
+   ```shell
+   docker exec headscale headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+   ```
 
2. Use the authkey from your machine to register it
 
-   ```shell
-   tailscale up -login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
-   ```
+   ```shell
+   tailscale up -login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
+   ```
 
If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.

@@ -206,7 +223,7 @@ Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In
 
### Policy ACLs
 
-Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
+Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment. For instance, instead of referring to users when defining groups you must use namespaces (which are the equivalent of users/logins in Tailscale.com).
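A note on the `docker run` examples above: every command repeats the same volume mounts. Below is a minimal sketch that collects them once; it assumes `private.key`, `config.json`, `derp.yaml` and (for SQLite users) `db.sqlite` all sit in the current directory, and the `mounts` variable name and the `x.x.x` tag are placeholders:

```shell
# Illustrative only: shared mounts for the headscale container (paths must not contain spaces).
# Drop the db.sqlite line if you run against Postgres instead of SQLite.
mounts="-v $(pwd)/private.key:/private.key \
  -v $(pwd)/config.json:/config.json \
  -v $(pwd)/derp.yaml:/derp.yaml \
  -v $(pwd)/db.sqlite:/db.sqlite"

docker run $mounts -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale serve
docker run $mounts headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
```

Leaving `$mounts` unquoted is deliberate here: the shell's word splitting turns it back into separate `-v` arguments.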
diff --git a/integration_test.go b/integration_test.go
index 8cdc1918..4b28b148 100644
--- a/integration_test.go
+++ b/integration_test.go
@@ -1,3 +1,4 @@
+//go:build integration
 // +build integration
 
 package headscale
@@ -5,6 +6,7 @@ package headscale
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"log"
@@ -19,34 +21,60 @@ import (
 	"github.com/ory/dockertest/v3/docker"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
+	"tailscale.com/ipn/ipnstate"
 	"inet.af/netaddr"
 )
 
-var integrationTmpDir string
-var ih Headscale
+var (
+	integrationTmpDir string
+	ih                Headscale
+)
 
-var pool dockertest.Pool
-var network dockertest.Network
-var headscale dockertest.Resource
-var tailscaleCount int = 25
-var tailscales map[string]dockertest.Resource
+var (
+	pool      dockertest.Pool
+	network   dockertest.Network
+	headscale dockertest.Resource
+)
+
+var tailscaleVersions = []string{"1.14.3", "1.12.3"}
+
+type TestNamespace struct {
+	count      int
+	tailscales map[string]dockertest.Resource
+}
 
 type IntegrationTestSuite struct {
 	suite.Suite
 	stats *suite.SuiteInformation
+
+	namespaces map[string]TestNamespace
 }
 
 func TestIntegrationTestSuite(t *testing.T) {
 	s := new(IntegrationTestSuite)
+
+	s.namespaces = map[string]TestNamespace{
+		"main": {
+			count:      20,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+		"shared": {
+			count:      5,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+	}
+
 	suite.Run(t, s)
 
 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range tailscales {
-		if err := pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
+	for _, scales := range s.namespaces {
+		for _, tailscale := range scales.tailscales {
+			if err := pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
 		}
 	}
 
@@ -119,12 +147,12 @@ func saveLog(resource *dockertest.Resource, basePath string) error {
 
 	fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
 
-	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0644)
+	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0o644)
 	if err != nil {
 		return err
 	}
 
-	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0644)
+	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stderr.String()), 0o644)
 	if err != nil {
 		return err
 	}
@@ -140,6 +168,32 @@ func dockerRestartPolicy(config *docker.HostConfig) {
 	}
 }
 
+func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
+	tailscaleBuildOptions := &dockertest.BuildOptions{
+		Dockerfile: "Dockerfile.tailscale",
+		ContextDir: ".",
+		BuildArgs: []docker.BuildArg{
+			{
+				Name:  "TAILSCALE_VERSION",
+				Value: version,
+			},
+		},
+	}
+	hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
+	tailscaleOptions := &dockertest.RunOptions{
+		Name:     hostname,
+		Networks: []*dockertest.Network{&network},
+		Cmd:      []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
+	}
+
+	pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy)
+	if err != nil {
+		log.Fatalf("Could not start resource: %s", err)
+	}
+
+	
fmt.Printf("Created %s container\n", hostname) + return hostname, pts +} + func (s *IntegrationTestSuite) SetupSuite() { var err error h = Headscale{ @@ -164,11 +218,6 @@ func (s *IntegrationTestSuite) SetupSuite() { ContextDir: ".", } - tailscaleBuildOptions := &dockertest.BuildOptions{ - Dockerfile: "Dockerfile.tailscale", - ContextDir: ".", - } - currentPath, err := os.Getwd() if err != nil { log.Fatalf("Could not determine current path: %s", err) @@ -183,7 +232,7 @@ func (s *IntegrationTestSuite) SetupSuite() { Networks: []*dockertest.Network{&network}, Cmd: []string{"headscale", "serve"}, PortBindings: map[docker.Port][]docker.PortBinding{ - "8080/tcp": []docker.PortBinding{{HostPort: "8080"}}, + "8080/tcp": {{HostPort: "8080"}}, }, } @@ -196,21 +245,13 @@ func (s *IntegrationTestSuite) SetupSuite() { fmt.Println("Created headscale container") fmt.Println("Creating tailscale containers") - tailscales = make(map[string]dockertest.Resource) - for i := 0; i < tailscaleCount; i++ { - hostname := fmt.Sprintf("tailscale%d", i) - tailscaleOptions := &dockertest.RunOptions{ - Name: hostname, - Networks: []*dockertest.Network{&network}, - Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"}, - } + for namespace, scales := range s.namespaces { + for i := 0; i < scales.count; i++ { + version := tailscaleVersions[i%len(tailscaleVersions)] - if pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy); err == nil { - tailscales[hostname] = *pts - } else { - log.Fatalf("Could not start resource: %s", err) + hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version) + scales.tailscales[hostname] = *container } - fmt.Printf("Created %s container\n", hostname) } fmt.Println("Waiting for headscale to be ready") @@ -231,35 +272,38 @@ func (s *IntegrationTestSuite) SetupSuite() { } fmt.Println("headscale container is ready") - fmt.Println("Creating headscale namespace") - result, err := executeCommand( - &headscale, - []string{"headscale", "namespaces", "create", "test"}, - ) - assert.Nil(s.T(), err) - - fmt.Println("Creating pre auth key") - authKey, err := executeCommand( - &headscale, - []string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"}, - ) - assert.Nil(s.T(), err) - - headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp")) - - fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint) - for hostname, tailscale := range tailscales { - command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname} - - fmt.Println("Join command:", command) - fmt.Printf("Running join command for %s\n", hostname) - result, err = executeCommand( - &tailscale, - command, + for namespace, scales := range s.namespaces { + fmt.Printf("Creating headscale namespace: %s\n", namespace) + result, err := executeCommand( + &headscale, + []string{"headscale", "namespaces", "create", namespace}, ) - fmt.Println("tailscale result: ", result) assert.Nil(s.T(), err) - fmt.Printf("%s joined\n", hostname) + fmt.Println("headscale create namespace result: ", result) + + fmt.Printf("Creating pre auth key for %s\n", namespace) + authKey, err := executeCommand( + &headscale, + []string{"headscale", "--namespace", namespace, "preauthkeys", "create", "--reusable", "--expiration", "24h"}, + ) + assert.Nil(s.T(), err) + + headscaleEndpoint := 
fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp")) + + fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint) + for hostname, tailscale := range scales.tailscales { + command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname} + + fmt.Println("Join command:", command) + fmt.Printf("Running join command for %s\n", hostname) + result, err := executeCommand( + &tailscale, + command, + ) + fmt.Println("tailscale result: ", result) + assert.Nil(s.T(), err) + fmt.Printf("%s joined\n", hostname) + } } // The nodes need a bit of time to get their updated maps from headscale @@ -275,109 +319,217 @@ func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteI } func (s *IntegrationTestSuite) TestListNodes() { - fmt.Println("Listing nodes") - result, err := executeCommand( - &headscale, - []string{"headscale", "-n", "test", "nodes", "list"}, - ) - assert.Nil(s.T(), err) + for namespace, scales := range s.namespaces { + fmt.Println("Listing nodes") + result, err := executeCommand( + &headscale, + []string{"headscale", "--namespace", namespace, "nodes", "list"}, + ) + assert.Nil(s.T(), err) - fmt.Printf("List nodes: \n%s\n", result) + fmt.Printf("List nodes: \n%s\n", result) - // Chck that the correct count of host is present in node list - lines := strings.Split(result, "\n") - assert.Equal(s.T(), len(tailscales), len(lines)-2) + // Chck that the correct count of host is present in node list + lines := strings.Split(result, "\n") + assert.Equal(s.T(), len(scales.tailscales), len(lines)-2) - for hostname, _ := range tailscales { - assert.Contains(s.T(), result, hostname) + for hostname := range scales.tailscales { + assert.Contains(s.T(), result, hostname) + } } } func (s *IntegrationTestSuite) TestGetIpAddresses() { - ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10") - ips, err := getIPs() - assert.Nil(s.T(), err) + for _, scales := range s.namespaces { + ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10") + ips, err := getIPs(scales.tailscales) + assert.Nil(s.T(), err) - for hostname, _ := range tailscales { - s.T().Run(hostname, func(t *testing.T) { - ip := ips[hostname] + for hostname := range scales.tailscales { + s.T().Run(hostname, func(t *testing.T) { + ip := ips[hostname] - fmt.Printf("IP for %s: %s\n", hostname, ip) + fmt.Printf("IP for %s: %s\n", hostname, ip) - // c.Assert(ip.Valid(), check.IsTrue) - assert.True(t, ip.Is4()) - assert.True(t, ipPrefix.Contains(ip)) + // c.Assert(ip.Valid(), check.IsTrue) + assert.True(t, ip.Is4()) + assert.True(t, ipPrefix.Contains(ip)) - ips[hostname] = ip - }) + ips[hostname] = ip + }) + } } } func (s *IntegrationTestSuite) TestStatus() { - ips, err := getIPs() - assert.Nil(s.T(), err) + for _, scales := range s.namespaces { + ips, err := getIPs(scales.tailscales) + assert.Nil(s.T(), err) - for hostname, tailscale := range tailscales { - s.T().Run(hostname, func(t *testing.T) { - command := []string{"tailscale", "status"} + for hostname, tailscale := range scales.tailscales { + s.T().Run(hostname, func(t *testing.T) { + command := []string{"tailscale", "status", "--json"} - fmt.Printf("Getting status for %s\n", hostname) - result, err := executeCommand( - &tailscale, - command, - ) - assert.Nil(t, err) - // fmt.Printf("Status for %s: %s", hostname, result) + fmt.Printf("Getting status for %s\n", hostname) + result, err := executeCommand( + &tailscale, + command, + ) + assert.Nil(t, err) - // Check if 
we have as many nodes in status
-			// as we have IPs/tailscales
-			lines := strings.Split(result, "\n")
-			assert.Equal(t, len(ips), len(lines)-1)
-			assert.Equal(t, len(tailscales), len(lines)-1)
+				var status ipnstate.Status
+				err = json.Unmarshal([]byte(result), &status)
+				assert.Nil(t, err)
 
-			// Check that all hosts is present in all hosts status
-			for ipHostname, ip := range ips {
-				assert.Contains(t, result, ip.String())
-				assert.Contains(t, result, ipHostname)
-			}
-		})
-	}
-}
+				// TODO(kradalby): Replace this check with peer length of SAME namespace
+				// Check if we have as many nodes in status
+				// as we have IPs/tailscales
+				// lines := strings.Split(result, "\n")
+				// assert.Equal(t, len(ips), len(lines)-1)
+				// assert.Equal(t, len(scales.tailscales), len(lines)-1)
 
-func (s *IntegrationTestSuite) TestPingAllPeers() {
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
+				peerIps := getIPsfromIPNstate(status)
 
-	for hostname, tailscale := range tailscales {
-		for peername, ip := range ips {
-			s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
-				// We currently cant ping ourselves, so skip that.
-				if peername != hostname {
-					// We are only interested in "direct ping" which means what we
-					// might need a couple of more attempts before reaching the node.
-					command := []string{
-						"tailscale", "ping",
-						"--timeout=1s",
-						"--c=20",
-						"--until-direct=true",
-						ip.String(),
+				// Check that all hosts are present in each host's status
+				for ipHostname, ip := range ips {
+					if hostname != ipHostname {
+						assert.Contains(t, peerIps, ip)
 					}
-
-					fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
-					result, err := executeCommand(
-						&tailscale,
-						command,
-					)
-					assert.Nil(t, err)
-					fmt.Printf("Result for %s: %s\n", hostname, result)
-					assert.Contains(t, result, "pong")
 				}
 			})
 		}
 	}
 }
 
-func getIPs() (map[string]netaddr.IP, error) {
+func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
+	ips := make([]netaddr.IP, 0)
+
+	for _, peer := range status.Peer {
+		ips = append(ips, peer.TailscaleIPs...)
+	}
+
+	return ips
+}
+
+func (s *IntegrationTestSuite) TestPingAllPeers() {
+	for _, scales := range s.namespaces {
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname, tailscale := range scales.tailscales {
+			for peername, ip := range ips {
+				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
+					// We currently can't ping ourselves, so skip that.
+					if peername != hostname {
+						// We are only interested in a "direct ping", which means we
+						// might need a couple more attempts before reaching the node.
+						command := []string{
+							"tailscale", "ping",
+							"--timeout=1s",
+							"--c=20",
+							"--until-direct=true",
+							ip.String(),
+						}
+
+						fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
+						result, err := executeCommand(
+							&tailscale,
+							command,
+						)
+						assert.Nil(t, err)
+						fmt.Printf("Result for %s: %s\n", hostname, result)
+						assert.Contains(t, result, "pong")
+					}
+				})
+			}
+		}
+	}
+}
+
+func (s *IntegrationTestSuite) TestSharedNodes() {
+	main := s.namespaces["main"]
+	shared := s.namespaces["shared"]
+
+	result, err := executeCommand(
+		&headscale,
+		[]string{"headscale", "nodes", "list", "-o", "json", "--namespace", "shared"},
+	)
+	assert.Nil(s.T(), err)
+
+	var machineList []Machine
+	err = json.Unmarshal([]byte(result), &machineList)
+	assert.Nil(s.T(), err)
+
+	for _, machine := range machineList {
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "nodes", "share", "--namespace", "shared", fmt.Sprint(machine.ID), "main"},
+		)
+		assert.Nil(s.T(), err)
+
+		fmt.Println("Shared node with result: ", result)
+	}
+
+	result, err = executeCommand(
+		&headscale,
+		[]string{"headscale", "nodes", "list", "--namespace", "main"},
+	)
+	assert.Nil(s.T(), err)
+	fmt.Println("Nodelist after sharing", result)
+
+	// Check that the correct number of hosts is present in the node list
+	lines := strings.Split(result, "\n")
+	assert.Equal(s.T(), len(main.tailscales)+len(shared.tailscales), len(lines)-2)
+
+	for hostname := range main.tailscales {
+		assert.Contains(s.T(), result, hostname)
+	}
+
+	for hostname := range shared.tailscales {
+		assert.Contains(s.T(), result, hostname)
+	}
+
+	// TODO(kradalby): Figure out why these connections are not set up
+	// // TODO: See if we can have a more deterministic wait here.
+	// time.Sleep(100 * time.Second)
+
+	// mainIps, err := getIPs(main.tailscales)
+	// assert.Nil(s.T(), err)
+
+	// sharedIps, err := getIPs(shared.tailscales)
+	// assert.Nil(s.T(), err)
+
+	// for hostname, tailscale := range main.tailscales {
+	// 	for peername, ip := range sharedIps {
+	// 		s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
+	// 			// We currently can't ping ourselves, so skip that.
+	// 			if peername != hostname {
+	// 				// We are only interested in a "direct ping", which means we
+	// 				// might need a couple more attempts before reaching the node.
+	// 				command := []string{
+	// 					"tailscale", "ping",
+	// 					"--timeout=1s",
+	// 					"--c=20",
+	// 					"--until-direct=true",
+	// 					ip.String(),
+	// 				}
+
+	// 				fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, mainIps[hostname], peername, ip)
+	// 				result, err := executeCommand(
+	// 					&tailscale,
+	// 					command,
+	// 				)
+	// 				assert.Nil(t, err)
+	// 				fmt.Printf("Result for %s: %s\n", hostname, result)
+	// 				assert.Contains(t, result, "pong")
+	// 			}
+	// 		})
+	// 	}
+	// }
+}
+
+func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, error) {
 	ips := make(map[string]netaddr.IP)
 	for hostname, tailscale := range tailscales {
 		command := []string{"tailscale", "ip"}
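A closing note on the `//go:build integration` constraint at the top of `integration_test.go`: a plain `go test` skips this whole suite, so it has to be selected explicitly via the build tag. A minimal sketch of a local run, assuming a working Docker daemon and the repository root as the working directory (the timeout is an arbitrary but generous choice, since the suite builds images and boots 25 tailscale containers):

```shell
# The build tag hides these tests from a plain `go test` run.
go test -tags integration -timeout 60m -run TestIntegrationTestSuite ./...
```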