mirror of https://github.com/juanfont/headscale.git synced 2024-12-20 19:09:07 +01:00

Merge pull request #121 from juanfont/main

New integration test for tailscale 1.14
Commit 7db91c68be by Kristoffer Dalby, 2021-09-23 14:51:26 +01:00 (committed by GitHub)
5 changed files with 368 additions and 196 deletions

GitHub Actions workflow (Docker image publishing)

@@ -49,6 +49,7 @@ jobs:
           ${{ secrets.DOCKERHUB_USERNAME }}/headscale
           ghcr.io/${{ github.repository_owner }}/headscale
         tags: |
+          type=semver,pattern={{version}}
           type=semver,pattern={{major}}.{{minor}}
           type=semver,pattern={{major}}
           type=sha
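The added `type=semver,pattern={{version}}` line publishes a tag for the full release version alongside the existing ones. As an illustration (the version number is hypothetical), a release tagged `v1.2.3` would then be pullable as any of:

```shell
docker pull headscale/headscale:1.2.3          # type=semver,pattern={{version}} (added here)
docker pull headscale/headscale:1.2            # type=semver,pattern={{major}}.{{minor}}
docker pull headscale/headscale:1              # type=semver,pattern={{major}}
docker pull headscale/headscale:sha-<commit>   # type=sha
```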

Dockerfile

@@ -1,4 +1,4 @@
-FROM golang:latest AS build
+FROM golang:1.17.1-bullseye AS build
 ENV GOPATH /go
 COPY go.mod go.sum /go/src/headscale/
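Pinning the builder image to `golang:1.17.1-bullseye` instead of `latest` keeps builds reproducible. A quick sanity check of the pinned toolchain, assuming a local Docker daemon:

```shell
docker run --rm golang:1.17.1-bullseye go version
# expected: go version go1.17.1 linux/amd64 (on amd64 hosts)
```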

Dockerfile.tailscale

@@ -1,9 +1,11 @@
 FROM ubuntu:latest
+
+ARG TAILSCALE_VERSION
 RUN apt-get update \
   && apt-get install -y gnupg curl \
   && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
   && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
   && apt-get update \
-  && apt-get install -y tailscale \
+  && apt-get install -y tailscale=${TAILSCALE_VERSION} \
   && rm -rf /var/lib/apt/lists/*
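The new `TAILSCALE_VERSION` build argument is what the integration tests below use to pin each container to a specific client release. Built by hand it would look roughly like this (the image tag is illustrative):

```shell
docker build -f Dockerfile.tailscale \
  --build-arg TAILSCALE_VERSION=1.14.3 \
  -t tailscale-client:1.14.3 .
```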

README.md

@@ -18,7 +18,7 @@ Headscale implements this coordination server.
 - [x] Base functionality (nodes can communicate with each other)
 - [x] Node registration through the web flow
-- [x] Network changes are relied to the nodes
+- [x] Network changes are relayed to the nodes
 - [x] Namespace support (~equivalent to multi-user in Tailscale.com)
 - [x] Routing (advertise & accept, including exit nodes)
 - [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
@@ -26,7 +26,7 @@ Headscale implements this coordination server.
 - [X] ACLs
 - [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
 - [X] DNS (passing DNS servers to nodes)
 - [X] Share nodes between ~~users~~ namespaces
 - [ ] MagicDNS / Smart DNS
@@ -40,74 +40,87 @@ Suggestions/PRs welcomed!
 1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH or use the docker container

    ```shell
    docker pull headscale/headscale:x.x.x
    ```

    <!--
    or

    ```shell
    docker pull ghcr.io/juanfont/headscale:x.x.x
    ``` -->

 2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running

    ```shell
    docker run --name headscale -e POSTGRES_DB=headscale -e \
      POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
    ```

 3. Set some stuff up (headscale Wireguard keys & the config.json file)

    ```shell
    wg genkey > private.key
    wg pubkey < private.key > public.key  # not needed

    # Postgres
    cp config.json.postgres.example config.json
    # or
    # SQLite
    cp config.json.sqlite.example config.json
    ```

 4. Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)

    ```shell
    headscale namespaces create myfirstnamespace
    ```

    or docker:
+
+   the db.sqlite mount is only needed if you use sqlite
+
    ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale namespace create myfirstnamespace
+   touch db.sqlite
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale namespaces create myfirstnamespace
    ```
+
+   or if your server is already running in docker:
+
+   ```shell
+   docker exec <container_name> headscale namespaces create myfirstnamespace
+   ```

 5. Run the server

    ```shell
    headscale serve
    ```

    or docker:
+
+   the db.sqlite mount is only needed if you use sqlite
+
    ```shell
-   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derb.yaml:/derb.yaml -p 127.0.0.1:8080:8080 headscale/headscale:x.x.x headscale serve
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale serve
    ```

 6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder

    ```shell
    systemctl stop tailscaled
    rm -fr /var/lib/tailscale
    systemctl start tailscaled
    ```

 7. Add your first machine

    ```shell
    tailscale up -login-server YOUR_HEADSCALE_URL
    ```

 8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.

 9. In the server, register your machine to a namespace with the CLI

    ```shell
    headscale -n myfirstnamespace node register YOURMACHINEKEY
    ```

    or docker:

    ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
    ```
+
+   or if your server is already running in docker:
+
+   ```shell
+   docker exec <container_name> headscale -n myfirstnamespace node register YOURMACHINEKEY
+   ```

 Alternatively, you can use Auth Keys to register your machines:

@@ -115,15 +128,19 @@ Alternatively, you can use Auth Keys to register your machines:
    ```shell
    headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
    ```

    or docker:

    ```shell
-   docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+   docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
    ```
+
+   or if your server is already running in docker:
+
+   ```shell
+   docker exec <container_name> headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+   ```

 2. Use the authkey from your machine to register it

    ```shell
    tailscale up -login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
    ```

 If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.
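For example, such a key could be created with (mirroring the `preauthkeys create` command above):

```shell
headscale -n myfirstnamespace preauthkeys create --ephemeral --expiration 24h
```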
@@ -206,7 +223,7 @@ Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In
 ### Policy ACLs

 Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

 For instance, instead of referring to users when defining groups you must
 use namespaces (which are the equivalent of user/logins in Tailscale.com).

integration_test.go

@@ -1,3 +1,4 @@
+//go:build integration
 // +build integration

 package headscale
@@ -5,6 +6,7 @@ package headscale
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"log"
@@ -19,34 +21,60 @@ import (
 	"github.com/ory/dockertest/v3/docker"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
+	"tailscale.com/ipn/ipnstate"
 	"inet.af/netaddr"
 )

-var integrationTmpDir string
-var ih Headscale
+var (
+	integrationTmpDir string
+	ih                Headscale
+)

-var pool dockertest.Pool
-var network dockertest.Network
-var headscale dockertest.Resource
-var tailscaleCount int = 25
-var tailscales map[string]dockertest.Resource
+var (
+	pool      dockertest.Pool
+	network   dockertest.Network
+	headscale dockertest.Resource
+)
+
+var tailscaleVersions = []string{"1.14.3", "1.12.3"}
+
+type TestNamespace struct {
+	count      int
+	tailscales map[string]dockertest.Resource
+}

 type IntegrationTestSuite struct {
 	suite.Suite
 	stats *suite.SuiteInformation
+	namespaces map[string]TestNamespace
 }

 func TestIntegrationTestSuite(t *testing.T) {
 	s := new(IntegrationTestSuite)

+	s.namespaces = map[string]TestNamespace{
+		"main": {
+			count:      20,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+		"shared": {
+			count:      5,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+	}
+
 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range tailscales {
-		if err := pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
-		}
-	}
+	for _, scales := range s.namespaces {
+		for _, tailscale := range scales.tailscales {
+			if err := pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}
+	}
@@ -119,12 +147,12 @@ func saveLog(resource *dockertest.Resource, basePath string) error {
 	fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

-	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0644)
+	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0o644)
 	if err != nil {
 		return err
 	}

-	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0644)
+	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0o644)
 	if err != nil {
 		return err
 	}
@@ -140,6 +168,32 @@ func dockerRestartPolicy(config *docker.HostConfig) {
 	}
 }

+func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
+	tailscaleBuildOptions := &dockertest.BuildOptions{
+		Dockerfile: "Dockerfile.tailscale",
+		ContextDir: ".",
+		BuildArgs: []docker.BuildArg{
+			{
+				Name:  "TAILSCALE_VERSION",
+				Value: version,
+			},
+		},
+	}
+	hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
+	tailscaleOptions := &dockertest.RunOptions{
+		Name:     hostname,
+		Networks: []*dockertest.Network{&network},
+		Cmd:      []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
+	}
+
+	pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy)
+	if err != nil {
+		log.Fatalf("Could not start resource: %s", err)
+	}
+	fmt.Printf("Created %s container\n", hostname)
+
+	return hostname, pts
+}
+
 func (s *IntegrationTestSuite) SetupSuite() {
 	var err error
 	h = Headscale{
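Each container created by this helper gets a hostname like `main-tailscale-1-14-3-0` (namespace, dotted version with dashes, index). To check which client version a given container actually runs, something like this should work; the container name is illustrative and follows the pattern above:

```shell
docker exec main-tailscale-1-14-3-0 tailscale version
```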
@@ -164,11 +218,6 @@ func (s *IntegrationTestSuite) SetupSuite() {
 		ContextDir: ".",
 	}

-	tailscaleBuildOptions := &dockertest.BuildOptions{
-		Dockerfile: "Dockerfile.tailscale",
-		ContextDir: ".",
-	}
-
 	currentPath, err := os.Getwd()
 	if err != nil {
 		log.Fatalf("Could not determine current path: %s", err)
@@ -183,7 +232,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
 		Networks: []*dockertest.Network{&network},
 		Cmd:      []string{"headscale", "serve"},
 		PortBindings: map[docker.Port][]docker.PortBinding{
-			"8080/tcp": []docker.PortBinding{{HostPort: "8080"}},
+			"8080/tcp": {{HostPort: "8080"}},
 		},
 	}
@@ -196,21 +245,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	fmt.Println("Created headscale container")

 	fmt.Println("Creating tailscale containers")
-	tailscales = make(map[string]dockertest.Resource)
-	for i := 0; i < tailscaleCount; i++ {
-		hostname := fmt.Sprintf("tailscale%d", i)
-		tailscaleOptions := &dockertest.RunOptions{
-			Name:     hostname,
-			Networks: []*dockertest.Network{&network},
-			Cmd:      []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
-		}
-
-		if pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy); err == nil {
-			tailscales[hostname] = *pts
-		} else {
-			log.Fatalf("Could not start resource: %s", err)
-		}
-		fmt.Printf("Created %s container\n", hostname)
-	}
+	for namespace, scales := range s.namespaces {
+		for i := 0; i < scales.count; i++ {
+			version := tailscaleVersions[i%len(tailscaleVersions)]
+
+			hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version)
+			scales.tailscales[hostname] = *container
+		}
+	}

 	fmt.Println("Waiting for headscale to be ready")
@@ -231,35 +272,38 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	}
 	fmt.Println("headscale container is ready")

-	fmt.Println("Creating headscale namespace")
-	result, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "namespaces", "create", "test"},
-	)
-	assert.Nil(s.T(), err)
-
-	fmt.Println("Creating pre auth key")
-	authKey, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"},
-	)
-	assert.Nil(s.T(), err)
-
-	headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
-
-	fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
-	for hostname, tailscale := range tailscales {
-		command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}
-
-		fmt.Println("Join command:", command)
-		fmt.Printf("Running join command for %s\n", hostname)
-		result, err = executeCommand(
-			&tailscale,
-			command,
-		)
-		fmt.Println("tailscale result: ", result)
-		assert.Nil(s.T(), err)
-		fmt.Printf("%s joined\n", hostname)
-	}
+	for namespace, scales := range s.namespaces {
+		fmt.Printf("Creating headscale namespace: %s\n", namespace)
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "namespaces", "create", namespace},
+		)
+		assert.Nil(s.T(), err)
+		fmt.Println("headscale create namespace result: ", result)
+
+		fmt.Printf("Creating pre auth key for %s\n", namespace)
+		authKey, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "--namespace", namespace, "preauthkeys", "create", "--reusable", "--expiration", "24h"},
+		)
+		assert.Nil(s.T(), err)
+
+		headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
+
+		fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
+		for hostname, tailscale := range scales.tailscales {
+			command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}
+
+			fmt.Println("Join command:", command)
+			fmt.Printf("Running join command for %s\n", hostname)
+			result, err := executeCommand(
+				&tailscale,
+				command,
+			)
+			fmt.Println("tailscale result: ", result)
+			assert.Nil(s.T(), err)
+			fmt.Printf("%s joined\n", hostname)
+		}
+	}

 	// The nodes need a bit of time to get their updated maps from headscale
@@ -275,109 +319,217 @@ func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
 }

 func (s *IntegrationTestSuite) TestListNodes() {
-	fmt.Println("Listing nodes")
-	result, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "-n", "test", "nodes", "list"},
-	)
-	assert.Nil(s.T(), err)
-
-	fmt.Printf("List nodes: \n%s\n", result)
-
-	// Chck that the correct count of host is present in node list
-	lines := strings.Split(result, "\n")
-	assert.Equal(s.T(), len(tailscales), len(lines)-2)
-
-	for hostname, _ := range tailscales {
-		assert.Contains(s.T(), result, hostname)
-	}
+	for namespace, scales := range s.namespaces {
+		fmt.Println("Listing nodes")
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "--namespace", namespace, "nodes", "list"},
+		)
+		assert.Nil(s.T(), err)
+
+		fmt.Printf("List nodes: \n%s\n", result)
+
+		// Check that the correct count of hosts is present in the node list
+		lines := strings.Split(result, "\n")
+		assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
+
+		for hostname := range scales.tailscales {
+			assert.Contains(s.T(), result, hostname)
+		}
+	}
 }
 func (s *IntegrationTestSuite) TestGetIpAddresses() {
-	ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname, _ := range tailscales {
-		s.T().Run(hostname, func(t *testing.T) {
-			ip := ips[hostname]
-
-			fmt.Printf("IP for %s: %s\n", hostname, ip)
-
-			// c.Assert(ip.Valid(), check.IsTrue)
-			assert.True(t, ip.Is4())
-			assert.True(t, ipPrefix.Contains(ip))
-
-			ips[hostname] = ip
-		})
-	}
+	for _, scales := range s.namespaces {
+		ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname := range scales.tailscales {
+			s.T().Run(hostname, func(t *testing.T) {
+				ip := ips[hostname]
+
+				fmt.Printf("IP for %s: %s\n", hostname, ip)
+
+				// c.Assert(ip.Valid(), check.IsTrue)
+				assert.True(t, ip.Is4())
+				assert.True(t, ipPrefix.Contains(ip))
+
+				ips[hostname] = ip
+			})
+		}
+	}
 }
-func (s *IntegrationTestSuite) TestStatus() {
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname, tailscale := range tailscales {
-		s.T().Run(hostname, func(t *testing.T) {
-			command := []string{"tailscale", "status"}
-
-			fmt.Printf("Getting status for %s\n", hostname)
-			result, err := executeCommand(
-				&tailscale,
-				command,
-			)
-			assert.Nil(t, err)
-			// fmt.Printf("Status for %s: %s", hostname, result)
-
-			// Check if we have as many nodes in status
-			// as we have IPs/tailscales
-			lines := strings.Split(result, "\n")
-			assert.Equal(t, len(ips), len(lines)-1)
-			assert.Equal(t, len(tailscales), len(lines)-1)
-
-			// Check that all hosts is present in all hosts status
-			for ipHostname, ip := range ips {
-				assert.Contains(t, result, ip.String())
-				assert.Contains(t, result, ipHostname)
-			}
-		})
-	}
-}
-
-func (s *IntegrationTestSuite) TestPingAllPeers() {
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname, tailscale := range tailscales {
-		for peername, ip := range ips {
-			s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
-				// We currently cant ping ourselves, so skip that.
-				if peername != hostname {
-					// We are only interested in "direct ping" which means what we
-					// might need a couple of more attempts before reaching the node.
-					command := []string{
-						"tailscale", "ping",
-						"--timeout=1s",
-						"--c=20",
-						"--until-direct=true",
-						ip.String(),
-					}
-
-					fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
-					result, err := executeCommand(
-						&tailscale,
-						command,
-					)
-					assert.Nil(t, err)
-					fmt.Printf("Result for %s: %s\n", hostname, result)
-					assert.Contains(t, result, "pong")
-				}
-			})
-		}
-	}
-}
-
-func getIPs() (map[string]netaddr.IP, error) {
+func (s *IntegrationTestSuite) TestStatus() {
+	for _, scales := range s.namespaces {
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname, tailscale := range scales.tailscales {
+			s.T().Run(hostname, func(t *testing.T) {
+				command := []string{"tailscale", "status", "--json"}
+
+				fmt.Printf("Getting status for %s\n", hostname)
+				result, err := executeCommand(
+					&tailscale,
+					command,
+				)
+				assert.Nil(t, err)
+
+				var status ipnstate.Status
+				err = json.Unmarshal([]byte(result), &status)
+				assert.Nil(s.T(), err)
+
+				// TODO(kradalby): Replace this check with peer length of SAME namespace
+				// Check if we have as many nodes in status
+				// as we have IPs/tailscales
+				// lines := strings.Split(result, "\n")
+				// assert.Equal(t, len(ips), len(lines)-1)
+				// assert.Equal(t, len(scales.tailscales), len(lines)-1)
+
+				peerIps := getIPsfromIPNstate(status)
+
+				// Check that all hosts are present in all hosts' status
+				for ipHostname, ip := range ips {
+					if hostname != ipHostname {
+						assert.Contains(t, peerIps, ip)
+					}
+				}
+			})
+		}
+	}
+}
+
+func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
+	ips := make([]netaddr.IP, 0)
+
+	for _, peer := range status.Peer {
+		ips = append(ips, peer.TailscaleIPs...)
+	}
+
+	return ips
+}
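Switching from scraping the human-readable `tailscale status` output to `--json` (unmarshalled into `ipnstate.Status`) gives the assertions a stable interface. The same peer data can be inspected by hand; this `jq` filter is illustrative:

```shell
tailscale status --json | jq '.Peer[].TailscaleIPs'
```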
+func (s *IntegrationTestSuite) TestPingAllPeers() {
+	for _, scales := range s.namespaces {
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname, tailscale := range scales.tailscales {
+			for peername, ip := range ips {
+				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
+					// We currently can't ping ourselves, so skip that.
+					if peername != hostname {
+						// We are only interested in a "direct ping", which means we
+						// might need a couple more attempts before reaching the node.
+						command := []string{
+							"tailscale", "ping",
+							"--timeout=1s",
+							"--c=20",
+							"--until-direct=true",
+							ip.String(),
+						}
+
+						fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
+						result, err := executeCommand(
+							&tailscale,
+							command,
+						)
+						assert.Nil(t, err)
+						fmt.Printf("Result for %s: %s\n", hostname, result)
+						assert.Contains(t, result, "pong")
+					}
+				})
+			}
+		}
+	}
+}
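Each subtest above amounts to running the following inside the source container; the peer IP here is a placeholder from the 100.64.0.0/10 range used earlier:

```shell
tailscale ping --timeout=1s --c=20 --until-direct=true 100.64.0.2
```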
+func (s *IntegrationTestSuite) TestSharedNodes() {
+	main := s.namespaces["main"]
+	shared := s.namespaces["shared"]
+
+	result, err := executeCommand(
+		&headscale,
+		[]string{"headscale", "nodes", "list", "-o", "json", "--namespace", "shared"},
+	)
+	assert.Nil(s.T(), err)
+
+	var machineList []Machine
+	err = json.Unmarshal([]byte(result), &machineList)
+	assert.Nil(s.T(), err)
+
+	for _, machine := range machineList {
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "nodes", "share", "--namespace", "shared", fmt.Sprint(machine.ID), "main"},
+		)
+		assert.Nil(s.T(), err)
+
+		fmt.Println("Shared node with result: ", result)
+	}
+
+	result, err = executeCommand(
+		&headscale,
+		[]string{"headscale", "nodes", "list", "--namespace", "main"},
+	)
+	assert.Nil(s.T(), err)
+	fmt.Println("Nodelist after sharing", result)
+
+	// Check that the correct count of hosts is present in the node list
+	lines := strings.Split(result, "\n")
+	assert.Equal(s.T(), len(main.tailscales)+len(shared.tailscales), len(lines)-2)
+
+	for hostname := range main.tailscales {
+		assert.Contains(s.T(), result, hostname)
+	}
+
+	for hostname := range shared.tailscales {
+		assert.Contains(s.T(), result, hostname)
+	}
+
+	// TODO(kradalby): Figure out why these connections are not set up
+	// // TODO: See if we can have a more deterministic wait here.
+	// time.Sleep(100 * time.Second)
+	// mainIps, err := getIPs(main.tailscales)
+	// assert.Nil(s.T(), err)
+	// sharedIps, err := getIPs(shared.tailscales)
+	// assert.Nil(s.T(), err)
+	// for hostname, tailscale := range main.tailscales {
+	// 	for peername, ip := range sharedIps {
+	// 		s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
+	// 			// We currently can't ping ourselves, so skip that.
+	// 			if peername != hostname {
+	// 				// We are only interested in a "direct ping", which means we
+	// 				// might need a couple more attempts before reaching the node.
+	// 				command := []string{
+	// 					"tailscale", "ping",
+	// 					"--timeout=1s",
+	// 					"--c=20",
+	// 					"--until-direct=true",
+	// 					ip.String(),
+	// 				}
+	// 				fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, mainIps[hostname], peername, ip)
+	// 				result, err := executeCommand(
+	// 					&tailscale,
+	// 					command,
+	// 				)
+	// 				assert.Nil(t, err)
+	// 				fmt.Printf("Result for %s: %s\n", hostname, result)
+	// 				assert.Contains(t, result, "pong")
+	// 			}
+	// 		})
+	// 	}
+	// }
+}
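The sharing flow exercised by this test matches what an operator would run with the CLI; the node ID `1` is illustrative:

```shell
# list nodes in the "shared" namespace as JSON to find their IDs
headscale nodes list -o json --namespace shared
# share one of them into "main", then verify it appears there
headscale nodes share --namespace shared 1 main
headscale nodes list --namespace main
```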
+func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, error) {
 	ips := make(map[string]netaddr.IP)
 	for hostname, tailscale := range tailscales {
 		command := []string{"tailscale", "ip"}