
Move common integration test commands into common file

Kristoffer Dalby 2021-11-04 22:10:57 +00:00
parent 4226da3d6b
commit 67adea5cab
2 changed files with 134 additions and 113 deletions

View File

@@ -0,0 +1,69 @@
//go:build integration
// +build integration
package headscale
import (
"bytes"
"fmt"
"time"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
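// ExecuteCommand runs cmd inside the given container resource with the
// provided environment and returns its stdout, failing if the command
// exits non-zero or does not finish within the timeout.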
func ExecuteCommand(resource *dockertest.Resource, cmd []string, env []string) (string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
// TODO(kradalby): Make configurable
timeout := 10 * time.Second
type result struct {
exitCode int
err error
}
resultChan := make(chan result, 1)
// Run the long-running function in its own goroutine and pass its
// response back into our channel.
go func() {
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: append(env, "LOG_LEVEL=error"),
StdOut: &stdout,
StdErr: &stderr,
},
)
resultChan <- result{exitCode, err}
}()
// Listen on our channel AND a timeout channel - whichever happens first.
select {
case res := <-resultChan:
if res.err != nil {
return "", res.err
}
if res.exitCode != 0 {
fmt.Println("Command: ", cmd)
fmt.Println("stdout: ", stdout.String())
fmt.Println("stderr: ", stderr.String())
return "", fmt.Errorf("command failed with: %s", stderr.String())
}
return stdout.String(), nil
case <-time.After(timeout):
return "", fmt.Errorf("command timed out after %s", timeout)
}
}
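// DockerRestartPolicy configures a container to never restart and to be
// removed automatically once it stops.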
func DockerRestartPolicy(config *docker.HostConfig) {
// set AutoRemove to true so that the stopped container goes away by itself
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
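
As a usage sketch, a test in the same package could call these shared helpers roughly as follows; the test name, image, and commands below are illustrative assumptions rather than code from this commit.

//go:build integration
// +build integration

package headscale

import (
	"fmt"
	"testing"

	"github.com/ory/dockertest/v3"
)

// TestCommonHelpersSketch is a hypothetical example of using the shared helpers.
func TestCommonHelpersSketch(t *testing.T) {
	pool, err := dockertest.NewPool("")
	if err != nil {
		t.Fatalf("Could not connect to docker: %s", err)
	}
	// Start a throwaway container using the shared restart policy.
	resource, err := pool.RunWithOptions(
		&dockertest.RunOptions{
			Repository: "alpine",
			Tag:        "3.14",
			Cmd:        []string{"sleep", "60"},
		},
		DockerRestartPolicy,
	)
	if err != nil {
		t.Fatalf("Could not start resource: %s", err)
	}
	defer pool.Purge(resource)
	// Run a command inside the container via the shared helper.
	stdout, err := ExecuteCommand(resource, []string{"echo", "hello"}, []string{})
	if err != nil {
		t.Fatalf("Command failed: %s", err)
	}
	fmt.Println(stdout)
}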

View File

@@ -18,6 +18,7 @@ import (
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
@@ -28,17 +29,6 @@ import (
"inet.af/netaddr"
)
var (
integrationTmpDir string
ih Headscale
)
var (
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
)
var tailscaleVersions = []string{"1.16.2", "1.14.3", "1.12.3"}
type TestNamespace struct {
@@ -50,6 +40,10 @@ type IntegrationTestSuite struct {
suite.Suite
stats *suite.SuiteInformation
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
namespaces map[string]TestNamespace
}
@@ -74,76 +68,28 @@ func TestIntegrationTestSuite(t *testing.T) {
// we have potentially saved the logs.
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := pool.Purge(&tailscale); err != nil {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if !s.stats.Passed() {
err := saveLog(&headscale, "test_output")
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := pool.Purge(&headscale); err != nil {
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := network.Close(); err != nil {
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
func executeCommand(resource *dockertest.Resource, cmd []string, env []string) (string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
// TODO(kradalby): Make configurable
timeout := 10 * time.Second
type result struct {
exitCode int
err error
}
resultChan := make(chan result, 1)
// Run your long running function in it's own goroutine and pass back it's
// response into our channel.
go func() {
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: env,
StdOut: &stdout,
StdErr: &stderr,
},
)
resultChan <- result{exitCode, err}
}()
// Listen on our channel AND a timeout channel - which ever happens first.
select {
case res := <-resultChan:
if res.err != nil {
return "", res.err
}
if res.exitCode != 0 {
fmt.Println("Command: ", cmd)
fmt.Println("stdout: ", stdout.String())
fmt.Println("stderr: ", stderr.String())
return "", fmt.Errorf("command failed with: %s", stderr.String())
}
return stdout.String(), nil
case <-time.After(timeout):
return "", fmt.Errorf("command timed out after %s", timeout)
}
}
func saveLog(resource *dockertest.Resource, basePath string) error {
func (s *IntegrationTestSuite) saveLog(resource *dockertest.Resource, basePath string) error {
err := os.MkdirAll(basePath, os.ModePerm)
if err != nil {
return err
@@ -152,7 +98,7 @@ func saveLog(resource *dockertest.Resource, basePath string) error {
var stdout bytes.Buffer
var stderr bytes.Buffer
err = pool.Client.Logs(
err = s.pool.Client.Logs(
docker.LogsOptions{
Context: context.TODO(),
Container: resource.Container.ID,
@@ -185,15 +131,9 @@ func saveLog(resource *dockertest.Resource, basePath string) error {
return nil
}
func dockerRestartPolicy(config *docker.HostConfig) {
// set AutoRemove to true so that stopped container goes away by itself
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
func (s *IntegrationTestSuite) tailscaleContainer(
namespace, identifier, version string,
) (string, *dockertest.Resource) {
tailscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
@@ -207,11 +147,11 @@ func tailscaleContainer(namespace, identifier, version string) (string, *dockert
hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{&network},
Networks: []*dockertest.Network{&s.network},
Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
}
pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy)
pts, err := s.pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, DockerRestartPolicy)
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
@@ -227,13 +167,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
}
if ppool, err := dockertest.NewPool(""); err == nil {
pool = *ppool
s.pool = *ppool
} else {
log.Fatalf("Could not connect to docker: %s", err)
}
if pnetwork, err := pool.CreateNetwork("headscale-test"); err == nil {
network = *pnetwork
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
s.network = *pnetwork
} else {
log.Fatalf("Could not create network: %s", err)
}
@@ -253,13 +193,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
Mounts: []string{
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
},
Networks: []*dockertest.Network{&network},
Networks: []*dockertest.Network{&s.network},
Cmd: []string{"headscale", "serve"},
}
fmt.Println("Creating headscale container")
if pheadscale, err := pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, dockerRestartPolicy); err == nil {
headscale = *pheadscale
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
s.headscale = *pheadscale
} else {
log.Fatalf("Could not start resource: %s", err)
}
@@ -270,15 +210,15 @@ func (s *IntegrationTestSuite) SetupSuite() {
for i := 0; i < scales.count; i++ {
version := tailscaleVersions[i%len(tailscaleVersions)]
hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version)
hostname, container := s.tailscaleContainer(namespace, fmt.Sprint(i), version)
scales.tailscales[hostname] = *container
}
}
fmt.Println("Waiting for headscale to be ready")
hostEndpoint := fmt.Sprintf("localhost:%s", headscale.GetPort("8080/tcp"))
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))
if err := pool.Retry(func() error {
if err := s.pool.Retry(func() error {
url := fmt.Sprintf("http://%s/health", hostEndpoint)
resp, err := http.Get(url)
if err != nil {
@@ -299,8 +239,8 @@ func (s *IntegrationTestSuite) SetupSuite() {
for namespace, scales := range s.namespaces {
fmt.Printf("Creating headscale namespace: %s\n", namespace)
result, err := executeCommand(
&headscale,
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "namespaces", "create", namespace},
[]string{},
)
@@ -308,8 +248,8 @@ func (s *IntegrationTestSuite) SetupSuite() {
assert.Nil(s.T(), err)
fmt.Printf("Creating pre auth key for %s\n", namespace)
authKey, err := executeCommand(
&headscale,
preAuthResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
"--namespace",
@@ -319,11 +259,17 @@ func (s *IntegrationTestSuite) SetupSuite() {
"--reusable",
"--expiration",
"24h",
"--output",
"json",
},
[]string{},
[]string{"LOG_LEVEL=error"},
)
assert.Nil(s.T(), err)
var preAuthKey v1.PreAuthKey
err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
assert.Nil(s.T(), err)
headscaleEndpoint := "http://headscale:8080"
fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
@@ -334,14 +280,14 @@ func (s *IntegrationTestSuite) SetupSuite() {
"-login-server",
headscaleEndpoint,
"--authkey",
strings.TrimSuffix(authKey, "\n"),
preAuthKey.Key,
"--hostname",
hostname,
}
fmt.Println("Join command:", command)
fmt.Printf("Running join command for %s\n", hostname)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -367,8 +313,8 @@ func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteI
func (s *IntegrationTestSuite) TestListNodes() {
for namespace, scales := range s.namespaces {
fmt.Println("Listing nodes")
result, err := executeCommand(
&headscale,
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "--namespace", namespace, "nodes", "list"},
[]string{},
)
@@ -419,7 +365,7 @@ func (s *IntegrationTestSuite) TestStatus() {
command := []string{"tailscale", "status", "--json"}
fmt.Printf("Getting status for %s\n", hostname)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -481,7 +427,7 @@ func (s *IntegrationTestSuite) TestPingAllPeers() {
}
fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -500,22 +446,28 @@ func (s *IntegrationTestSuite) TestSharedNodes() {
main := s.namespaces["main"]
shared := s.namespaces["shared"]
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "list", "-o", "json", "--namespace", "shared"},
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "nodes", "list", "--output", "json", "--namespace", "shared"},
[]string{},
)
assert.Nil(s.T(), err)
var machineList []Machine
var machineList []v1.Machine
err = json.Unmarshal([]byte(result), &machineList)
assert.Nil(s.T(), err)
for _, machine := range machineList {
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "share", "--identifier", fmt.Sprint(machine.ID), "--namespace", "main"},
result, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
"nodes",
"share",
"--identifier", fmt.Sprint(machine.Id),
"--namespace", "main",
},
[]string{},
)
assert.Nil(s.T(), err)
@@ -523,8 +475,8 @@ func (s *IntegrationTestSuite) TestSharedNodes() {
fmt.Println("Shared node with result: ", result)
}
result, err = executeCommand(
&headscale,
result, err = ExecuteCommand(
&s.headscale,
[]string{"headscale", "nodes", "list", "--namespace", "main"},
[]string{},
)
@@ -568,7 +520,7 @@ func (s *IntegrationTestSuite) TestSharedNodes() {
}
fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, mainIps[hostname], peername, ip)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -591,7 +543,7 @@ func (s *IntegrationTestSuite) TestTailDrop() {
for hostname, tailscale := range scales.tailscales {
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
_, err := executeCommand(
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -628,7 +580,7 @@ func (s *IntegrationTestSuite) TestTailDrop() {
fmt.Sprintf("%s/v0/put/file_from_%s", peerAPI, hostname),
}
fmt.Printf("Sending file from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
_, err = executeCommand(
_, err = ExecuteCommand(
&tailscale,
command,
[]string{"ALL_PROXY=socks5://localhost:1055"},
@@ -655,7 +607,7 @@ func (s *IntegrationTestSuite) TestTailDrop() {
"get",
"/tmp/",
}
_, err := executeCommand(
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -669,7 +621,7 @@ func (s *IntegrationTestSuite) TestTailDrop() {
fmt.Sprintf("/tmp/file_from_%s", peername),
}
fmt.Printf("Checking file in %s (%s) from %s (%s)\n", hostname, ips[hostname], peername, ip)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -707,7 +659,7 @@ func (s *IntegrationTestSuite) TestMagicDNS() {
peername,
ip,
)
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -727,7 +679,7 @@ func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, e
for hostname, tailscale := range tailscales {
command := []string{"tailscale", "ip"}
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -755,7 +707,7 @@ func getAPIURLs(tailscales map[string]dockertest.Resource) (map[netaddr.IP]strin
"/run/tailscale/tailscaled.sock",
"http://localhost/localapi/v0/file-targets",
}
result, err := executeCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},