
Make saving logs from tests an option (default false)

We currently have some flaky logic that prevents the docker plugin
from cleaning up the containers if the tests or the setup fatal or
crash; this is due to a limitation in the save / passed-stats handling.

This change gates log saving behind an environment variable which, by
default, ditches the logs and lets the containers clean up "correctly"
in the teardown method.
Kristoffer Dalby 2022-06-27 11:56:37 +00:00
parent 4a200c308b
commit 911c5bddce
3 changed files with 113 additions and 32 deletions

View File

@@ -6,7 +6,10 @@ package headscale
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"strconv"
 	"strings"
 	"time"
@@ -16,9 +19,13 @@ import (
 	"inet.af/netaddr"
 )

-const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+const (
+	DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+)

 var (
+	errEnvVarEmpty = errors.New("getenv: environment variable empty")
+
 	IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
 	IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
@@ -283,3 +290,25 @@ func getMagicFQDN(
 	return hostnames, nil
 }
+
+func GetEnvStr(key string) (string, error) {
+	v := os.Getenv(key)
+	if v == "" {
+		return v, errEnvVarEmpty
+	}
+
+	return v, nil
+}
+
+func GetEnvBool(key string) (bool, error) {
+	s, err := GetEnvStr(key)
+	if err != nil {
+		return false, err
+	}
+
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return false, err
+	}
+
+	return v, nil
+}
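A standalone sketch (not part of the commit) of the behaviour these helpers give the test suites: an unset, empty, or unparsable HEADSCALE_INTEGRATION_SAVE_LOG produces an error, the callers treat that error as "keep the default of false", and only a value strconv.ParseBool recognises as true actually enables log saving.

// env_default_sketch.go — illustration only, not headscale code.
package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
)

var errEnvVarEmpty = errors.New("getenv: environment variable empty")

// Slightly compacted copies of the helpers added above, so this sketch
// compiles on its own.
func GetEnvStr(key string) (string, error) {
	v := os.Getenv(key)
	if v == "" {
		return v, errEnvVarEmpty
	}

	return v, nil
}

func GetEnvBool(key string) (bool, error) {
	s, err := GetEnvStr(key)
	if err != nil {
		return false, err
	}

	return strconv.ParseBool(s)
}

func main() {
	// Unset or empty: the error makes callers keep the default of false.
	if _, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG"); err != nil {
		fmt.Println("saveLogs stays false:", err)
	}

	// Any spelling strconv.ParseBool accepts as true ("1", "t", "true",
	// "TRUE", ...) switches log saving on.
	os.Setenv("HEADSCALE_INTEGRATION_SAVE_LOG", "true")
	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
	fmt.Println(saveLogs, err) // true <nil>

	// Unparsable values ("yes", "on", ...) return an error, so the test
	// suites fall back to the default for them as well.
	os.Setenv("HEADSCALE_INTEGRATION_SAVE_LOG", "yes")
	if _, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG"); err != nil {
		fmt.Println("saveLogs stays false:", err)
	}
}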

View File

@@ -40,41 +40,50 @@ type IntegrationDERPTestSuite struct {
 	pool      dockertest.Pool
 	networks  map[int]dockertest.Network // so we keep the containers isolated
 	headscale dockertest.Resource
+	saveLogs  bool

 	tailscales    map[string]dockertest.Resource
 	joinWaitGroup sync.WaitGroup
 }

 func TestDERPIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationDERPTestSuite)
 	s.tailscales = make(map[string]dockertest.Resource)
 	s.networks = make(map[int]dockertest.Network)
+	s.saveLogs = saveLogs

 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range s.tailscales {
-		if err := s.pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
-		}
-	}
+	if s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}

-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
-		}
-	}
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
+		}

-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}

-	for _, network := range s.networks {
-		if err := network.Close(); err != nil {
-			log.Printf("Could not close network: %s\n", err)
-		}
-	}
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
+		}
+	}
 }
@@ -290,6 +299,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(
 }

 func (s *IntegrationDERPTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
+		}
+	}
 }

 func (s *IntegrationDERPTestSuite) HandleStats(
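The reason the purge logic now lives in two places is the testify lifecycle: for suites that implement suite.WithStats, HandleStats runs after TearDownSuite, and only after both does suite.Run return to the Test function. A minimal, self-contained sketch of that ordering (hypothetical OrderSuite, not headscale code):

package ordering_test

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/suite"
)

// OrderSuite only records which lifecycle hooks fire, and in what order.
type OrderSuite struct {
	suite.Suite
	events []string
}

func (s *OrderSuite) TestNoop() {
	s.events = append(s.events, "test method")
}

func (s *OrderSuite) TearDownSuite() {
	s.events = append(s.events, "TearDownSuite")
}

// HandleStats makes the suite implement suite.WithStats; testify calls it
// after TearDownSuite with the collected results.
func (s *OrderSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.events = append(s.events, "HandleStats passed="+strconv.FormatBool(stats.Passed()))
}

func TestLifecycleOrder(t *testing.T) {
	s := new(OrderSuite)
	suite.Run(t, s)

	// By the time suite.Run returns, TearDownSuite and then HandleStats have
	// already run, which is why cleanup that depends on the pass/fail stats
	// (saving logs before purging the containers) has to live here instead
	// of in TearDownSuite.
	s.events = append(s.events, "after suite.Run")
	t.Logf("%v", s.events)
	// Expected: [test method TearDownSuite HandleStats passed=true after suite.Run]
}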

View File

@@ -36,6 +36,7 @@ type IntegrationTestSuite struct {
 	pool      dockertest.Pool
 	network   dockertest.Network
 	headscale dockertest.Resource
+	saveLogs  bool

 	namespaces map[string]TestNamespace
@@ -43,6 +44,11 @@ type IntegrationTestSuite struct {
 }

 func TestIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationTestSuite)

 	s.namespaces = map[string]TestNamespace{
@@ -55,32 +61,35 @@ func TestIntegrationTestSuite(t *testing.T) {
 			tailscales: make(map[string]dockertest.Resource),
 		},
 	}
+	s.saveLogs = saveLogs

 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, scales := range s.namespaces {
-		for _, tailscale := range scales.tailscales {
-			if err := s.pool.Purge(&tailscale); err != nil {
-				log.Printf("Could not purge resource: %s\n", err)
-			}
-		}
-	}
+	if s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
+			}
+		}

-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
-		}
-	}
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
+		}

-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}

-	if err := s.network.Close(); err != nil {
-		log.Printf("Could not close network: %s\n", err)
-	}
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
+	}
 }
@@ -338,6 +347,23 @@ func (s *IntegrationTestSuite) SetupSuite() {
 }

 func (s *IntegrationTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
+	}
 }

 func (s *IntegrationTestSuite) HandleStats(
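The hunks reference s.stats and s.saveLog but are cut off before showing HandleStats. In this pattern the handler typically does nothing more than record the suite results so that the code after suite.Run can inspect them; a hedged sketch of what that could look like (field name stats assumed, not shown in this diff):

// HandleStats implements testify's suite.WithStats. It runs after
// TearDownSuite and only stores the results here; the decision to save
// logs and purge the containers is made after suite.Run returns, where
// both the stats and (when HEADSCALE_INTEGRATION_SAVE_LOG is set) the
// still-running containers are available.
func (s *IntegrationTestSuite) HandleStats(
	suiteName string,
	stats *suite.SuiteInformation,
) {
	s.stats = stats
}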