work
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
parent d5fdbf16c2
commit 1b318c389e

.golangci.yaml (131 lines changed)
@@ -1,74 +1,79 @@
---
run:
timeout: 10m
build-tags:
- ts2019

issues:
skip-dirs:
- gen
version: "2"
linters:
enable-all: true
default: all
disable:
- revive
- lll
- gofmt
- cyclop
- depguard
- dupl
- exhaustruct
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- funlen
- tagliatelle
- godox
- ireturn
- execinquery
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard
- exportloopref
- tenv

# We should strive to enable these:
- wrapcheck
- dupl
- makezero
- maintidx

# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat

# We might want to enable this, but it might be a lot of work
- cyclop
- ireturn
- lll
- maintidx
- makezero
- musttag
- nestif
- wsl # might be incompatible with gofumpt
- testpackage
- nolintlint
- paralleltest
- revive
- tagliatelle
- testpackage
- wrapcheck
- wsl
settings:
gocritic:
disabled-checks:
- appendAssign
- ifElseChain
nlreturn:
block-size: 4
varnamelen:
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx
- sb
- wg
- pr
- p
- p2
ignore-type-assert-ok: true
ignore-map-index-ok: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
- gen

linters-settings:
varnamelen:
ignore-type-assert-ok: true
ignore-map-index-ok: true
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx
- sb
- wg
- pr
- p
- p2

gocritic:
disabled-checks:
- appendAssign
# TODO(kradalby): Remove this
- ifElseChain

nlreturn:
block-size: 4
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- gen
@@ -285,9 +285,6 @@ func (h *Headscale) handleRegisterInteractive(
nodeToRegister.Node.Expiry = &regReq.Expiry
}

// Ensure any auto approved routes are handled before saving.
policy.AutoApproveRoutes(h.polMan, &nodeToRegister.Node)

h.registrationCache.Set(
registrationId,
nodeToRegister,

@@ -6,6 +6,7 @@ import (

"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"

@@ -87,9 +88,13 @@ func AutoApproveRoutes(pm PolicyManager, node *types.Node) bool {
if pm == nil {
return false
}

log.Trace().Msgf("AUTO APPROVE: node %s %d", node.Hostname, node.ID)
var newApproved []netip.Prefix
for _, route := range node.AnnouncedRoutes() {
log.Trace().Msgf("AUTO APPROVE: node %s %d, checking %s", node.Hostname, node.ID, route.String())
if pm.NodeCanApproveRoute(node, route) {
log.Trace().Msgf("AUTO APPROVE: node %s %d, checking %s, %v", node.Hostname, node.ID, route.String(), true)
newApproved = append(newApproved, route)
}
}
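The hunk above only collects, into newApproved, the announced routes that the policy manager will approve; how the caller then folds those into the node's already approved routes before persisting is not shown here. A minimal sketch of that bookkeeping under that assumption (mergeApproved is a made-up helper, not headscale code):

package main

import (
	"fmt"
	"net/netip"
	"slices"
	"strings"
)

// mergeApproved unions already-approved routes with newly auto-approved ones,
// returning a sorted, de-duplicated list. It only illustrates the shape of the
// merge a caller of AutoApproveRoutes is assumed to perform before saving.
func mergeApproved(current, newApproved []netip.Prefix) []netip.Prefix {
	merged := append(slices.Clone(current), newApproved...)
	slices.SortFunc(merged, func(a, b netip.Prefix) int {
		return strings.Compare(a.String(), b.String())
	})
	return slices.Compact(merged)
}

func main() {
	current := []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")}
	newApproved := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("192.168.1.0/24"),
	}
	// Prints the union with the duplicate 10.0.0.0/8 removed.
	fmt.Println(mergeApproved(current, newApproved))
}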
@@ -7,6 +7,9 @@ import (
"os"
"sync"

"slices"

"github.com/davecgh/go-spew/spew"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"

@@ -145,13 +148,7 @@ func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
tags, invalid := pm.pol.TagsOfNode(pm.users, node)
log.Debug().Strs("authorised_tags", tags).Strs("unauthorised_tags", invalid).Uint64("node.id", node.ID.Uint64()).Msg("tags provided by policy")

for _, t := range tags {
if t == tag {
return true
}
}

return false
return slices.Contains(tags, tag)
}

func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {

@@ -163,18 +160,27 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
defer pm.mu.Unlock()

approvers, _ := pm.pol.AutoApprovers.GetRouteApprovers(route)
log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, approvers: %v", node.ID, route.String(), approvers)

for _, approvedAlias := range approvers {
if approvedAlias == node.User.Username() {
return true
} else {
log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, expanding: %s", node.ID, route.String(), approvedAlias)

ips, err := pm.pol.ExpandAlias(pm.nodes, pm.users, approvedAlias)
if err != nil {
return false
}
log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, ips: %v", node.ID, route.String(), ips.Prefixes())
log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, contains? %v", node.ID, route.String(), node.IPs())
// log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, users %v", node.ID, route.String(), pm.users)
// log.Trace().Msgf("AUTO APPROVE: node %d, checking %s, nodes %v", node.ID, route.String(), pm.nodes)
spew.Dump(pm.users)
spew.Dump(pm.nodes)

// approvedIPs should contain all of node's IPs if it matches the rule, so check for first
if ips.Contains(*node.IPv4) {
if ips != nil && ips.Contains(*node.IPv4) {
return true
}
}
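The nil guard added above (ips != nil && ips.Contains(*node.IPv4)) matters because ExpandAlias can yield no IP set for an approver alias. A small, self-contained sketch of the same nil-safe containment check using go4.org/netipx (the IPSet type used elsewhere in this diff); containsAny is a made-up helper name:

package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

// containsAny reports whether the set contains any of the given addresses,
// treating a nil set (an alias that resolved to nothing) as empty.
func containsAny(set *netipx.IPSet, addrs []netip.Addr) bool {
	if set == nil {
		return false
	}
	for _, a := range addrs {
		if set.Contains(a) {
			return true
		}
	}
	return false
}

func main() {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("100.64.0.0/10"))
	set, _ := b.IPSet()

	node := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
	fmt.Println(containsAny(set, node)) // true
	fmt.Println(containsAny(nil, node)) // false: a nil set never matches
}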
@@ -7,7 +7,10 @@ import (
"strings"
"sync"

"slices"

"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"

@@ -83,10 +86,12 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
pm.tagOwnerMap = tagMap
pm.tagOwnerMapHash = tagOwnerMapHash

log.Printf("AUTO APP: BUILDING AUTO APProvers")
autoMap, err := resolveAutoApprovers(pm.pol, pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("resolving auto approvers map: %w", err)
}
log.Printf("AUTO APP: BUILDING AUTO APProvers DONE")

autoApproveMapHash := deephash.Hash(&autoMap)
autoApproveChanged := autoApproveMapHash != pm.autoApproveMapHash

@@ -174,10 +179,8 @@ func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
defer pm.mu.Unlock()

if ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {
for _, nodeAddr := range node.IPs() {
if ips.Contains(nodeAddr) {
return true
}
if slices.ContainsFunc(node.IPs(), ips.Contains) {
return true
}
}

@@ -192,14 +195,14 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
pm.mu.Lock()
defer pm.mu.Unlock()

log.Debug().Msg(pm.DebugString())

// The fast path is that a node requests to approve a prefix
// where there is an exact entry, e.g. 10.0.0.0/8, then
// check and return quickly
if _, ok := pm.autoApproveMap[route]; ok {
for _, nodeAddr := range node.IPs() {
if pm.autoApproveMap[route].Contains(nodeAddr) {
return true
}
if slices.ContainsFunc(node.IPs(), pm.autoApproveMap[route].Contains) {
return true
}
}

@@ -220,10 +223,8 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
// Check if prefix is larger (so containing) and then overlaps
// the route to see if the node can approve a subset of an autoapprover
if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
for _, nodeAddr := range node.IPs() {
if approveAddrs.Contains(nodeAddr) {
return true
}
if slices.ContainsFunc(node.IPs(), approveAddrs.Contains) {
return true
}
}
}
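The comments above describe a two-step lookup: first an exact entry for the requested prefix in the auto-approve map, then a scan for a larger auto-approved prefix that contains and overlaps the route, with slices.ContainsFunc doing the node-IP membership test in both branches. A self-contained sketch of that shape; the plain map below stands in for pm.autoApproveMap and all names and values are illustrative, not headscale's:

package main

import (
	"fmt"
	"net/netip"
	"slices"

	"go4.org/netipx"
)

// canApprove mirrors the exact-hit fast path followed by the containing-prefix
// scan shown in NodeCanApproveRoute above. It is a sketch, not headscale code.
func canApprove(autoApprove map[netip.Prefix]*netipx.IPSet, nodeIPs []netip.Addr, route netip.Prefix) bool {
	// Fast path: the requested route has its own auto-approver entry.
	if set, ok := autoApprove[route]; ok && set != nil {
		if slices.ContainsFunc(nodeIPs, set.Contains) {
			return true
		}
	}

	// Slow path: a larger (containing) auto-approved prefix covers the route.
	for prefix, set := range autoApprove {
		if set == nil {
			continue
		}
		if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) &&
			slices.ContainsFunc(nodeIPs, set.Contains) {
			return true
		}
	}

	return false
}

func main() {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("100.64.0.0/10")) // addresses allowed to approve
	approvers, _ := b.IPSet()

	autoApprove := map[netip.Prefix]*netipx.IPSet{
		netip.MustParsePrefix("10.0.0.0/8"): approvers,
	}
	nodeIPs := []netip.Addr{netip.MustParseAddr("100.64.0.1")}

	fmt.Println(canApprove(autoApprove, nodeIPs, netip.MustParsePrefix("10.0.0.0/8")))     // exact entry
	fmt.Println(canApprove(autoApprove, nodeIPs, netip.MustParsePrefix("10.1.0.0/16")))    // contained subnet
	fmt.Println(canApprove(autoApprove, nodeIPs, netip.MustParsePrefix("192.168.0.0/24"))) // no approver
}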
@@ -279,5 +280,8 @@ func (pm *PolicyManager) DebugString() string {
}
}

sb.WriteString("\n\n")
sb.WriteString(pm.nodes.DebugString())

return sb.String()
}
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/netip"
"strings"
"time"

@@ -160,6 +161,10 @@ func (g Group) CanBeAutoApprover() bool {
return true
}

func (g Group) String() string {
return string(g)
}

func (g Group) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error

@@ -209,15 +214,24 @@ func (t Tag) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
return nil, err
}

log.Printf("AUTO APP: TAGMAP: %+v", tagMap)
for tag, ips := range tagMap {
log.Printf("AUTO APP: TAG %s, %v", tag, ips.Prefixes())
}

for _, node := range nodes {
if node.HasTag(string(t)) {
log.Printf("AUTO APP: NODE %d %q HAS TAG %s", node.ID, node.Hostname, t)
node.AppendToIPSet(&ips)
}

// TODO(kradalby): remove as part of #2417, see comment above
if tagMap != nil {
log.Printf("AUTO APP: NODE %d %q CHECKING REQUESTED TAGS: %v", node.ID, node.Hostname, node.Hostinfo.RequestTags)
if tagips, ok := tagMap[t]; ok && node.InIPSet(tagips) && node.Hostinfo != nil {
log.Printf("AUTO APP: NODE %d %q CHECKING tagips %v", node.ID, node.Hostname, tagips.Prefixes())
for _, tag := range node.Hostinfo.RequestTags {
log.Printf("AUTO APP: NODE %d %q CHECKING requested tag %s", node.ID, node.Hostname, tag)
if tag == string(t) {
node.AppendToIPSet(&ips)
}

@@ -233,12 +247,16 @@ func (t Tag) CanBeAutoApprover() bool {
return true
}

func (t Tag) String() string {
return string(t)
}

// Host is a string that represents a hostname.
type Host string

func (h Host) Validate() error {
if isHost(string(h)) {
fmt.Errorf("Hostname %q is invalid", h)
return fmt.Errorf("Hostname %q is invalid", h)
}
return nil
}

@@ -591,6 +609,7 @@ func unmarshalPointer[T any](
type AutoApprover interface {
CanBeAutoApprover() bool
UnmarshalJSON([]byte) error
String() string
}

type AutoApprovers []AutoApprover

@@ -826,6 +845,7 @@ func resolveAutoApprovers(p *Policy, users types.Users, nodes types.Nodes) (map[
}
// If it does not resolve, that means the autoApprover is not associated with any IP addresses.
ips, _ := aa.Resolve(p, users, nodes)
log.Printf("AUTO APP RESOLVED: tag: %q pref: %s ips: %v", autoApprover.String(), prefix, ips.Prefixes())
routes[prefix].AddSet(ips)
}
}
@@ -462,6 +462,7 @@ func (m *mapSession) handleEndpointUpdate() {
// auto approved. Any change here is not important as any
// actual state change will be detected when the route manager
// is updated.
log.Trace().Msgf("AUTO APPROVE ROUTES CHANGE")
policy.AutoApproveRoutes(m.h.polMan, m.node)

// Update the routes of the given node in the route manager to
@@ -5,6 +5,7 @@ import (
"fmt"
"net/netip"
"slices"
"sort"
"strconv"
"strings"
"time"

@@ -190,19 +191,26 @@ func (node *Node) IsTagged() bool {
// Currently, this function only handles tags set
// via CLI ("forced tags" and preauthkeys)
func (node *Node) HasTag(tag string) bool {
if slices.Contains(node.ForcedTags, tag) {
return true
}
return slices.Contains(node.Tags(), tag)
}

if node.AuthKey != nil && slices.Contains(node.AuthKey.Tags, tag) {
return true
func (node *Node) Tags() []string {
var tags []string

if node.AuthKey != nil {
tags = append(tags, node.AuthKey.Tags...)
}

// TODO(kradalby): Figure out how tagging should work
// and hostinfo.requestedtags.
// Do this in other work.
// #2417

return false
tags = append(tags, node.ForcedTags...)
sort.Strings(tags)
tags = slices.Compact(tags)

return tags
}

func (node *Node) RequestTags() []string {
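The new Tags accessor above merges pre-auth-key tags with forced tags into one sorted, de-duplicated list, and HasTag collapses to a slices.Contains over it. A minimal sketch of the same append/sort/compact pattern, with plain slices standing in for node.AuthKey.Tags and node.ForcedTags:

package main

import (
	"fmt"
	"slices"
	"sort"
)

// tags merges two tag sources, sorts them and removes duplicates, the same
// pattern the new Node.Tags uses. The inputs are illustrative stand-ins.
func tags(authKeyTags, forcedTags []string) []string {
	out := append(slices.Clone(authKeyTags), forcedTags...)
	sort.Strings(out)
	return slices.Compact(out)
}

func hasTag(all []string, tag string) bool {
	return slices.Contains(all, tag)
}

func main() {
	all := tags([]string{"tag:prod", "tag:approve"}, []string{"tag:approve"})
	fmt.Println(all)                        // [tag:approve tag:prod]
	fmt.Println(hasTag(all, "tag:approve")) // true
	fmt.Println(hasTag(all, "tag:exit"))    // false
}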
@@ -404,9 +412,9 @@ func (node *Node) SubnetRoutes() []netip.Prefix {
return routes
}

func (node *Node) String() string {
return node.Hostname
}
// func (node *Node) String() string {
// return node.Hostname
// }

// PeerChangeFromMapRequest takes a MapRequest and compares it to the node
// to produce a PeerChange struct that can be used to updated the node and

@@ -526,15 +534,15 @@ func (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) {
node.LastSeen = change.LastSeen
}

func (nodes Nodes) String() string {
temp := make([]string, len(nodes))
// func (nodes Nodes) String() string {
// temp := make([]string, len(nodes))

for index, node := range nodes {
temp[index] = node.Hostname
}
// for index, node := range nodes {
// temp[index] = node.Hostname
// }

return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp))
}
// return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp))
// }

func (nodes Nodes) IDMap() map[NodeID]*Node {
ret := map[NodeID]*Node{}

@@ -545,3 +553,25 @@ func (nodes Nodes) IDMap() map[NodeID]*Node {

return ret
}

func (nodes Nodes) DebugString() string {
var sb strings.Builder
sb.WriteString("Nodes:\n")
for _, node := range nodes {
sb.WriteString(node.DebugString())
sb.WriteString("\n")
}
return sb.String()
}

func (node Node) DebugString() string {
var sb strings.Builder
fmt.Fprintf(&sb, "%s(%s):\n", node.Hostname, node.ID)
fmt.Fprintf(&sb, "\tUser: %s (%d, %q)\n", node.User.Display(), node.User.ID, node.User.Username())
fmt.Fprintf(&sb, "\tTags: %v\n", node.Tags())
fmt.Fprintf(&sb, "\tIPs: %v\n", node.IPs())
fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes)
fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes())
sb.WriteString("\n")
return sb.String()
}
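Going by the Fprintf format strings above, the per-node portion of DebugString should come out roughly like the following; the hostname, user, IPs and routes here are invented for illustration:

router-usernet1(1):
	User: user1 (1, "user1")
	Tags: [tag:approve]
	IPs: [100.64.0.1 fd7a:115c:a1e0::1]
	ApprovedRoutes: [10.42.0.0/16]
	SubnetRoutes: [10.42.0.0/16]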
@@ -19,6 +19,20 @@ import (
"github.com/stretchr/testify/assert"
)

func oidcHSICOpts(s *Scenario) []hsic.Option {
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": s.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": s.mockOIDC.ClientID(),
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
}
return []hsic.Option{
hsic.WithConfigEnv(oidcMap),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(s.mockOIDC.ClientSecret())),
hsic.WithTLS(),
}
}

func TestOIDCAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

@@ -40,19 +54,9 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {

defer scenario.ShutdownAssertNoPanics(t)

oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
}

err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcauthping"),
hsic.WithConfigEnv(oidcMap),
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
append(oidcHSICOpts(scenario), hsic.WithTestName("oidcauthping"))...,
)
assertNoErrHeadscaleEnv(t, err)
@@ -1367,359 +1367,426 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
// - Verify that routes can now be seen by peers.
func TestAutoApproveMultiNetwork(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
},
ExtraService: map[string][]extraServiceFunc{
"usernet1": {Webservice},
},
// We build the head image with curl and traceroute, so only use
// that for this test.
Versions: []string{"head"},
}

rootRoute := netip.MustParsePrefix("10.42.0.0/16")
subRoute := netip.MustParsePrefix("10.42.7.0/24")
notApprovedRoute := netip.MustParsePrefix("192.168.0.0/24")

scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)

pol := &policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
tests := []struct {
name string
spec ScenarioSpec
withURL bool
withOIDC bool
}{
{
name: "authkey",
spec: ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
},
ExtraService: map[string][]extraServiceFunc{
"usernet1": {Webservice},
},
// We build the head image with curl and traceroute, so only use
// that for this test.
Versions: []string{"head"},
},
},
TagOwners: map[string][]string{
"tag:approve": {"user1@"},
},
AutoApprovers: policyv1.AutoApprovers{
Routes: map[string][]string{
rootRoute.String(): {"tag:approve"},
{
name: "webauth",
spec: ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
},
ExtraService: map[string][]extraServiceFunc{
"usernet1": {Webservice},
},
// We build the head image with curl and traceroute, so only use
// that for this test.
Versions: []string{"head"},
},
ExitNode: []string{"tag:approve"},
withURL: true,
},
// TODO(kradalby): multinetwork isnt really working on the oidc
// {
// name: "oidc",
// spec: ScenarioSpec{
// NodesPerUser: 3,
// Users: []string{"user1", "user2"},
// OIDCUsers: []mockoidc.MockUser{
// oidcMockUser("user1", false),
// oidcMockUser("user1", false),
// oidcMockUser("user1", false),
// oidcMockUser("user2", false),
// oidcMockUser("user2", false),
// oidcMockUser("user2", false),
// },
// Networks: map[string][]string{
// "usernet1": {"user1"},
// "usernet2": {"user2"},
// },
// ExtraService: map[string][]extraServiceFunc{
// "usernet1": {Webservice},
// },
// // We build the head image with curl and traceroute, so only use
// // that for this test.
// Versions: []string{"head"},
// },
// withURL: true,
// withOIDC: true,
// },
}

err = scenario.CreateHeadscaleEnv([]tsic.Option{
tsic.WithAcceptRoutes(),
tsic.WithTags([]string{"tag:approve"}),
},
hsic.WithTestName("clienableroute"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithACLPolicy(pol),
hsic.WithPolicyMode(types.PolicyModeDB),
)
assertNoErrHeadscaleEnv(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rootRoute := netip.MustParsePrefix("10.42.0.0/16")
subRoute := netip.MustParsePrefix("10.42.7.0/24")
notApprovedRoute := netip.MustParsePrefix("192.168.0.0/24")

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
scenario, err := NewScenario(tt.spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
// defer scenario.ShutdownAssertNoPanics(t)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
pol := &policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:approve": {"user1@"},
},
AutoApprovers: policyv1.AutoApprovers{
Routes: map[string][]string{
rootRoute.String(): {"tag:approve"},
},
ExitNode: []string{"tag:approve"},
},
}

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
assert.NotNil(t, headscale)
opts := []hsic.Option{
hsic.WithTestName("clienableroute"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithACLPolicy(pol),
hsic.WithPolicyMode(types.PolicyModeDB),
}

route, err := scenario.SubnetOfNetwork("usernet1")
require.NoError(t, err)
if tt.withOIDC {
opts = append(opts, oidcHSICOpts(scenario)...)
}

// Set the route of usernet1 to be autoapproved
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)
err = scenario.createHeadscaleEnv(tt.withURL, []tsic.Option{
tsic.WithAcceptRoutes(),
tsic.WithTags([]string{"tag:approve"}),
},
opts...,
)
assertNoErrHeadscaleEnv(t, err)

services, err := scenario.Services("usernet1")
require.NoError(t, err)
require.Len(t, services, 1)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)

usernet1, err := scenario.Network("usernet1")
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)

web := services[0]
webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("webservice: %s, %s", webip.String(), weburl)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
assert.NotNil(t, headscale)

// Sort nodes by ID
sort.SliceStable(allClients, func(i, j int) bool {
statusI := allClients[i].MustStatus()
statusJ := allClients[j].MustStatus()
route, err := scenario.SubnetOfNetwork("usernet1")
require.NoError(t, err)

return statusI.Self.ID < statusJ.Self.ID
})
// Set the route of usernet1 to be autoapproved
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)

// This is ok because the scenario makes users in order, so the three first
// nodes, which are subnet routes, will be created first, and the last user
// will be created with the second.
routerUsernet1 := allClients[0]
routerSubRoute := allClients[1]
routerExitNode := allClients[2]
services, err := scenario.Services("usernet1")
require.NoError(t, err)
require.Len(t, services, 1)

client := allClients[3]
usernet1, err := scenario.Network("usernet1")
require.NoError(t, err)

// Advertise the route for the dockersubnet of user1
command := []string{
"tailscale",
"set",
"--advertise-routes=" + route.String(),
}
_, _, err = routerUsernet1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
web := services[0]
webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("webservice: %s, %s", webip.String(), weburl)

time.Sleep(5 * time.Second)
// Sort nodes by ID
sort.SliceStable(allClients, func(i, j int) bool {
statusI := allClients[i].MustStatus()
statusJ := allClients[j].MustStatus()

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err := headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
return statusI.Self.ID < statusJ.Self.ID
})

// Verify that the routes have been sent to the client.
status, err := client.Status()
require.NoError(t, err)
// This is ok because the scenario makes users in order, so the three first
// nodes, which are subnet routes, will be created first, and the last user
// will be created with the second.
routerUsernet1 := allClients[0]
routerSubRoute := allClients[1]
routerExitNode := allClients[2]

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
client := allClients[3]

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Advertise the route for the dockersubnet of user1
command := []string{
"tailscale",
"set",
"--advertise-routes=" + route.String(),
}
_, _, err = routerUsernet1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)

url := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
time.Sleep(5 * time.Second)

result, err := client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err := headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)

tr, err := client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Verify that the routes have been sent to the client.
status, err := client.Status()
require.NoError(t, err)

// Remove the auto approval from the policy, any routes already enabled should be allowed.
delete(pol.AutoApprovers.Routes, route.String())
err = headscale.SetPolicy(pol)
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

time.Sleep(5 * time.Second)
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
url := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
result, err := client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
tr, err := client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Remove the auto approval from the policy, any routes already enabled should be allowed.
delete(pol.AutoApprovers.Routes, route.String())
err = headscale.SetPolicy(pol)
require.NoError(t, err)

url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
time.Sleep(5 * time.Second)

result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)

tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

// Disable the route, making it unavailable since it is no longer auto-approved
_, err = headscale.ApproveRoutes(
nodes[0].GetId(),
[]netip.Prefix{},
)
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

time.Sleep(5 * time.Second)
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 0, 0)
url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
requirePeerSubnetRoutes(t, peerStatus, nil)
}
tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())

// Add the route back to the auto approver in the policy, the route should
// now become available again.
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)
// Disable the route, making it unavailable since it is no longer auto-approved
_, err = headscale.ApproveRoutes(
nodes[0].GetId(),
[]netip.Prefix{},
)
require.NoError(t, err)

time.Sleep(5 * time.Second)
time.Sleep(5 * time.Second)

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 0, 0)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
requirePeerSubnetRoutes(t, peerStatus, nil)
}

if peerStatus.ID == "1" {
require.NotNil(t, peerStatus.PrimaryRoutes)
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Add the route back to the auto approver in the policy, the route should
// now become available again.
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)

url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
time.Sleep(5 * time.Second)

result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)

tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

// Advertise and validate a subnet of an auto approved route, /24 inside the
// auto approved /16.
command = []string{
"tailscale",
"set",
"--advertise-routes=" + subRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

time.Sleep(5 * time.Second)
if peerStatus.ID == "1" {
require.NotNil(t, peerStatus.PrimaryRoutes)
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 1)
url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "2" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), subRoute)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{subRoute})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Advertise and validate a subnet of an auto approved route, /24 inside the
// auto approved /16.
command = []string{
"tailscale",
"set",
"--advertise-routes=" + subRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)

// Advertise a not approved route will not end up anywhere
command = []string{
"tailscale",
"set",
"--advertise-routes=" + notApprovedRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)

time.Sleep(5 * time.Second)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 1)

// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 0, 0, 0)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "2" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), subRoute)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{subRoute})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Advertise a not approved route will not end up anywhere
command = []string{
"tailscale",
"set",
"--advertise-routes=" + notApprovedRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)

// Exit routes are also automatically approved
command = []string{
"tailscale",
"set",
"--advertise-exit-node",
}
_, _, err = routerExitNode.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)

time.Sleep(5 * time.Second)
// These route should auto approve, so the node is expected to have a route
// for all counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 0, 0, 0)

nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 2, 2, 2)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "3" {
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
// Exit routes are also automatically approved
command = []string{
"tailscale",
"set",
"--advertise-exit-node",
}
_, _, err = routerExitNode.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)

time.Sleep(5 * time.Second)

nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 2, 2, 2)

// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "3" {
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
})
}
}