diff --git a/integration/route_test.go b/integration/route_test.go index 479a6aaf..be1503e3 100644 --- a/integration/route_test.go +++ b/integration/route_test.go @@ -1,6 +1,7 @@ package integration import ( + "fmt" "net/netip" "sort" "testing" @@ -1035,15 +1036,16 @@ func assertNodeRouteCount(t *testing.T, node *v1.Node, announced, approved, subn assert.Len(t, node.GetSubnetRoutes(), subnet) } -func TestHASubnetRouterFailover2(t *testing.T) { +func TestSubnetRouterMultiNetwork(t *testing.T) { IntegrationSkip(t) t.Parallel() spec := ScenarioSpec{ - NodesPerUser: 4, - Users: []string{"user1"}, + NodesPerUser: 1, + Users: []string{"user1", "user2"}, Networks: map[string][]string{ "usernet1": {"user1"}, + "usernet2": {"user2"}, }, ExtraService: map[string][]extraServiceFunc{ "usernet1": {Webservice}, @@ -1054,7 +1056,7 @@ func TestHASubnetRouterFailover2(t *testing.T) { require.NoErrorf(t, err, "failed to create scenario: %s", err) defer scenario.ShutdownAssertNoPanics(t) - err = scenario.CreateHeadscaleEnv([]tsic.Option{}, + err = scenario.CreateHeadscaleEnv([]tsic.Option{tsic.WithAcceptRoutes()}, hsic.WithTestName("clienableroute"), hsic.WithEmbeddedDERPServerOnly(), hsic.WithTLS(), @@ -1069,6 +1071,96 @@ func TestHASubnetRouterFailover2(t *testing.T) { headscale, err := scenario.Headscale() assertNoErrGetHeadscale(t, err) + assert.NotNil(t, headscale) + + pref, err := scenario.SubnetOfNetwork("usernet1") + require.NoError(t, err) + + var user1c, user2c TailscaleClient + + for _, c := range allClients { + s := c.MustStatus() + if s.User[s.Self.UserID].LoginName == "user1@test.no" { + user1c = c + } + if s.User[s.Self.UserID].LoginName == "user2@test.no" { + user2c = c + } + } + require.NotNil(t, user1c) + require.NotNil(t, user2c) + + // Advertise the route for the dockersubnet of user1 + command := []string{ + "tailscale", + "set", + "--advertise-routes=" + pref.String(), + } + _, _, err = user1c.Execute(command) + require.NoErrorf(t, err, "failed to advertise 
route: %s", err)

	nodes, err := headscale.ListNodes()
	require.NoError(t, err)
	assert.Len(t, nodes, 2)
	assertNodeRouteCount(t, nodes[0], 1, 0, 0)

	// Verify that no routes have been sent to the client,
	// they are not yet enabled.
	status, err := user1c.Status()
	require.NoError(t, err)

	for _, peerKey := range status.Peers() {
		peerStatus := status.Peer[peerKey]

		assert.Nil(t, peerStatus.PrimaryRoutes)
		assertPeerSubnetRoutes(t, peerStatus, nil)
	}

	// Enable route
	_, err = headscale.ApproveRoutes(
		nodes[0].Id,
		[]netip.Prefix{*pref},
	)
	require.NoError(t, err)

	time.Sleep(5 * time.Second)

	nodes, err = headscale.ListNodes()
	require.NoError(t, err)
	assert.Len(t, nodes, 2)
	assertNodeRouteCount(t, nodes[0], 1, 1, 1)

	// Verify that the routes have been sent to the client,
	// now that they are enabled.
	status, err = user2c.Status()
	require.NoError(t, err)

	for _, peerKey := range status.Peers() {
		peerStatus := status.Peer[peerKey]

		assert.Nil(t, peerStatus.PrimaryRoutes)
		assertPeerSubnetRoutes(t, peerStatus, []netip.Prefix{*pref})
	}

	usernet1, err := scenario.Network("usernet1")
	require.NoError(t, err)

	services, err := scenario.Services("usernet1")
	require.NoError(t, err)
	require.Len(t, services, 1)

	web := services[0]
	webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))

	url := fmt.Sprintf("http://%s/etc/hostname", webip)
	t.Logf("url from %s to %s", user2c.Hostname(), url)

	result, err := user2c.Curl(url)
	require.NoError(t, err)
	assert.Len(t, result, 13)

	stdout, stderr, err := user2c.Execute([]string{"traceroute", webip.String()})
	assert.Contains(t, stdout+stderr, user1c.MustIPv4().String())
}

// requirePeerSubnetRoutes asserts that the peer has the expected subnet routes.
diff --git a/integration/scenario.go b/integration/scenario.go index d1303e45..b1cbbe34 100644 --- a/integration/scenario.go +++ b/integration/scenario.go @@ -150,6 +150,10 @@ type ScenarioSpec struct { var TestHashPrefix = "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength) var TestDefaultNetwork = TestHashPrefix + "-default" +func prefixedNetworkName(name string) string { + return TestHashPrefix + "-" + name +} + // NewScenario creates a test Scenario which can be used to bootstraps a ControlServer with // a set of Users and TailscaleClients. func NewScenario(spec ScenarioSpec) (*Scenario, error) { @@ -201,7 +205,7 @@ func NewScenario(spec ScenarioSpec) (*Scenario, error) { if err != nil { return nil, err } - s.extraServices[TestHashPrefix+"-"+network] = append(s.extraServices[TestHashPrefix+"-"+network], svc) + mak.Set(&s.extraServices, prefixedNetworkName(network), append(s.extraServices[prefixedNetworkName(network)], svc)) } } @@ -248,6 +252,42 @@ func (s *Scenario) Networks() []*dockertest.Network { return xmaps.Values(s.networks) } +func (s *Scenario) Network(name string) (*dockertest.Network, error) { + net, ok := s.networks[prefixedNetworkName(name)] + if !ok { + return nil, fmt.Errorf("no network named: %s", name) + } + + return net, nil +} + +func (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) { + net, ok := s.networks[prefixedNetworkName(name)] + if !ok { + return nil, fmt.Errorf("no network named: %s", name) + } + + for _, ipam := range net.Network.IPAM.Config { + pref, err := netip.ParsePrefix(ipam.Subnet) + if err != nil { + return nil, err + } + + return &pref, nil + } + + return nil, fmt.Errorf("no prefix found in network: %s", name) +} + +func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) { + res, ok := s.extraServices[prefixedNetworkName(name)] + if !ok { + return nil, fmt.Errorf("no network named: %s", name) + } + + return res, nil +} + func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) 
{ s.controlServers.Range(func(_ string, control ControlServer) bool { stdoutPath, stderrPath, err := control.Shutdown() @@ -298,10 +338,12 @@ func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) { } } - for _, svc := range s.extraServices { - err := svc.Close() - if err != nil { - log.Printf("failed to tear down service %q: %s", svc.Container.Name, err) + for _, svcs := range s.extraServices { + for _, svc := range svcs { + err := svc.Close() + if err != nil { + log.Printf("failed to tear down service %q: %s", svc.Container.Name, err) + } } } @@ -1125,14 +1167,14 @@ func Webservice(s *Scenario, networkName string) (*dockertest.Resource, error) { hostname := fmt.Sprintf("hs-webservice-%s", hash) - network, ok := s.networks[TestHashPrefix+"-"+networkName] + network, ok := s.networks[prefixedNetworkName(networkName)] if !ok { return nil, fmt.Errorf("network does not exist: %s", networkName) } webOpts := &dockertest.RunOptions{ Name: hostname, - Cmd: []string{"/bin/sh", "-c", "python3 -m http.server --bind :: 80"}, + Cmd: []string{"/bin/sh", "-c", "cd / ; python3 -m http.server --bind :: 80"}, // ExposedPorts: []string{portNotation}, // PortBindings: map[docker.Port][]docker.PortBinding{ // docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}}, diff --git a/integration/tailscale.go b/integration/tailscale.go index 9ab6e1e2..2b383c2c 100644 --- a/integration/tailscale.go +++ b/integration/tailscale.go @@ -27,6 +27,9 @@ type TailscaleClient interface { Up() error Down() error IPs() ([]netip.Addr, error) + MustIPs() []netip.Addr + MustIPv4() netip.Addr + MustIPv6() netip.Addr FQDN() (string, error) Status(...bool) (*ipnstate.Status, error) MustStatus() *ipnstate.Status diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go index b60393f7..18373fc3 100644 --- a/integration/tsic/tsic.go +++ b/integration/tsic/tsic.go @@ -82,6 +82,7 @@ type TailscaleInContainer struct { workdir string netfilter string extraLoginArgs []string + withAcceptRoutes bool // 
build options, solely for HEAD buildConfig TailscaleInContainerBuildConfig @@ -197,6 +198,13 @@ func WithExtraLoginArgs(args []string) Option { } } +// WithAcceptRoutes tells the node to accept incoming routes. +func WithAcceptRoutes() Option { + return func(tsic *TailscaleInContainer) { + tsic.withAcceptRoutes = true + } + } + // New returns a new TailscaleInContainer instance. func New( pool *dockertest.Pool, @@ -429,7 +437,7 @@ func (t *TailscaleInContainer) Login( "--login-server=" + loginServer, "--authkey=" + authKey, "--hostname=" + t.hostname, - "--accept-routes=false", + fmt.Sprintf("--accept-routes=%t", t.withAcceptRoutes), } if t.extraLoginArgs != nil { @@ -584,6 +592,33 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) { return ips, nil } +func (t *TailscaleInContainer) MustIPs() []netip.Addr { + ips, err := t.IPs() + if err != nil { + panic(err) + } + + return ips +} + +func (t *TailscaleInContainer) MustIPv4() netip.Addr { + for _, ip := range t.MustIPs() { + if ip.Is4() { + return ip + } + } + panic("no ipv4 found") +} + +func (t *TailscaleInContainer) MustIPv6() netip.Addr { + for _, ip := range t.MustIPs() { + if ip.Is6() { + return ip + } + } + panic("no ipv6 found") +} + // Status returns the ipnstate.Status of the Tailscale instance. func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) { command := []string{