
Remove variables and leftovers of pregenerated ACL content

Prior to the code reorg, we would generate rules from the Policy and
store them on the global object. Now we generate them on the fly for each
node, and this commit cleans up the old variables to make sure we have no
unexpected side effects.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2023-05-31 18:45:04 +02:00 committed by Kristoffer Dalby
parent 084d1d5d6e
commit 725bbd7408
11 changed files with 410 additions and 292 deletions
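
The commit message describes the shift: filter rules used to be computed once from the Policy, cached on the global Headscale object, and refreshed by a watcher goroutine; after this change they are computed per node at map-response time. A minimal sketch of the idea, using simplified stand-in types rather than headscale's real ones:

package main

import "fmt"

// Simplified stand-ins for headscale's types; illustrative only.
type Machine struct{ Name string }
type FilterRule struct{ Src, Dst string }
type ACLPolicy struct{ Rules []FilterRule }

// generateFilterRules plays the role of policy.GenerateFilterRules:
// rules are derived from the policy and the machine set at call time,
// instead of being read from a cached h.aclRules field.
func generateFilterRules(pol *ACLPolicy, machines []Machine) []FilterRule {
	if pol == nil {
		// The real code now returns tailcfg.FilterAllowAll here.
		return nil
	}
	return pol.Rules
}

func main() {
	pol := &ACLPolicy{Rules: []FilterRule{{Src: "mini", Dst: "peer2"}}}
	node := Machine{Name: "mini"}
	peers := []Machine{{Name: "peer1"}, {Name: "peer2"}}

	// Per-node flow: compute rules fresh for this node's map response,
	// including the node itself in the machine set.
	rules := generateFilterRules(pol, append(peers, node))
	fmt.Println(rules)
}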

View File

@@ -87,8 +87,6 @@ type Headscale struct {
 	DERPServer *DERPServer
 	ACLPolicy  *policy.ACLPolicy
-	aclRules   []tailcfg.FilterRule
-	sshPolicy  *tailcfg.SSHPolicy
 	lastStateChange *xsync.MapOf[string, time.Time]
@@ -102,12 +100,6 @@ type Headscale struct {
 	stateUpdateChan       chan struct{}
 	cancelStateUpdateChan chan struct{}
-	// TODO(kradalby): Temporary measure to make sure we can update policy
-	// across modules, will be removed when aclRules are no longer stored
-	// globally but generated per node basis.
-	policyUpdateChan       chan struct{}
-	cancelPolicyUpdateChan chan struct{}
 }

 func NewHeadscale(cfg *Config) (*Headscale, error) {
@@ -168,20 +160,15 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
 		dbString:        dbString,
 		privateKey2019:  privateKey,
 		noisePrivateKey: noisePrivateKey,
-		aclRules:        tailcfg.FilterAllowAll, // default allowall
 		registrationCache:  registrationCache,
 		pollNetMapStreamWG: sync.WaitGroup{},
 		lastStateChange:    xsync.NewMapOf[time.Time](),
 		stateUpdateChan:       make(chan struct{}),
 		cancelStateUpdateChan: make(chan struct{}),
-		policyUpdateChan:       make(chan struct{}),
-		cancelPolicyUpdateChan: make(chan struct{}),
 	}

 	go app.watchStateChannel()
-	go app.watchPolicyChannel()

 	database, err := db.NewHeadscaleDatabase(
 		cfg.DBtype,
@@ -189,7 +176,6 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
 		cfg.OIDC.StripEmaildomain,
 		app.dbDebug,
 		app.stateUpdateChan,
-		app.policyUpdateChan,
 		cfg.IPPrefixes,
 		cfg.BaseDomain)
 	if err != nil {
@@ -750,10 +736,6 @@ func (h *Headscale) Serve() error {
 	close(h.stateUpdateChan)
 	close(h.cancelStateUpdateChan)
-	<-h.cancelPolicyUpdateChan
-	close(h.policyUpdateChan)
-	close(h.cancelPolicyUpdateChan)
 	// Close db connections
 	err = h.db.Close()
 	if err != nil {
@@ -862,30 +844,6 @@ func (h *Headscale) watchStateChannel() {
 		}
 	}
 }

-// TODO(kradalby): baby steps, make this more robust.
-func (h *Headscale) watchPolicyChannel() {
-	for {
-		select {
-		case <-h.policyUpdateChan:
-			machines, err := h.db.ListMachines()
-			if err != nil {
-				log.Error().Err(err).Msg("failed to fetch machines during policy update")
-			}
-
-			rules, sshPolicy, err := policy.GenerateFilterRules(h.ACLPolicy, machines, h.cfg.OIDC.StripEmaildomain)
-			if err != nil {
-				log.Error().Err(err).Msg("failed to update ACL rules")
-			}
-
-			h.aclRules = rules
-			h.sshPolicy = sshPolicy
-		case <-h.cancelPolicyUpdateChan:
-			return
-		}
-	}
-}

 func (h *Headscale) setLastStateChangeToNow() {
 	var err error

View File

@@ -38,7 +38,6 @@ type KV struct {
 type HSDatabase struct {
 	db               *gorm.DB
 	notifyStateChan  chan<- struct{}
-	notifyPolicyChan chan<- struct{}

 	ipAllocationMutex sync.Mutex
@@ -53,7 +52,6 @@ func NewHeadscaleDatabase(
 	dbType, connectionAddr string,
 	stripEmailDomain, debug bool,
 	notifyStateChan chan<- struct{},
-	notifyPolicyChan chan<- struct{},
 	ipPrefixes []netip.Prefix,
 	baseDomain string,
 ) (*HSDatabase, error) {
@@ -65,7 +63,6 @@ func NewHeadscaleDatabase(
 	db := HSDatabase{
 		db:              dbConn,
 		notifyStateChan: notifyStateChan,
-		notifyPolicyChan: notifyPolicyChan,
 		ipPrefixes: ipPrefixes,
 		baseDomain: baseDomain,

View File

@@ -33,7 +33,7 @@ var (
 	)
 )

-// ListPeers returns all peers of machine, regardless of any Policy.
+// ListPeers returns all peers of machine, regardless of any Policy or if the node is expired.
 func (hsdb *HSDatabase) ListPeers(machine *types.Machine) (types.Machines, error) {
 	log.Trace().
 		Caller().
@@ -218,7 +218,6 @@ func (hsdb *HSDatabase) SetTags(
 	}
 	machine.ForcedTags = newTags
-	hsdb.notifyPolicyChan <- struct{}{}
 	hsdb.notifyStateChange()

 	if err := hsdb.db.Save(machine).Error; err != nil {

View File

@@ -459,7 +459,7 @@ func (s *Suite) TestSetTags(c *check.C) {
 		types.StringList([]string{"tag:bar", "tag:test", "tag:unknown"}),
 	)
-	c.Assert(channelUpdates, check.Equals, int32(4))
+	c.Assert(channelUpdates, check.Equals, int32(2))
 }

 func TestHeadscale_generateGivenName(t *testing.T) {

View File

@@ -62,7 +62,6 @@ func (s *Suite) ResetDB(c *check.C) {
 		false,
 		false,
 		sink,
-		sink,
 		[]netip.Prefix{
 			netip.MustParsePrefix("10.27.0.0/23"),
 		},

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/klauspost/compress/zstd"
 	"github.com/rs/zerolog/log"
+	"github.com/samber/lo"
 	"tailscale.com/smallzstd"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/dnstype"
@@ -69,33 +70,18 @@ func NewMapper(
 	}
 }

-func (m *Mapper) tempWrap(
-	machine *types.Machine,
-	pol *policy.ACLPolicy,
-) (*tailcfg.MapResponse, error) {
-	peers, err := m.db.ListPeers(machine)
-	if err != nil {
-		log.Error().
-			Caller().
-			Err(err).
-			Msg("Cannot fetch peers")
-
-		return nil, err
-	}
-
-	return fullMapResponse(
-		pol,
-		machine,
-		peers,
-		m.stripEmailDomain,
-		m.baseDomain,
-		m.dnsCfg,
-		m.derpMap,
-		m.logtail,
-		m.randomClientPort,
-	)
-}
-
-// fullMapResponse is the internal function for generating a MapResponse
-// for a machine.
+// TODO: Optimise
+// As this work continues, the idea is that there will be one Mapper instance
+// per node, attached to the open stream between the control and client.
+// This means that this can hold a state per machine and we can use that to
+// improve the mapresponses sent.
+// We could:
+// - Keep information about the previous mapresponse so we can send a diff
+// - Store hashes
+// - Create a "minifier" that removes info not needed for the node
 func fullMapResponse(
 	pol *policy.ACLPolicy,
 	machine *types.Machine,
@@ -113,11 +99,23 @@ func fullMapResponse(
 		return nil, err
 	}

-	rules, sshPolicy, err := policy.GenerateFilterRules(pol, peers, stripEmailDomain)
+	rules, sshPolicy, err := policy.GenerateFilterRules(
+		pol,
+		// The policy is currently calculated for the entire Headscale network
+		append(peers, *machine),
+		stripEmailDomain,
+	)
 	if err != nil {
 		return nil, err
 	}

+	// Filter out peers that have expired.
+	peers = lo.Filter(peers, func(item types.Machine, index int) bool {
+		return !item.IsExpired()
+	})
+
+	// If there are filter rules present, see if there are any machines that cannot
+	// access each other at all and remove them from the peers.
 	if len(rules) > 0 {
 		peers = policy.FilterMachinesByACL(machine, peers, rules)
 	}
@@ -278,12 +276,33 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine types.Machine) {
 	}
 }

+// CreateMapResponse returns a MapResponse for the given machine.
 func (m Mapper) CreateMapResponse(
 	mapRequest tailcfg.MapRequest,
 	machine *types.Machine,
 	pol *policy.ACLPolicy,
 ) ([]byte, error) {
-	mapResponse, err := m.tempWrap(machine, pol)
+	peers, err := m.db.ListPeers(machine)
+	if err != nil {
+		log.Error().
+			Caller().
+			Err(err).
+			Msg("Cannot fetch peers")
+
+		return nil, err
+	}
+
+	mapResponse, err := fullMapResponse(
+		pol,
+		machine,
+		peers,
+		m.stripEmailDomain,
+		m.baseDomain,
+		m.dnsCfg,
+		m.derpMap,
+		m.logtail,
+		m.randomClientPort,
+	)
 	if err != nil {
 		return nil, err
 	}
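
The expired-peer filtering above is why the github.com/samber/lo import was added. As a standalone illustration of lo.Filter with the same predicate shape (the Machine type and IsExpired below are simplified stand-ins, not headscale's real types):

package main

import (
	"fmt"
	"time"

	"github.com/samber/lo"
)

// Simplified stand-in for types.Machine; only what the predicate needs.
type Machine struct {
	Hostname string
	Expiry   *time.Time
}

// IsExpired mirrors the idea of types.Machine.IsExpired: a machine with a
// non-nil expiry in the past is expired.
func (m Machine) IsExpired() bool {
	return m.Expiry != nil && m.Expiry.Before(time.Now())
}

func main() {
	past := time.Now().Add(-time.Hour)
	future := time.Now().Add(time.Hour)

	peers := []Machine{
		{Hostname: "peer1", Expiry: &future},
		{Hostname: "peer2", Expiry: &past}, // expired, will be dropped
	}

	// Same call shape as the mapper change: keep only non-expired peers.
	peers = lo.Filter(peers, func(item Machine, index int) bool {
		return !item.IsExpired()
	})

	fmt.Println(peers) // only peer1 remains
}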

View File

@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"

+	"github.com/davecgh/go-spew/spew"
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/juanfont/headscale/hscontrol/policy"
@@ -164,35 +165,7 @@ func Test_fullMapResponse(t *testing.T) {
 	lastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)
 	expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)

-	tests := []struct {
-		name             string
-		pol              *policy.ACLPolicy
-		machine          *types.Machine
-		peers            types.Machines
-		stripEmailDomain bool
-		baseDomain       string
-		dnsConfig        *tailcfg.DNSConfig
-		derpMap          *tailcfg.DERPMap
-		logtail          bool
-		randomClientPort bool
-		want             *tailcfg.MapResponse
-		wantErr          bool
-	}{
-		// {
-		// 	name:             "empty-machine",
-		// 	machine:          types.Machine{},
-		// 	pol:              &policy.ACLPolicy{},
-		// 	dnsConfig:        &tailcfg.DNSConfig{},
-		// 	baseDomain:       "",
-		// 	stripEmailDomain: false,
-		// 	want:             nil,
-		// 	wantErr:          true,
-		// },
-		{
-			name:    "no-pol-no-peers-map-response",
-			pol:     &policy.ACLPolicy{},
-			machine: &types.Machine{
+	mini := &types.Machine{
 		ID:         0,
 		MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
 		NodeKey:    "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
@@ -230,17 +203,9 @@ func Test_fullMapResponse(t *testing.T) {
 			},
 		},
 		CreatedAt: created,
-			},
-			peers:            []types.Machine{},
-			stripEmailDomain: false,
-			baseDomain:       "",
-			dnsConfig:        &tailcfg.DNSConfig{},
-			derpMap:          &tailcfg.DERPMap{},
-			logtail:          false,
-			randomClientPort: false,
-			want: &tailcfg.MapResponse{
-				KeepAlive: false,
-				Node: &tailcfg.Node{
+	}
+
+	tailMini := &tailcfg.Node{
 		ID:       0,
 		StableID: "0",
 		Name:     "mini",
@@ -276,64 +241,9 @@ func Test_fullMapResponse(t *testing.T) {
 			tailcfg.CapabilityAdmin,
 			tailcfg.CapabilitySSH,
 		},
-				},
-				DERPMap:         &tailcfg.DERPMap{},
-				Peers:           []*tailcfg.Node{},
-				DNSConfig:       &tailcfg.DNSConfig{},
-				Domain:          "",
-				CollectServices: "false",
-				PacketFilter:    []tailcfg.FilterRule{},
-				UserProfiles:    []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
-				SSHPolicy:       nil,
-				ControlTime:     &time.Time{},
-				Debug: &tailcfg.Debug{
-					DisableLogTail: true,
-				},
-			},
-			wantErr: false,
-		},
-		{
-			name: "no-pol-map-response",
-			pol:  &policy.ACLPolicy{},
-			machine: &types.Machine{
-				ID:          0,
-				MachineKey:  "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
-				NodeKey:     "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
-				DiscoKey:    "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
-				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
-				Hostname:    "mini",
-				GivenName:   "mini",
-				UserID:      0,
-				User:        types.User{Name: "mini"},
-				ForcedTags:  []string{},
-				LastSeen:    &lastSeen,
-				Expiry:      &expire,
-				HostInfo:    types.HostInfo{},
-				Endpoints:   []string{},
-				Routes: []types.Route{
-					{
-						Prefix:     types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
-						Advertised: true,
-						Enabled:    true,
-						IsPrimary:  false,
-					},
-					{
-						Prefix:     types.IPPrefix(netip.MustParsePrefix("192.168.0.0/24")),
-						Advertised: true,
-						Enabled:    true,
-						IsPrimary:  true,
-					},
-					{
-						Prefix:     types.IPPrefix(netip.MustParsePrefix("172.0.0.0/10")),
-						Advertised: true,
-						Enabled:    false,
-						IsPrimary:  true,
-					},
-				},
-				CreatedAt: created,
-			},
-			peers: []types.Machine{
-				{
+	}
+
+	peer1 := types.Machine{
 		ID:         1,
 		MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
 		NodeKey:    "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
@@ -350,56 +260,9 @@ func Test_fullMapResponse(t *testing.T) {
 		Endpoints:  []string{},
 		Routes:     []types.Route{},
 		CreatedAt:  created,
-				},
-			},
-			stripEmailDomain: false,
-			baseDomain:       "",
-			dnsConfig:        &tailcfg.DNSConfig{},
-			derpMap:          &tailcfg.DERPMap{},
-			logtail:          false,
-			randomClientPort: false,
-			want: &tailcfg.MapResponse{
-				KeepAlive: false,
-				Node: &tailcfg.Node{
-					ID:       0,
-					StableID: "0",
-					Name:     "mini",
-					User:     0,
-					Key: mustNK(
-						"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
-					),
-					KeyExpiry: expire,
-					Machine: mustMK(
-						"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
-					),
-					DiscoKey: mustDK(
-						"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
-					),
-					Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
-					AllowedIPs: []netip.Prefix{
-						netip.MustParsePrefix("100.64.0.1/32"),
-						netip.MustParsePrefix("0.0.0.0/0"),
-						netip.MustParsePrefix("192.168.0.0/24"),
-					},
-					Endpoints:     []string{},
-					DERP:          "127.3.3.40:0",
-					Hostinfo:      hiview(tailcfg.Hostinfo{}),
-					Created:       created,
-					Tags:          []string{},
-					PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
-					LastSeen:          &lastSeen,
-					Online:            new(bool),
-					KeepAlive:         true,
-					MachineAuthorized: true,
-					Capabilities: []string{
-						tailcfg.CapabilityFileSharing,
-						tailcfg.CapabilityAdmin,
-						tailcfg.CapabilitySSH,
-					},
-				},
-				DERPMap: &tailcfg.DERPMap{},
-				Peers: []*tailcfg.Node{
-					{
+	}
+
+	tailPeer1 := &tailcfg.Node{
 		ID:       1,
 		StableID: "1",
 		Name:     "peer1",
@@ -430,8 +293,101 @@ func Test_fullMapResponse(t *testing.T) {
 			tailcfg.CapabilityAdmin,
 			tailcfg.CapabilitySSH,
 		},
+	}
+
+	peer2 := types.Machine{
+		ID:          2,
+		MachineKey:  "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
+		NodeKey:     "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
+		DiscoKey:    "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
+		IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+		Hostname:    "peer2",
+		GivenName:   "peer2",
+		UserID:      1,
+		User:        types.User{Name: "peer2"},
+		ForcedTags:  []string{},
+		LastSeen:    &lastSeen,
+		Expiry:      &expire,
+		HostInfo:    types.HostInfo{},
+		Endpoints:   []string{},
+		Routes:      []types.Route{},
+		CreatedAt:   created,
+	}
+
+	tests := []struct {
+		name             string
+		pol              *policy.ACLPolicy
+		machine          *types.Machine
+		peers            types.Machines
+		stripEmailDomain bool
+		baseDomain       string
+		dnsConfig        *tailcfg.DNSConfig
+		derpMap          *tailcfg.DERPMap
+		logtail          bool
+		randomClientPort bool
+		want             *tailcfg.MapResponse
+		wantErr          bool
+	}{
+		// {
+		// 	name:             "empty-machine",
+		// 	machine:          types.Machine{},
+		// 	pol:              &policy.ACLPolicy{},
+		// 	dnsConfig:        &tailcfg.DNSConfig{},
+		// 	baseDomain:       "",
+		// 	stripEmailDomain: false,
+		// 	want:             nil,
+		// 	wantErr:          true,
+		// },
+		{
+			name:             "no-pol-no-peers-map-response",
+			pol:              &policy.ACLPolicy{},
+			machine:          mini,
+			peers:            []types.Machine{},
+			stripEmailDomain: false,
+			baseDomain:       "",
+			dnsConfig:        &tailcfg.DNSConfig{},
+			derpMap:          &tailcfg.DERPMap{},
+			logtail:          false,
+			randomClientPort: false,
+			want: &tailcfg.MapResponse{
+				Node:            tailMini,
+				KeepAlive:       false,
+				DERPMap:         &tailcfg.DERPMap{},
+				Peers:           []*tailcfg.Node{},
+				DNSConfig:       &tailcfg.DNSConfig{},
+				Domain:          "",
+				CollectServices: "false",
+				PacketFilter:    []tailcfg.FilterRule{},
+				UserProfiles:    []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
+				SSHPolicy:       nil,
+				ControlTime:     &time.Time{},
+				Debug: &tailcfg.Debug{
+					DisableLogTail: true,
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name:    "no-pol-with-peer-map-response",
+			pol:     &policy.ACLPolicy{},
+			machine: mini,
+			peers: []types.Machine{
+				peer1,
+			},
+			stripEmailDomain: false,
+			baseDomain:       "",
+			dnsConfig:        &tailcfg.DNSConfig{},
+			derpMap:          &tailcfg.DERPMap{},
+			logtail:          false,
+			randomClientPort: false,
+			want: &tailcfg.MapResponse{
+				KeepAlive: false,
+				Node:      tailMini,
+				DERPMap:   &tailcfg.DERPMap{},
+				Peers: []*tailcfg.Node{
+					tailPeer1,
+				},
 				DNSConfig:       &tailcfg.DNSConfig{},
 				Domain:          "",
 				CollectServices: "false",
@@ -445,6 +401,55 @@ func Test_fullMapResponse(t *testing.T) {
 				},
 			},
 			wantErr: false,
 		},
+		{
+			name: "with-pol-map-response",
+			pol: &policy.ACLPolicy{
+				ACLs: []policy.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"mini"},
+						Destinations: []string{"100.64.0.2:*"},
+					},
+				},
+			},
+			machine: mini,
+			peers: []types.Machine{
+				peer1,
+				peer2,
+			},
+			stripEmailDomain: false,
+			baseDomain:       "",
+			dnsConfig:        &tailcfg.DNSConfig{},
+			derpMap:          &tailcfg.DERPMap{},
+			logtail:          false,
+			randomClientPort: false,
+			want: &tailcfg.MapResponse{
+				KeepAlive: false,
+				Node:      tailMini,
+				DERPMap:   &tailcfg.DERPMap{},
+				Peers: []*tailcfg.Node{
+					tailPeer1,
+				},
+				DNSConfig:       &tailcfg.DNSConfig{},
+				Domain:          "",
+				CollectServices: "false",
+				PacketFilter: []tailcfg.FilterRule{
+					{
+						SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32"},
+						DstPorts: []tailcfg.NetPortRange{
+							{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny},
+						},
+					},
+				},
+				UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
+				SSHPolicy:    nil,
+				ControlTime:  &time.Time{},
+				Debug: &tailcfg.Debug{
+					DisableLogTail: true,
+				},
+			},
+			wantErr: false,
+		},
 	}

 	for _, tt := range tests {
@@ -467,6 +472,8 @@ func Test_fullMapResponse(t *testing.T) {
 			return
 		}

+		spew.Dump(got)
+
 		if diff := cmp.Diff(
 			tt.want,
 			got,

View File

@@ -125,8 +125,9 @@ func GenerateFilterRules(
 	machines types.Machines,
 	stripEmailDomain bool,
 ) ([]tailcfg.FilterRule, *tailcfg.SSHPolicy, error) {
+	// If there is no policy defined, we default to allow all
 	if policy == nil {
-		return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, nil
+		return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil
 	}

 	rules, err := policy.generateFilterRules(machines, stripEmailDomain)
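
The behavioural nuance in this hunk: a nil policy now falls back to tailcfg.FilterAllowAll (Tailscale's canonical allow-all rule set) instead of an empty rule list, matching the `aclRules: tailcfg.FilterAllowAll // default allowall` struct default that this commit removes from the Headscale struct. A hypothetical regression test, not part of the commit, assuming the signature visible in the hunk above:

package policy_test

import (
	"reflect"
	"testing"

	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

// A nil policy should now produce Tailscale's allow-all filter,
// preserving the old global default in the new per-node path.
func TestNilPolicyDefaultsToAllowAll(t *testing.T) {
	rules, _, err := policy.GenerateFilterRules(nil, types.Machines{}, false)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(rules, tailcfg.FilterAllowAll) {
		t.Errorf("want tailcfg.FilterAllowAll for nil policy, got %v", rules)
	}
}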

View File

@@ -2319,6 +2319,48 @@ func Test_getFilteredByACLPeers(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "failing-edge-case-during-p3-refactor",
+			args: args{
+				machines: []types.Machine{
+					{
+						ID:          1,
+						IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
+						Hostname:    "peer1",
+						User:        types.User{Name: "mini"},
+					},
+					{
+						ID:          2,
+						IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+						Hostname:    "peer2",
+						User:        types.User{Name: "peer2"},
+					},
+				},
+				rules: []tailcfg.FilterRule{
+					{
+						SrcIPs: []string{"100.64.0.1/32"},
+						DstPorts: []tailcfg.NetPortRange{
+							{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
+							{IP: "::/0", Ports: tailcfg.PortRangeAny},
+						},
+					},
+				},
+				machine: &types.Machine{
+					ID:          0,
+					IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
+					Hostname:    "mini",
+					User:        types.User{Name: "mini"},
+				},
+			},
+			want: []types.Machine{
+				{
+					ID:          2,
+					IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+					Hostname:    "peer2",
+					User:        types.User{Name: "peer2"},
+				},
+			},
+		},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {

View File

@@ -59,10 +59,6 @@ func (h *Headscale) handlePollCommon(
 	// update ACLRules with peer informations (to update server tags if necessary)
 	if h.ACLPolicy != nil {
-		// TODO(kradalby): Since this is not blocking, I might have introduced a bug here.
-		// It will be resolved later as we change up the policy stuff.
-		h.policyUpdateChan <- struct{}{}
-
 		// update routes with peer information
 		err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, machine)
 		if err != nil {

View File

@@ -1 +1,101 @@
 package types
+
+import (
+	"net/netip"
+	"testing"
+
+	"tailscale.com/tailcfg"
+)
+
+func Test_MachineCanAccess(t *testing.T) {
+	tests := []struct {
+		name     string
+		machine1 Machine
+		machine2 Machine
+		rules    []tailcfg.FilterRule
+		want     bool
+	}{
+		{
+			name: "other-cant-access-src",
+			machine1: Machine{
+				ID:          0,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
+				Hostname:    "mini",
+				User:        User{Name: "mini"},
+			},
+			machine2: Machine{
+				ID:          2,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+				Hostname:    "peer2",
+				User:        User{Name: "peer2"},
+			},
+			rules: []tailcfg.FilterRule{
+				{
+					SrcIPs: []string{"100.64.0.2/32"},
+					DstPorts: []tailcfg.NetPortRange{
+						{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "dest-cant-access-src",
+			machine1: Machine{
+				ID:          2,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+				Hostname:    "peer2",
+				User:        User{Name: "peer2"},
+			},
+			machine2: Machine{
+				ID:          0,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
+				Hostname:    "mini",
+				User:        User{Name: "mini"},
+			},
+			rules: []tailcfg.FilterRule{
+				{
+					SrcIPs: []string{"100.64.0.2/32"},
+					DstPorts: []tailcfg.NetPortRange{
+						{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "src-can-access-dest",
+			machine1: Machine{
+				ID:          0,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
+				Hostname:    "mini",
+				User:        User{Name: "mini"},
+			},
+			machine2: Machine{
+				ID:          2,
+				IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
+				Hostname:    "peer2",
+				User:        User{Name: "peer2"},
+			},
+			rules: []tailcfg.FilterRule{
+				{
+					SrcIPs: []string{"100.64.0.2/32"},
+					DstPorts: []tailcfg.NetPortRange{
+						{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
+					},
+				},
+			},
+			want: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.machine1.CanAccess(tt.rules, &tt.machine2)
+			if got != tt.want {
+				t.Errorf("canAccess() failed: want (%t), got (%t)", tt.want, got)
+			}
+		})
+	}
+}
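
Machine.CanAccess itself is not shown in this diff; the tests above pin down its direction-sensitivity (appearing as a destination is not enough, the caller must also match a rule's sources). A plausible shape consistent with these cases, as a sketch only, with an invented matchesAny helper, not the actual headscale implementation:

// Sketch: direction-aware access check consistent with the tests above.
func (machine Machine) CanAccess(filter []tailcfg.FilterRule, machine2 *Machine) bool {
	for _, rule := range filter {
		// machine must be covered by the rule's sources...
		if !matchesAny(machine.IPAddresses, rule.SrcIPs) {
			continue
		}
		// ...and machine2 by one of the rule's destinations.
		for _, dst := range rule.DstPorts {
			if matchesAny(machine2.IPAddresses, []string{dst.IP}) {
				return true
			}
		}
	}

	return false
}

// matchesAny reports whether any addr falls inside any prefix string.
// Helper invented for this sketch; it ignores non-prefix forms like "*".
func matchesAny(addrs []netip.Addr, prefixes []string) bool {
	for _, p := range prefixes {
		pref, err := netip.ParsePrefix(p)
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			if pref.Contains(addr) {
				return true
			}
		}
	}

	return false
}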