Initial work on a nodestore which stores all of the nodes and their relations in memory, with the relationships between peers precalculated. It is a copy-on-write structure, replacing the "snapshot" when a change to the structure occurs. It is optimised for reads: while writes are not fast, rapid changes are batched together so that the expensive peer calculation runs less often. Writes will block until committed, while reads are never blocked. Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
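Below is a minimal sketch of the copy-on-write pattern this commit message describes, assuming a simplified Node type. Every name here (nodestore, Store, Snapshot, Update, calcPeers) is hypothetical, and this is not the actual headscale implementation: readers load the current snapshot atomically and never block, while writers serialize on a mutex, copy the node map, recompute the derived peer relations once for the batch, and publish a fresh immutable snapshot.

// Hypothetical sketch of a copy-on-write nodestore; not the real implementation.
package nodestore

import (
	"sync"
	"sync/atomic"
)

// Node is a stand-in for the real node type.
type Node struct {
	ID uint64
}

// snapshot is immutable once published; a write replaces it wholesale.
type snapshot struct {
	nodes map[uint64]Node
	peers map[uint64][]uint64 // precalculated peer relations
}

// Store holds the current snapshot. Reads are lock-free; writes serialize.
type Store struct {
	mu   sync.Mutex               // blocks writers until their change is committed
	snap atomic.Pointer[snapshot] // readers Load() without taking any lock
}

// New returns a Store with an empty published snapshot.
func New() *Store {
	s := &Store{}
	s.snap.Store(&snapshot{nodes: map[uint64]Node{}})
	return s
}

// Snapshot returns the current immutable view; it never blocks.
func (s *Store) Snapshot() *snapshot {
	return s.snap.Load()
}

// Update copies the old snapshot, applies the mutation, recomputes the peer
// relations once for the whole change, and atomically publishes the result.
func (s *Store) Update(mutate func(nodes map[uint64]Node)) {
	s.mu.Lock()
	defer s.mu.Unlock()

	old := s.snap.Load()
	next := &snapshot{nodes: make(map[uint64]Node, len(old.nodes))}
	for id, n := range old.nodes {
		next.nodes[id] = n
	}
	mutate(next.nodes)
	next.peers = calcPeers(next.nodes) // the expensive precalculation
	s.snap.Store(next)                 // replace the snapshot for all readers
}

// calcPeers would hold the expensive pairwise peer calculation; elided here.
func calcPeers(nodes map[uint64]Node) map[uint64][]uint64 {
	return nil
}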
213 lines · 6.0 KiB · Go
package policy

import (
	"net/netip"
	"slices"

	"github.com/juanfont/headscale/hscontrol/policy/matcher"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"github.com/samber/lo"
	"tailscale.com/net/tsaddr"
	"tailscale.com/tailcfg"
	"tailscale.com/types/views"
)

// ReduceNodes returns the list of peers authorized to be accessed from a given node.
func ReduceNodes(
	node types.NodeView,
	nodes views.Slice[types.NodeView],
	matchers []matcher.Match,
) views.Slice[types.NodeView] {
	var result []types.NodeView

	for _, peer := range nodes.All() {
		if peer.ID() == node.ID() {
			continue
		}

		if node.CanAccess(matchers, peer) || peer.CanAccess(matchers, node) {
			result = append(result, peer)
		}
	}

	return views.SliceOf(result)
}

// ReduceRoutes returns a reduced list of routes for a given node that it can access.
func ReduceRoutes(
	node types.NodeView,
	routes []netip.Prefix,
	matchers []matcher.Match,
) []netip.Prefix {
	var result []netip.Prefix

	for _, route := range routes {
		if node.CanAccessRoute(matchers, route) {
			result = append(result, route)
		}
	}

	return result
}
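
// reduceForNodeExample is a hypothetical usage sketch, not part of the
// original file: given the full node list, the global routes, and the
// matchers derived from the current policy, compute the peers and routes a
// single node is allowed to see. All names here are illustrative only.
func reduceForNodeExample(
	self types.NodeView,
	all views.Slice[types.NodeView],
	allRoutes []netip.Prefix,
	matchers []matcher.Match,
) (views.Slice[types.NodeView], []netip.Prefix) {
	// A peer is kept if either side can access the other, since both ends
	// need the peer entry for the connection to be set up.
	peers := ReduceNodes(self, all, matchers)

	// A route is kept only if the node can access it under the policy.
	routes := ReduceRoutes(self, allRoutes, matchers)

	return peers, routes
}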

// BuildPeerMap builds a map of all peers that can be accessed by each node.
func BuildPeerMap(
	nodes views.Slice[types.NodeView],
	matchers []matcher.Match,
) map[types.NodeID][]types.NodeView {
	ret := make(map[types.NodeID][]types.NodeView, nodes.Len())

	// Build the map of all peers according to the matchers.
	// Compared to ReduceNodes, which redoes the full O(n^2) work when it is
	// called once per node, this visits each unordered pair of nodes exactly
	// once (roughly n^2/2 comparisons) and records the relationship in both
	// directions as soon as it is found, so less work is done per node.
	for i := range nodes.Len() {
		for j := i + 1; j < nodes.Len(); j++ {
			if nodes.At(i).ID() == nodes.At(j).ID() {
				continue
			}

			if nodes.At(i).CanAccess(matchers, nodes.At(j)) || nodes.At(j).CanAccess(matchers, nodes.At(i)) {
				ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
				ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
			}
		}
	}

	return ret
}
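
// peerMapExample is a hypothetical illustration, not part of the original
// file: BuildPeerMap computes in one pass what calling ReduceNodes once per
// node would also produce, and the map is symmetric by construction — if b
// appears in peerMap[a.ID()], then a appears in peerMap[b.ID()].
func peerMapExample(all views.Slice[types.NodeView], matchers []matcher.Match) {
	peerMap := BuildPeerMap(all, matchers)

	for _, node := range all.All() {
		// peerMap[node.ID()] holds the same peers that
		// ReduceNodes(node, all, matchers) would return,
		// possibly in a different order.
		_ = peerMap[node.ID()]
	}
}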

// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations
// that are not relevant to that particular node.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
	ret := []tailcfg.FilterRule{}

	for _, rule := range rules {
		// Record whether the rule is actually relevant for the given node.
		var dests []tailcfg.NetPortRange
	DEST_LOOP:
		for _, dest := range rule.DstPorts {
			expanded, err := util.ParseIPSet(dest.IP, nil)
			// Fail closed: if we can't parse it, then we should not allow
			// access.
			if err != nil {
				continue DEST_LOOP
			}

			if node.InIPSet(expanded) {
				dests = append(dests, dest)
				continue DEST_LOOP
			}

			// If the node exposes routes, ensure they are not removed
			// when the filters are reduced.
			if node.Hostinfo().Valid() {
				routableIPs := node.Hostinfo().RoutableIPs()
				if routableIPs.Len() > 0 {
					for _, routableIP := range routableIPs.All() {
						if expanded.OverlapsPrefix(routableIP) {
							dests = append(dests, dest)
							continue DEST_LOOP
						}
					}
				}
			}

			// Also check approved subnet routes - nodes should have access
			// to subnets they're approved to route traffic for.
			subnetRoutes := node.SubnetRoutes()

			for _, subnetRoute := range subnetRoutes {
				if expanded.OverlapsPrefix(subnetRoute) {
					dests = append(dests, dest)
					continue DEST_LOOP
				}
			}
		}

		if len(dests) > 0 {
			ret = append(ret, tailcfg.FilterRule{
				SrcIPs:   rule.SrcIPs,
				DstPorts: dests,
				IPProto:  rule.IPProto,
			})
		}
	}

	return ret
}
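
// reduceFilterExample is a hypothetical illustration, not part of the
// original file: reduce the global filter down to what a single node needs.
// Destinations that match neither the node's own IPs nor any route it
// advertises or is approved to serve are dropped, and rules whose
// destinations all drop away disappear entirely, shrinking the netmap sent
// to the client.
func reduceFilterExample(node types.NodeView, globalFilter []tailcfg.FilterRule) []tailcfg.FilterRule {
	reduced := ReduceFilterRules(node, globalFilter)

	log.Trace().
		Uint64("node.id", node.ID().Uint64()).
		Int("rules.in", len(globalFilter)).
		Int("rules.out", len(reduced)).
		Msg("reduced filter rules for node")

	return reduced
}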

// ApproveRoutesWithPolicy checks if the node can approve the announced routes
// and returns the new list of approved routes.
// The approved routes will include:
// 1. ALL previously approved routes (regardless of whether they're still advertised)
// 2. New routes from announcedRoutes that can be auto-approved by policy
// This ensures that:
// - Previously approved routes are ALWAYS preserved (auto-approval never removes routes)
// - New routes can be auto-approved according to policy
// - Routes can only be removed by explicit admin action (not by auto-approval).
func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApproved, announcedRoutes []netip.Prefix) ([]netip.Prefix, bool) {
	if pm == nil {
		return currentApproved, false
	}

	// Start with ALL currently approved routes - we never remove approved routes.
	newApproved := make([]netip.Prefix, len(currentApproved))
	copy(newApproved, currentApproved)

	// Then, check for new routes that can be auto-approved.
	for _, route := range announcedRoutes {
		// Skip if already approved.
		if slices.Contains(newApproved, route) {
			continue
		}

		// Check if this new route can be auto-approved by policy.
		canApprove := pm.NodeCanApproveRoute(nv, route)
		if canApprove {
			newApproved = append(newApproved, route)
		}
	}

	// Sort and deduplicate, dropping any invalid prefixes.
	tsaddr.SortPrefixes(newApproved)
	newApproved = slices.Compact(newApproved)
	newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {
		return route.IsValid()
	})

	// Sort the current approved routes for comparison.
	sortedCurrent := make([]netip.Prefix, len(currentApproved))
	copy(sortedCurrent, currentApproved)
	tsaddr.SortPrefixes(sortedCurrent)

	// Only report a change if the resulting set actually differs.
	if !slices.Equal(sortedCurrent, newApproved) {
		// Log what changed.
		var added, kept []netip.Prefix
		for _, route := range newApproved {
			if !slices.Contains(sortedCurrent, route) {
				added = append(added, route)
			} else {
				kept = append(kept, route)
			}
		}

		if len(added) > 0 {
			log.Debug().
				Uint64("node.id", nv.ID().Uint64()).
				Str("node.name", nv.Hostname()).
				Strs("routes.added", util.PrefixesToString(added)).
				Strs("routes.kept", util.PrefixesToString(kept)).
				Int("routes.total", len(newApproved)).
				Msg("Routes auto-approved by policy")
		}

		return newApproved, true
	}

	return newApproved, false
}
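
// approveRoutesExample is a hypothetical usage sketch, not part of the
// original file: when a node (re)announces routes, merge policy
// auto-approval into its approved set and persist only when something was
// added. saveApprovedRoutes is a placeholder for whatever persistence the
// caller uses.
func approveRoutesExample(
	pm PolicyManager,
	nv types.NodeView,
	currentApproved, announced []netip.Prefix,
	saveApprovedRoutes func(types.NodeID, []netip.Prefix) error,
) error {
	approved, changed := ApproveRoutesWithPolicy(pm, nv, currentApproved, announced)
	if !changed {
		// Nothing new was auto-approved; keep the stored set as-is.
		return nil
	}

	// Routes are only ever added here; removal requires explicit admin action.
	return saveApprovedRoutes(nv.ID(), approved)
}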