2023-05-26 12:26:34 +02:00
|
|
|
package mapper
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/binary"
|
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2023-07-17 11:13:48 +02:00
|
|
|
"io/fs"
|
2023-05-26 12:26:34 +02:00
|
|
|
"net/url"
|
2023-07-17 11:13:48 +02:00
|
|
|
"os"
|
|
|
|
"path"
|
2023-06-29 12:20:22 +02:00
|
|
|
"sort"
|
2023-05-26 12:26:34 +02:00
|
|
|
"strings"
|
|
|
|
"sync"
|
2023-07-17 11:13:48 +02:00
|
|
|
"sync/atomic"
|
2023-05-26 12:26:34 +02:00
|
|
|
"time"
|
|
|
|
|
|
|
|
mapset "github.com/deckarep/golang-set/v2"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/policy"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/types"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/util"
|
|
|
|
"github.com/klauspost/compress/zstd"
|
|
|
|
"github.com/rs/zerolog/log"
|
2023-05-31 18:45:04 +02:00
|
|
|
"github.com/samber/lo"
|
2023-07-17 11:13:48 +02:00
|
|
|
"tailscale.com/envknob"
|
2023-05-26 12:26:34 +02:00
|
|
|
"tailscale.com/smallzstd"
|
|
|
|
"tailscale.com/tailcfg"
|
|
|
|
"tailscale.com/types/dnstype"
|
|
|
|
"tailscale.com/types/key"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// nextDNSDoHPrefix identifies NextDNS DNS-over-HTTPS resolvers so
	// device metadata can be appended to their URLs.
	nextDNSDoHPrefix = "https://dns.nextdns.io"

	// reservedResponseHeaderSize is the number of bytes prepended to
	// every marshalled response to carry its little-endian length.
	reservedResponseHeaderSize = 4

	// mapperIDLength is the length of the random, DNS-safe identifier
	// assigned to each Mapper instance (used in debug dump filenames).
	mapperIDLength = 8

	// debugMapResponsePerm is the file/directory mode used when dumping
	// map responses to disk for debugging.
	debugMapResponsePerm = 0o755
)
|
|
|
|
|
2023-07-17 11:13:48 +02:00
|
|
|
// debugDumpMapResponsePath, when set via the
// HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH environment variable, enables
// dumping of every MapRequest/MapResponse pair to disk for debugging.
var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH")
|
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
// TODO: Optimise
|
|
|
|
// As this work continues, the idea is that there will be one Mapper instance
|
|
|
|
// per node, attached to the open stream between the control and client.
|
2023-09-24 13:42:05 +02:00
|
|
|
// This means that this can hold a state per node and we can use that to
|
2023-08-09 22:56:21 +02:00
|
|
|
// improve the mapresponses sent.
|
|
|
|
// We could:
|
|
|
|
// - Keep information about the previous mapresponse so we can send a diff
|
|
|
|
// - Store hashes
|
|
|
|
// - Create a "minifier" that removes info not needed for the node
|
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
// Mapper generates MapResponses for a single node. One instance is
// intended to live per node, attached to the open stream between the
// control server and the client, so it can keep per-node state.
type Mapper struct {
	// privateKey2019 seals responses for pre-Noise (legacy) clients.
	privateKey2019 *key.MachinePrivate
	// isNoise reports whether the client speaks the Noise protocol.
	isNoise bool
	// capVer is the Tailscale capability version of the client.
	capVer tailcfg.CapabilityVersion

	// Configuration
	// TODO(kradalby): figure out if this is the format we want this in
	derpMap          *tailcfg.DERPMap
	baseDomain       string
	dnsCfg           *tailcfg.DNSConfig
	logtail          bool
	randomClientPort bool

	// uid is a short random identifier for this mapper instance, used
	// to tell debug dumps apart.
	uid     string
	created time.Time
	// seq counts marshalled responses (incremented atomically).
	seq uint64

	// Map isnt concurrency safe, so we need to ensure
	// only one func is accessing it over time.
	mu    sync.Mutex
	peers map[uint64]*types.Node
}
|
|
|
|
|
|
|
|
func NewMapper(
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
|
|
|
peers types.Nodes,
|
2023-05-26 12:26:34 +02:00
|
|
|
privateKey *key.MachinePrivate,
|
|
|
|
isNoise bool,
|
2023-09-28 21:33:53 +02:00
|
|
|
capVer tailcfg.CapabilityVersion,
|
2023-05-26 12:26:34 +02:00
|
|
|
derpMap *tailcfg.DERPMap,
|
|
|
|
baseDomain string,
|
|
|
|
dnsCfg *tailcfg.DNSConfig,
|
|
|
|
logtail bool,
|
|
|
|
randomClientPort bool,
|
|
|
|
) *Mapper {
|
2023-07-24 08:58:51 +02:00
|
|
|
log.Debug().
|
|
|
|
Caller().
|
|
|
|
Bool("noise", isNoise).
|
2023-09-24 13:42:05 +02:00
|
|
|
Str("node", node.Hostname).
|
2023-07-24 08:58:51 +02:00
|
|
|
Msg("creating new mapper")
|
|
|
|
|
2023-07-26 11:53:42 +02:00
|
|
|
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
|
2023-07-24 08:58:51 +02:00
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
return &Mapper{
|
|
|
|
privateKey2019: privateKey,
|
|
|
|
isNoise: isNoise,
|
2023-09-28 21:33:53 +02:00
|
|
|
capVer: capVer,
|
2023-05-26 12:26:34 +02:00
|
|
|
|
|
|
|
derpMap: derpMap,
|
|
|
|
baseDomain: baseDomain,
|
|
|
|
dnsCfg: dnsCfg,
|
|
|
|
logtail: logtail,
|
|
|
|
randomClientPort: randomClientPort,
|
2023-07-24 08:58:51 +02:00
|
|
|
|
|
|
|
uid: uid,
|
|
|
|
created: time.Now(),
|
|
|
|
seq: 0,
|
2023-08-09 22:20:05 +02:00
|
|
|
|
|
|
|
// TODO: populate
|
|
|
|
peers: peers.IDMap(),
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) String() string {
|
|
|
|
return fmt.Sprintf("Mapper: { seq: %d, uid: %s, created: %s }", m.seq, m.uid, m.created)
|
|
|
|
}
|
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
func generateUserProfiles(
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
|
|
|
peers types.Nodes,
|
2023-05-26 12:26:34 +02:00
|
|
|
baseDomain string,
|
|
|
|
) []tailcfg.UserProfile {
|
|
|
|
userMap := make(map[string]types.User)
|
2023-09-24 13:42:05 +02:00
|
|
|
userMap[node.User.Name] = node.User
|
2023-05-26 12:26:34 +02:00
|
|
|
for _, peer := range peers {
|
|
|
|
userMap[peer.User.Name] = peer.User // not worth checking if already is there
|
|
|
|
}
|
|
|
|
|
|
|
|
profiles := []tailcfg.UserProfile{}
|
|
|
|
for _, user := range userMap {
|
|
|
|
displayName := user.Name
|
|
|
|
|
|
|
|
if baseDomain != "" {
|
|
|
|
displayName = fmt.Sprintf("%s@%s", user.Name, baseDomain)
|
|
|
|
}
|
|
|
|
|
|
|
|
profiles = append(profiles,
|
|
|
|
tailcfg.UserProfile{
|
|
|
|
ID: tailcfg.UserID(user.ID),
|
|
|
|
LoginName: user.Name,
|
|
|
|
DisplayName: displayName,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return profiles
|
|
|
|
}
|
|
|
|
|
|
|
|
func generateDNSConfig(
|
|
|
|
base *tailcfg.DNSConfig,
|
|
|
|
baseDomain string,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
|
|
|
peers types.Nodes,
|
2023-05-26 12:26:34 +02:00
|
|
|
) *tailcfg.DNSConfig {
|
|
|
|
dnsConfig := base.Clone()
|
|
|
|
|
|
|
|
// if MagicDNS is enabled
|
|
|
|
if base != nil && base.Proxied {
|
|
|
|
// Only inject the Search Domain of the current user
|
|
|
|
// shared nodes should use their full FQDN
|
|
|
|
dnsConfig.Domains = append(
|
|
|
|
dnsConfig.Domains,
|
|
|
|
fmt.Sprintf(
|
|
|
|
"%s.%s",
|
2023-09-24 13:42:05 +02:00
|
|
|
node.User.Name,
|
2023-05-26 12:26:34 +02:00
|
|
|
baseDomain,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
|
|
|
|
userSet := mapset.NewSet[types.User]()
|
2023-09-24 13:42:05 +02:00
|
|
|
userSet.Add(node.User)
|
2023-05-26 12:26:34 +02:00
|
|
|
for _, p := range peers {
|
|
|
|
userSet.Add(p.User)
|
|
|
|
}
|
|
|
|
for _, user := range userSet.ToSlice() {
|
|
|
|
dnsRoute := fmt.Sprintf("%v.%v", user.Name, baseDomain)
|
|
|
|
dnsConfig.Routes[dnsRoute] = nil
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dnsConfig = base
|
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
addNextDNSMetadata(dnsConfig.Resolvers, node)
|
2023-05-26 12:26:34 +02:00
|
|
|
|
|
|
|
return dnsConfig
|
|
|
|
}
|
|
|
|
|
|
|
|
// If any nextdns DoH resolvers are present in the list of resolvers it will
|
2023-09-24 13:42:05 +02:00
|
|
|
// take metadata from the node metadata and instruct tailscale to add it
|
2023-05-26 12:26:34 +02:00
|
|
|
// to the requests. This makes it possible to identify from which device the
|
|
|
|
// requests come in the NextDNS dashboard.
|
|
|
|
//
|
|
|
|
// This will produce a resolver like:
|
|
|
|
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
|
2023-09-24 13:42:05 +02:00
|
|
|
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
2023-05-26 12:26:34 +02:00
|
|
|
for _, resolver := range resolvers {
|
|
|
|
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
|
|
|
attrs := url.Values{
|
2023-09-24 13:42:05 +02:00
|
|
|
"device_name": []string{node.Hostname},
|
2023-11-21 18:20:06 +01:00
|
|
|
"device_model": []string{node.Hostinfo.OS},
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
if len(node.IPAddresses) > 0 {
|
|
|
|
attrs.Add("device_ip", node.IPAddresses[0].String())
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
// fullMapResponse creates a complete MapResponse for a node.
|
|
|
|
// It is a separate function to make testing easier.
|
|
|
|
func (m *Mapper) fullMapResponse(
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-08-09 22:56:21 +02:00
|
|
|
pol *policy.ACLPolicy,
|
|
|
|
) (*tailcfg.MapResponse, error) {
|
2023-09-24 13:42:05 +02:00
|
|
|
peers := nodeMapToList(m.peers)
|
2023-08-09 22:56:21 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
resp, err := m.baseWithConfigMapResponse(node, pol)
|
2023-08-09 22:56:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = appendPeerChanges(
|
|
|
|
resp,
|
|
|
|
pol,
|
2023-09-24 13:42:05 +02:00
|
|
|
node,
|
2023-09-28 21:33:53 +02:00
|
|
|
m.capVer,
|
2023-08-09 22:56:21 +02:00
|
|
|
peers,
|
|
|
|
peers,
|
|
|
|
m.baseDomain,
|
|
|
|
m.dnsCfg,
|
2023-09-28 21:33:53 +02:00
|
|
|
m.randomClientPort,
|
2023-08-09 22:56:21 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
// FullMapResponse returns a MapResponse for the given node.
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) FullMapResponse(
|
2023-05-26 12:26:34 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-05-26 12:26:34 +02:00
|
|
|
pol *policy.ACLPolicy,
|
|
|
|
) ([]byte, error) {
|
2023-08-09 22:20:05 +02:00
|
|
|
m.mu.Lock()
|
|
|
|
defer m.mu.Unlock()
|
2023-05-31 18:45:04 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
resp, err := m.fullMapResponse(node, pol)
|
2023-05-26 12:26:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if m.isNoise {
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
2023-06-29 12:20:22 +02:00
|
|
|
}
|
2023-05-26 12:26:34 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
// LiteMapResponse returns a MapResponse for the given node.
|
2023-07-26 14:42:12 +02:00
|
|
|
// Lite means that the peers has been omitted, this is intended
|
2023-07-26 13:55:03 +02:00
|
|
|
// to be used to answer MapRequests with OmitPeers set to true.
|
|
|
|
func (m *Mapper) LiteMapResponse(
|
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-07-26 13:55:03 +02:00
|
|
|
pol *policy.ACLPolicy,
|
|
|
|
) ([]byte, error) {
|
2023-09-24 13:42:05 +02:00
|
|
|
resp, err := m.baseWithConfigMapResponse(node, pol)
|
2023-07-26 13:55:03 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if m.isNoise {
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
2023-07-26 13:55:03 +02:00
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
2023-07-26 13:55:03 +02:00
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) KeepAliveResponse(
|
2023-06-29 12:20:22 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-06-29 12:20:22 +02:00
|
|
|
) ([]byte, error) {
|
2023-08-09 22:56:21 +02:00
|
|
|
resp := m.baseMapResponse()
|
2023-06-29 12:20:22 +02:00
|
|
|
resp.KeepAlive = true
|
2023-05-26 12:26:34 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) DERPMapResponse(
|
2023-05-26 12:26:34 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-06-29 12:20:22 +02:00
|
|
|
derpMap tailcfg.DERPMap,
|
2023-05-26 12:26:34 +02:00
|
|
|
) ([]byte, error) {
|
2023-08-09 22:56:21 +02:00
|
|
|
resp := m.baseMapResponse()
|
2023-06-29 12:20:22 +02:00
|
|
|
resp.DERPMap = &derpMap
|
2023-05-26 12:26:34 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-06-29 12:20:22 +02:00
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) PeerChangedResponse(
|
2023-06-29 12:20:22 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
|
|
|
changed types.Nodes,
|
2023-06-29 12:20:22 +02:00
|
|
|
pol *policy.ACLPolicy,
|
|
|
|
) ([]byte, error) {
|
2023-08-09 22:20:05 +02:00
|
|
|
m.mu.Lock()
|
|
|
|
defer m.mu.Unlock()
|
|
|
|
|
2023-06-29 12:20:22 +02:00
|
|
|
lastSeen := make(map[tailcfg.NodeID]bool)
|
|
|
|
|
2023-08-09 22:20:05 +02:00
|
|
|
// Update our internal map.
|
2023-09-24 13:42:05 +02:00
|
|
|
for _, node := range changed {
|
|
|
|
m.peers[node.ID] = node
|
2023-06-29 12:20:22 +02:00
|
|
|
|
|
|
|
// We have just seen the node, let the peers update their list.
|
2023-09-24 13:42:05 +02:00
|
|
|
lastSeen[tailcfg.NodeID(node.ID)] = true
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
resp := m.baseMapResponse()
|
|
|
|
|
|
|
|
err := appendPeerChanges(
|
|
|
|
&resp,
|
2023-06-29 12:20:22 +02:00
|
|
|
pol,
|
2023-09-24 13:42:05 +02:00
|
|
|
node,
|
2023-09-28 21:33:53 +02:00
|
|
|
m.capVer,
|
2023-09-24 13:42:05 +02:00
|
|
|
nodeMapToList(m.peers),
|
2023-08-09 22:56:21 +02:00
|
|
|
changed,
|
|
|
|
m.baseDomain,
|
|
|
|
m.dnsCfg,
|
2023-09-28 21:33:53 +02:00
|
|
|
m.randomClientPort,
|
2023-06-29 12:20:22 +02:00
|
|
|
)
|
2023-05-26 12:26:34 +02:00
|
|
|
if err != nil {
|
2023-06-29 12:20:22 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
// resp.PeerSeenChange = lastSeen
|
2023-06-29 12:20:22 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) PeerRemovedResponse(
|
2023-06-29 12:20:22 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-06-29 12:20:22 +02:00
|
|
|
removed []tailcfg.NodeID,
|
2023-05-26 12:26:34 +02:00
|
|
|
) ([]byte, error) {
|
2023-08-09 22:20:05 +02:00
|
|
|
m.mu.Lock()
|
|
|
|
defer m.mu.Unlock()
|
|
|
|
|
|
|
|
// remove from our internal map
|
|
|
|
for _, id := range removed {
|
|
|
|
delete(m.peers, uint64(id))
|
|
|
|
}
|
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
resp := m.baseMapResponse()
|
2023-06-29 12:20:22 +02:00
|
|
|
resp.PeersRemoved = removed
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-06-29 12:20:22 +02:00
|
|
|
}
|
|
|
|
|
2023-07-24 08:58:51 +02:00
|
|
|
func (m *Mapper) marshalMapResponse(
|
2023-07-26 14:42:12 +02:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-06-29 12:20:22 +02:00
|
|
|
resp *tailcfg.MapResponse,
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-06-29 12:20:22 +02:00
|
|
|
compression string,
|
|
|
|
) ([]byte, error) {
|
2023-07-24 08:58:51 +02:00
|
|
|
atomic.AddUint64(&m.seq, 1)
|
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
jsonBody, err := json.Marshal(resp)
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Caller().
|
|
|
|
Err(err).
|
|
|
|
Msg("Cannot marshal map response")
|
|
|
|
}
|
|
|
|
|
2023-07-17 11:13:48 +02:00
|
|
|
if debugDumpMapResponsePath != "" {
|
|
|
|
data := map[string]interface{}{
|
|
|
|
"MapRequest": mapRequest,
|
|
|
|
"MapResponse": resp,
|
|
|
|
}
|
|
|
|
|
|
|
|
body, err := json.Marshal(data)
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Caller().
|
|
|
|
Err(err).
|
|
|
|
Msg("Cannot marshal map response")
|
|
|
|
}
|
|
|
|
|
|
|
|
perms := fs.FileMode(debugMapResponsePerm)
|
2023-09-24 13:42:05 +02:00
|
|
|
mPath := path.Join(debugDumpMapResponsePath, node.Hostname)
|
2023-07-17 11:13:48 +02:00
|
|
|
err = os.MkdirAll(mPath, perms)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
now := time.Now().UnixNano()
|
2023-07-17 11:13:48 +02:00
|
|
|
|
|
|
|
mapResponsePath := path.Join(
|
|
|
|
mPath,
|
2023-09-11 12:25:52 +02:00
|
|
|
fmt.Sprintf("%d-%s-%d.json", now, m.uid, atomic.LoadUint64(&m.seq)),
|
2023-07-17 11:13:48 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
|
2023-07-26 14:42:12 +02:00
|
|
|
err = os.WriteFile(mapResponsePath, body, perms)
|
2023-07-17 11:13:48 +02:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
var respBody []byte
|
|
|
|
if compression == util.ZstdCompression {
|
|
|
|
respBody = zstdEncode(jsonBody)
|
|
|
|
if !m.isNoise { // if legacy protocol
|
2023-11-19 22:37:04 +01:00
|
|
|
respBody = m.privateKey2019.SealTo(node.MachineKey, respBody)
|
2023-05-26 12:26:34 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if !m.isNoise { // if legacy protocol
|
2023-11-19 22:37:04 +01:00
|
|
|
respBody = m.privateKey2019.SealTo(node.MachineKey, jsonBody)
|
2023-05-26 12:26:34 +02:00
|
|
|
} else {
|
|
|
|
respBody = jsonBody
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
data := make([]byte, reservedResponseHeaderSize)
|
|
|
|
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
|
|
|
data = append(data, respBody...)
|
|
|
|
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
2023-06-29 12:20:22 +02:00
|
|
|
// MarshalResponse takes an Tailscale Response, marhsal it to JSON.
|
|
|
|
// If isNoise is set, then the JSON body will be returned
|
|
|
|
// If !isNoise and privateKey2019 is set, the JSON body will be sealed in a Nacl box.
|
|
|
|
func MarshalResponse(
|
|
|
|
resp interface{},
|
|
|
|
isNoise bool,
|
|
|
|
privateKey2019 *key.MachinePrivate,
|
|
|
|
machineKey key.MachinePublic,
|
|
|
|
) ([]byte, error) {
|
|
|
|
jsonBody, err := json.Marshal(resp)
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Caller().
|
|
|
|
Err(err).
|
|
|
|
Msg("Cannot marshal response")
|
|
|
|
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !isNoise && privateKey2019 != nil {
|
|
|
|
return privateKey2019.SealTo(machineKey, jsonBody), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return jsonBody, nil
|
|
|
|
}
|
|
|
|
|
2023-05-26 12:26:34 +02:00
|
|
|
func zstdEncode(in []byte) []byte {
|
|
|
|
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
|
|
|
|
if !ok {
|
|
|
|
panic("invalid type in sync pool")
|
|
|
|
}
|
|
|
|
out := encoder.EncodeAll(in, nil)
|
|
|
|
_ = encoder.Close()
|
|
|
|
zstdEncoderPool.Put(encoder)
|
|
|
|
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
// zstdEncoderPool recycles zstd encoders so a new one does not have to
// be allocated and configured for every marshalled response.
var zstdEncoderPool = &sync.Pool{
	New: func() any {
		// SpeedFastest trades compression ratio for latency, which
		// suits frequently-sent map responses.
		encoder, err := smallzstd.NewEncoder(
			nil,
			zstd.WithEncoderLevel(zstd.SpeedFastest))
		if err != nil {
			panic(err)
		}

		return encoder
	},
}
|
2023-06-29 12:20:22 +02:00
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
// baseMapResponse returns a tailcfg.MapResponse with
|
|
|
|
// KeepAlive false and ControlTime set to now.
|
|
|
|
func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
|
2023-06-29 12:20:22 +02:00
|
|
|
now := time.Now()
|
|
|
|
|
|
|
|
resp := tailcfg.MapResponse{
|
|
|
|
KeepAlive: false,
|
|
|
|
ControlTime: &now,
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp
|
|
|
|
}
|
2023-08-09 22:20:05 +02:00
|
|
|
|
2023-08-09 22:56:21 +02:00
|
|
|
// baseWithConfigMapResponse returns a tailcfg.MapResponse struct
|
|
|
|
// with the basic configuration from headscale set.
|
|
|
|
// It is used in for bigger updates, such as full and lite, not
|
|
|
|
// incremental.
|
|
|
|
func (m *Mapper) baseWithConfigMapResponse(
|
2023-09-24 13:42:05 +02:00
|
|
|
node *types.Node,
|
2023-08-09 22:56:21 +02:00
|
|
|
pol *policy.ACLPolicy,
|
|
|
|
) (*tailcfg.MapResponse, error) {
|
|
|
|
resp := m.baseMapResponse()
|
|
|
|
|
2023-09-28 21:33:53 +02:00
|
|
|
tailnode, err := tailNode(node, m.capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
|
2023-08-09 22:56:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
resp.Node = tailnode
|
|
|
|
|
|
|
|
resp.DERPMap = m.derpMap
|
|
|
|
|
|
|
|
resp.Domain = m.baseDomain
|
|
|
|
|
|
|
|
// Do not instruct clients to collect services we do not
|
|
|
|
// support or do anything with them
|
|
|
|
resp.CollectServices = "false"
|
|
|
|
|
|
|
|
resp.KeepAlive = false
|
|
|
|
|
|
|
|
resp.Debug = &tailcfg.Debug{
|
2023-09-28 21:33:53 +02:00
|
|
|
DisableLogTail: !m.logtail,
|
2023-08-09 22:56:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return &resp, nil
|
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
|
|
|
|
ret := make(types.Nodes, 0)
|
2023-08-09 22:20:05 +02:00
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
for _, node := range nodes {
|
|
|
|
ret = append(ret, node)
|
2023-08-09 22:20:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2023-09-24 13:42:05 +02:00
|
|
|
// filterExpiredAndNotReady drops peers that are BOTH expired and have
// no endpoints registered, i.e. nodes that registered but never
// finished configuring and whose key has since lapsed.
//
// NOTE(review): the predicate keeps a peer when it is not expired OR
// has endpoints. Given the function name, `&&` (not expired AND has
// endpoints) may have been intended — confirm against callers before
// changing, since `&&` would also drop expired-but-configured peers.
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
	return lo.Filter(peers, func(item *types.Node, index int) bool {
		// Filter out nodes that are expired OR
		// nodes that has no endpoints, this typically means they have
		// registered, but are not configured.
		return !item.IsExpired() || len(item.Endpoints) > 0
	})
}
|
2023-08-09 22:56:21 +02:00
|
|
|
|
|
|
|
// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed: the (filtered) peer list,
// DNS configuration, packet filter, user profiles, SSH policy and
// online map. `peers` is the full set of known peers; `changed` is the
// subset being reported.
func appendPeerChanges(
	resp *tailcfg.MapResponse,

	pol *policy.ACLPolicy,
	node *types.Node,
	capVer tailcfg.CapabilityVersion,
	peers types.Nodes,
	changed types.Nodes,
	baseDomain string,
	dnsCfg *tailcfg.DNSConfig,
	randomClientPort bool,
) error {
	// Decide full-list vs delta BEFORE the filtering below shrinks
	// `changed`; a full change populates resp.Peers, a partial one
	// resp.PeersChanged.
	fullChange := len(peers) == len(changed)

	// Filter and SSH rules are derived from the complete peer set, not
	// just the changed subset.
	rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
		pol,
		node,
		peers,
	)
	if err != nil {
		return err
	}

	// Filter out peers that have expired.
	changed = filterExpiredAndNotReady(changed)

	// If there are filter rules present, see if there are any nodes that cannot
	// access eachother at all and remove them from the peers.
	if len(rules) > 0 {
		changed = policy.FilterNodesByACL(node, changed, rules)
	}

	// Profiles cover only the users of the peers actually reported.
	profiles := generateUserProfiles(node, changed, baseDomain)

	// DNS routes, however, are computed from the full peer set.
	dnsConfig := generateDNSConfig(
		dnsCfg,
		baseDomain,
		node,
		peers,
	)

	tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort)
	if err != nil {
		return err
	}

	// Peers is always returned sorted by Node.ID.
	sort.SliceStable(tailPeers, func(x, y int) bool {
		return tailPeers[x].ID < tailPeers[y].ID
	})

	if fullChange {
		resp.Peers = tailPeers
	} else {
		resp.PeersChanged = tailPeers
	}
	resp.DNSConfig = dnsConfig
	// Strip the filter down to rules relevant for this node before
	// sending it to the client.
	resp.PacketFilter = policy.ReduceFilterRules(node, rules)
	resp.UserProfiles = profiles
	resp.SSHPolicy = sshPolicy

	// TODO(kradalby): This currently does not take last seen in keepalives into account
	resp.OnlineChange = peers.OnlineNodeMap()

	return nil
}
|