batch updates in notifier (#1905)
parent fef8261339
commit cb0b495ea9
8 changed files with 541 additions and 158 deletions
@@ -66,10 +66,16 @@ func (h *Headscale) newMapSession(
 ) *mapSession {
 	warnf, infof, tracef, errf := logPollFunc(req, node)
 
-	// Use a buffered channel in case a node is not fully ready
-	// to receive a message to make sure we dont block the entire
-	// notifier.
-	updateChan := make(chan types.StateUpdate, h.cfg.Tuning.NodeMapSessionBufferedChanSize)
+	var updateChan chan types.StateUpdate
+	if req.Stream {
+		// Use a buffered channel in case a node is not fully ready
+		// to receive a message to make sure we dont block the entire
+		// notifier.
+		updateChan = make(chan types.StateUpdate, h.cfg.Tuning.NodeMapSessionBufferedChanSize)
+		updateChan <- types.StateUpdate{
+			Type: types.StateFullUpdate,
+		}
+	}
 
 	return &mapSession{
 		h: h,
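The new version only allocates the update channel for streaming requests, and primes it with a single `StateFullUpdate` so the first response the node receives is a full map, replacing the old `full := true` flag removed further down. A minimal standalone sketch of that pattern; the struct and buffer size below are illustrative stand-ins, not headscale's actual types:

```go
package main

import "fmt"

// Stand-in for types.StateUpdate, reduced to the one field used here.
type StateUpdate struct{ Type string }

// Stands in for cfg.Tuning.NodeMapSessionBufferedChanSize (assumed value).
const bufferedChanSize = 30

func main() {
	var updateChan chan StateUpdate
	stream := true // stands in for req.Stream

	if stream {
		// Buffered so a slow consumer does not block the sender (the notifier).
		updateChan = make(chan StateUpdate, bufferedChanSize)

		// Prime the channel: the first update the session consumes is a full one.
		updateChan <- StateUpdate{Type: "full"}
	}

	fmt.Println(len(updateChan)) // 1: the queued full update
}
```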
@@ -218,33 +224,26 @@ func (m *mapSession) serve() {
 	ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
 	defer cancel()
 
-	// TODO(kradalby): Make this available through a tuning envvar
-	wait := time.Second
-
-	// Add a circuit breaker, if the loop is not interrupted
-	// inbetween listening for the channels, some updates
-	// might get stale and stucked in the "changed" map
-	// defined below.
-	blockBreaker := time.NewTicker(wait)
-
-	// true means changed, false means removed
-	var changed map[types.NodeID]bool
-	var patches []*tailcfg.PeerChange
-	var derp bool
-
-	// Set full to true to immediatly send a full mapresponse
-	full := true
-	prev := time.Now()
-	lastMessage := ""
-
 	// Loop through updates and continuously send them to the
 	// client.
 	for {
-		// If a full update has been requested or there are patches, then send it immediately
-		// otherwise wait for the "batching" of changes or patches
-		if full || patches != nil || (changed != nil && time.Since(prev) > wait) {
+		// consume channels with update, keep alives or "batch" blocking signals
+		select {
+		case <-m.cancelCh:
+			m.tracef("poll cancelled received")
+			return
+		case <-ctx.Done():
+			m.tracef("poll context done")
+			return
+
+		// Consume all updates sent to node
+		case update := <-m.ch:
+			m.tracef("received stream update: %s %s", update.Type.String(), update.Message)
+			mapResponseUpdateReceived.WithLabelValues(update.Type.String()).Inc()
+
+			var data []byte
+			var err error
+			var lastMessage string
+
			// Ensure the node object is updated, for example, there
			// might have been a hostinfo update in a sidechannel
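The per-session batching state (the blockBreaker ticker, the `changed` map, the `full`/`derp` flags) disappears from `serve()`; per the commit title, coalescing now happens once in the notifier instead of in every poll loop. A rough, hypothetical sketch of that ticker-based batching, with invented names and window, not the notifier's actual code:

```go
package main

import (
	"fmt"
	"time"
)

type NodeID uint64

// batcher coalesces change notifications and flushes them at most
// once per window, mirroring the batching this commit moves out of
// the poll loop and into the notifier (names here are assumptions).
type batcher struct {
	in      chan NodeID
	changed map[NodeID]bool
}

func (b *batcher) run(window time.Duration, flush func([]NodeID)) {
	ticker := time.NewTicker(window)
	defer ticker.Stop()

	for {
		select {
		case id := <-b.in:
			// Accumulate instead of forwarding immediately.
			b.changed[id] = true
		case <-ticker.C:
			if len(b.changed) == 0 {
				continue
			}
			ids := make([]NodeID, 0, len(b.changed))
			for id := range b.changed {
				ids = append(ids, id)
			}
			b.changed = make(map[NodeID]bool)
			flush(ids)
		}
	}
}

func main() {
	b := &batcher{in: make(chan NodeID, 8), changed: make(map[NodeID]bool)}
	go b.run(100*time.Millisecond, func(ids []NodeID) {
		fmt.Println("flush", len(ids), "changed nodes")
	})

	for i := 1; i <= 5; i++ {
		b.in <- NodeID(i) // five updates collapse into one flush
	}
	time.Sleep(200 * time.Millisecond)
}
```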
@@ -256,62 +255,43 @@ func (m *mapSession) serve() {
 				return
 			}
 
-			// If there are patches _and_ fully changed nodes, filter the
-			// patches and remove all patches that are present for the full
-			// changes updates. This allows us to send them as part of the
-			// PeerChange update, but only for nodes that are not fully changed.
-			// The fully changed nodes will be updated from the database and
-			// have all the updates needed.
-			// This means that the patches left are for nodes that has no
-			// updates that requires a full update.
-			// Patches are not suppose to be mixed in, but can be.
-			//
-			// From tailcfg docs:
-			// These are applied after Peers* above, but in practice the
-			// control server should only send these on their own, without
-			//
-			// Currently, there is no effort to merge patch updates, they
-			// are all sent, and the client will apply them in order.
-			// TODO(kradalby): Merge Patches for the same IDs to send less
-			// data and give the client less work.
-			if patches != nil && changed != nil {
-				var filteredPatches []*tailcfg.PeerChange
-
-				for _, patch := range patches {
-					if _, ok := changed[types.NodeID(patch.NodeID)]; !ok {
-						filteredPatches = append(filteredPatches, patch)
-					}
-				}
-
-				patches = filteredPatches
-			}
-
 			updateType := "full"
-			// When deciding what update to send, the following is considered,
-			// Full is a superset of all updates, when a full update is requested,
-			// send only that and move on, all other updates will be present in
-			// a full map response.
-			//
-			// If a map of changed nodes exists, prefer sending that as it will
-			// contain all the updates for the node, including patches, as it
-			// is fetched freshly from the database when building the response.
-			//
-			// If there is full changes registered, but we have patches for individual
-			// nodes, send them.
-			//
-			// Finally, if a DERP map is the only request, send that alone.
-			if full {
+			switch update.Type {
+			case types.StateFullUpdate:
 				m.tracef("Sending Full MapResponse")
 				data, err = m.mapper.FullMapResponse(m.req, m.node, m.h.ACLPolicy, fmt.Sprintf("from mapSession: %p, stream: %t", m, m.isStreaming()))
-			} else if changed != nil {
+			case types.StatePeerChanged:
+				changed := make(map[types.NodeID]bool, len(update.ChangeNodes))
+
+				for _, nodeID := range update.ChangeNodes {
+					changed[nodeID] = true
+				}
+
+				lastMessage = update.Message
 				m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage))
-				data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, patches, m.h.ACLPolicy, lastMessage)
+				data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage)
 				updateType = "change"
-			} else if patches != nil {
+
+			case types.StatePeerChangedPatch:
 				m.tracef(fmt.Sprintf("Sending Changed Patch MapResponse: %v", lastMessage))
-				data, err = m.mapper.PeerChangedPatchResponse(m.req, m.node, patches, m.h.ACLPolicy)
+				data, err = m.mapper.PeerChangedPatchResponse(m.req, m.node, update.ChangePatches, m.h.ACLPolicy)
 				updateType = "patch"
-			} else if derp {
+			case types.StatePeerRemoved:
+				changed := make(map[types.NodeID]bool, len(update.Removed))
+
+				for _, nodeID := range update.Removed {
+					changed[nodeID] = false
+				}
+				m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage))
+				data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage)
+				updateType = "remove"
+			case types.StateSelfUpdate:
+				lastMessage = update.Message
+				m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage))
+				// create the map so an empty (self) update is sent
+				data, err = m.mapper.PeerChangedResponse(m.req, m.node, make(map[types.NodeID]bool), update.ChangePatches, m.h.ACLPolicy, lastMessage)
+				updateType = "remove"
+			case types.StateDERPUpdated:
 				m.tracef("Sending DERPUpdate MapResponse")
 				data, err = m.mapper.DERPMapResponse(m.req, m.node, m.h.DERPMap)
 				updateType = "derp"
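The switch dispatches on each update as it arrives instead of reconciling accumulated state, so the patch-filtering block above becomes unnecessary. The fields read from `types.StateUpdate` are visible in the hunk (`Type`, `Message`, `ChangeNodes`, `Removed`, `ChangePatches`); a pared-down stand-in shows how a sender and this loop meet over the channel (the struct below is illustrative, not headscale's definition):

```go
package main

import "fmt"

type NodeID uint64

// Pared-down stand-in for types.StateUpdate, keeping only the
// fields the switch above actually reads.
type StateUpdate struct {
	Type          string // e.g. "full", "peer_changed", "peer_removed"
	Message       string
	ChangeNodes   []NodeID
	Removed       []NodeID
	ChangePatches []string // stands in for []*tailcfg.PeerChange
}

func main() {
	ch := make(chan StateUpdate, 1)

	// A hypothetical notifier sending one already-batched peer-change update.
	ch <- StateUpdate{
		Type:        "peer_changed",
		Message:     "nodes came online",
		ChangeNodes: []NodeID{1, 2, 3},
	}

	update := <-ch
	fmt.Printf("%s: %d changed nodes (%s)\n",
		update.Type, len(update.ChangeNodes), update.Message)
}
```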
@@ -348,68 +328,6 @@ func (m *mapSession) serve() {
 			m.tracef("update sent")
 		}
 
-			// reset
-			changed = nil
-			patches = nil
-			lastMessage = ""
-			full = false
-			derp = false
-			prev = time.Now()
-		}
-
-		// consume channels with update, keep alives or "batch" blocking signals
-		select {
-		case <-m.cancelCh:
-			m.tracef("poll cancelled received")
-			return
-		case <-ctx.Done():
-			m.tracef("poll context done")
-			return
-
-		// Avoid infinite block that would potentially leave
-		// some updates in the changed map.
-		case <-blockBreaker.C:
-			continue
-
-		// Consume all updates sent to node
-		case update := <-m.ch:
-			m.tracef("received stream update: %s %s", update.Type.String(), update.Message)
-			mapResponseUpdateReceived.WithLabelValues(update.Type.String()).Inc()
-
-			switch update.Type {
-			case types.StateFullUpdate:
-				full = true
-			case types.StatePeerChanged:
-				if changed == nil {
-					changed = make(map[types.NodeID]bool)
-				}
-
-				for _, nodeID := range update.ChangeNodes {
-					changed[nodeID] = true
-				}
-
-				lastMessage = update.Message
-			case types.StatePeerChangedPatch:
-				patches = append(patches, update.ChangePatches...)
-			case types.StatePeerRemoved:
-				if changed == nil {
-					changed = make(map[types.NodeID]bool)
-				}
-
-				for _, nodeID := range update.Removed {
-					changed[nodeID] = false
-				}
-			case types.StateSelfUpdate:
-				// create the map so an empty (self) update is sent
-				if changed == nil {
-					changed = make(map[types.NodeID]bool)
-				}
-
-				lastMessage = update.Message
-			case types.StateDERPUpdated:
-				derp = true
-			}
-
 		case <-m.keepAliveTicker.C:
 			data, err := m.mapper.KeepAliveResponse(m.req, m.node)
 			if err != nil {
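After these deletions only the keep-alive arm survives next to the update arm in the new select: each update is sent as soon as it arrives, with nothing left to coalesce session-side. A trimmed sketch of the resulting loop shape, with stand-in types and handlers rather than the real mapper calls:

```go
package main

import (
	"fmt"
	"time"
)

// serveLoop shows the shape of serve() after this commit: no local
// batching state, just one select over updates and keep-alives.
func serveLoop(updates <-chan string, done <-chan struct{}) {
	keepAlive := time.NewTicker(50 * time.Millisecond)
	defer keepAlive.Stop()

	for {
		select {
		case <-done:
			return
		case u := <-updates:
			// Handled immediately; the notifier has already batched.
			fmt.Println("send map response:", u)
		case <-keepAlive.C:
			fmt.Println("send keep-alive")
		}
	}
}

func main() {
	updates := make(chan string, 1)
	done := make(chan struct{})

	go serveLoop(updates, done)
	updates <- "peer_changed"
	time.Sleep(120 * time.Millisecond)
	close(done)
	time.Sleep(10 * time.Millisecond)
}
```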