diff --git a/CHANGELOG.md b/CHANGELOG.md index bd14cd092..e56bbac67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ - [#1436](https://github.com/influxdata/kapacitor/issues/1436): Add linear fill support for QueryNode. - [#1345](https://github.com/influxdata/kapacitor/issues/1345): Add MQTT Alert Handler - [#1390](https://github.com/influxdata/kapacitor/issues/1390): Add built in functions to convert timestamps to integers +- [#1425](https://github.com/influxdata/kapacitor/pull/1425): BREAKING: Change over internal API to use message passing semantics. + The breaking change is that the Combine and Flatten nodes previously, but erroneously, operated across batch boundaries; this has been fixed. ### Bugfixes diff --git a/alert.go b/alert.go index be0800ecc..c8d503d8d 100644 --- a/alert.go +++ b/alert.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/kapacitor/alert" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -56,13 +57,10 @@ type AlertNode struct { handlers []alert.Handler levels []stateful.Expression scopePools []stateful.ScopePool - states map[models.GroupID]*alertState idTmpl *text.Template messageTmpl *text.Template detailsTmpl *html.Template - statesMu sync.RWMutex - alertsTriggered *expvar.Int oksTriggered *expvar.Int infosTriggered *expvar.Int @@ -451,7 +449,6 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * if n.History < 2 { n.History = 2 } - an.states = make(map[models.GroupID]*alertState) // Configure flapping if n.UseFlapping { @@ -463,301 +460,115 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * return } -func (a *AlertNode) runAlert([]byte) error { - valueF := func() int64 { - a.statesMu.RLock() - l := len(a.states) - a.statesMu.RUnlock() - return int64(l) - } - a.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - +func (n *AlertNode) runAlert([]byte) error { // Register delete hook - if a.hasAnonTopic() { - a.et.tm.registerDeleteHookForTask(a.et.Task.ID, deleteAlertHook(a.anonTopic)) + if n.hasAnonTopic() { + n.et.tm.registerDeleteHookForTask(n.et.Task.ID, deleteAlertHook(n.anonTopic)) // Register Handlers on topic - for _, h := range a.handlers { - a.et.tm.AlertService.RegisterAnonHandler(a.anonTopic, h) + for _, h := range n.handlers { + n.et.tm.AlertService.RegisterAnonHandler(n.anonTopic, h) } // Restore anonTopic - a.et.tm.AlertService.RestoreTopic(a.anonTopic) + n.et.tm.AlertService.RestoreTopic(n.anonTopic) } // Setup stats - a.alertsTriggered = &expvar.Int{} - a.statMap.Set(statsAlertsTriggered, a.alertsTriggered) + n.alertsTriggered = &expvar.Int{} + n.statMap.Set(statsAlertsTriggered, n.alertsTriggered) - a.oksTriggered = &expvar.Int{} - a.statMap.Set(statsOKsTriggered, a.oksTriggered) + n.oksTriggered = &expvar.Int{} + n.statMap.Set(statsOKsTriggered, n.oksTriggered) - a.infosTriggered = &expvar.Int{} - a.statMap.Set(statsInfosTriggered, a.infosTriggered) + n.infosTriggered = &expvar.Int{} + n.statMap.Set(statsInfosTriggered, n.infosTriggered) - a.warnsTriggered = &expvar.Int{} - a.statMap.Set(statsWarnsTriggered, a.warnsTriggered) + n.warnsTriggered = &expvar.Int{} + n.statMap.Set(statsWarnsTriggered, n.warnsTriggered) - a.critsTriggered = &expvar.Int{} - a.statMap.Set(statsCritsTriggered, a.critsTriggered) + n.critsTriggered = &expvar.Int{} + n.statMap.Set(statsCritsTriggered, n.critsTriggered) - a.eventsDropped = &expvar.Int{} - 
a.statMap.Set(statsCritsTriggered, a.critsTriggered) + n.eventsDropped = &expvar.Int{} + n.statMap.Set(statsCritsTriggered, n.critsTriggered) - switch a.Wants() { - case pipeline.StreamEdge: - for p, ok := a.ins[0].NextPoint(); ok; p, ok = a.ins[0].NextPoint() { - a.timer.Start() - id, err := a.renderID(p.Name, p.Group, p.Tags) - if err != nil { - return err - } - var currentLevel alert.Level - if state, ok := a.getAlertState(p.Group); ok { - currentLevel = state.currentLevel() - } else { - // Check for previous state - var triggered time.Time - currentLevel, triggered = a.restoreEventState(id) - if currentLevel != alert.OK { - // Update the state with the restored state - state = a.updateState(p.Time, currentLevel, p.Group) - state.triggered(triggered) - } - } - l := a.determineLevel(p.Time, p.Fields, p.Tags, currentLevel) - state := a.updateState(p.Time, l, p.Group) - if (a.a.UseFlapping && state.flapping) || (a.a.IsStateChangesOnly && !state.changed && !state.expired) { - a.timer.Stop() - continue - } - // send alert if we are not OK or we are OK and state changed (i.e recovery) - if l != alert.OK || state.changed { - batch := models.Batch{ - Name: p.Name, - Group: p.Group, - ByName: p.Dimensions.ByName, - Tags: p.Tags, - Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, - } - state.triggered(p.Time) - // Suppress the recovery event. - if a.a.NoRecoveriesFlag && l == alert.OK { - a.timer.Stop() - continue - } - duration := state.duration() - event, err := a.event(id, p.Name, p.Group, p.Tags, p.Fields, l, p.Time, duration, batch) - if err != nil { - return err - } - a.handleEvent(event) - if a.a.LevelTag != "" || a.a.IdTag != "" { - p.Tags = p.Tags.Copy() - if a.a.LevelTag != "" { - p.Tags[a.a.LevelTag] = l.String() - } - if a.a.IdTag != "" { - p.Tags[a.a.IdTag] = event.State.ID - } - } - if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" || a.a.MessageField != "" { - p.Fields = p.Fields.Copy() - if a.a.LevelField != "" { - p.Fields[a.a.LevelField] = l.String() - } - if a.a.MessageField != "" { - p.Fields[a.a.MessageField] = event.State.Message - } - if a.a.IdField != "" { - p.Fields[a.a.IdField] = event.State.ID - } - if a.a.DurationField != "" { - p.Fields[a.a.DurationField] = int64(duration) - } - } - a.timer.Pause() - for _, child := range a.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - a.timer.Resume() - } - a.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := a.ins[0].NextBatch(); ok; b, ok = a.ins[0].NextBatch() { - a.timer.Start() - id, err := a.renderID(b.Name, b.Group, b.Tags) - if err != nil { - return err - } - if len(b.Points) == 0 { - a.timer.Stop() - continue - } - // Keep track of lowest level for any point - lowestLevel := alert.Critical - // Keep track of highest level and point - highestLevel := alert.OK - var highestPoint *models.BatchPoint - - var currentLevel alert.Level - if state, ok := a.getAlertState(b.Group); ok { - currentLevel = state.currentLevel() - } else { - // Check for previous state - var triggered time.Time - currentLevel, triggered = a.restoreEventState(id) - if currentLevel != alert.OK { - // Update the state with the restored state - state = a.updateState(b.TMax, currentLevel, b.Group) - state.triggered(triggered) - } - } - for i, p := range b.Points { - l := a.determineLevel(p.Time, p.Fields, p.Tags, currentLevel) - if l < lowestLevel { - lowestLevel = l - } - if l > highestLevel || highestPoint == nil { - highestLevel = l - highestPoint = &b.Points[i] - } - } - - // Default 
the determined level to lowest. - l := lowestLevel - // Update determined level to highest if we don't care about all - if !a.a.AllFlag { - l = highestLevel - } - // Create alert Data - t := highestPoint.Time - if a.a.AllFlag || l == alert.OK { - t = b.TMax - } + // Setup consumer + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) - // Update state - state := a.updateState(t, l, b.Group) - // Trigger alert if: - // l == OK and state.changed (aka recovery) - // OR - // l != OK and flapping/statechanges checkout - if state.changed && l == alert.OK || - (l != alert.OK && - !((a.a.UseFlapping && state.flapping) || - (a.a.IsStateChangesOnly && !state.changed && !state.expired))) { - state.triggered(t) - // Suppress the recovery event. - if a.a.NoRecoveriesFlag && l == alert.OK { - a.timer.Stop() - continue - } - - duration := state.duration() - event, err := a.event(id, b.Name, b.Group, b.Tags, highestPoint.Fields, l, t, duration, b) - if err != nil { - return err - } - a.handleEvent(event) - // Update tags or fields for Level property - if a.a.LevelTag != "" || - a.a.LevelField != "" || - a.a.IdTag != "" || - a.a.IdField != "" || - a.a.DurationField != "" || - a.a.MessageField != "" { - b.Points = b.ShallowCopyPoints() - for i := range b.Points { - if a.a.LevelTag != "" || a.a.IdTag != "" { - b.Points[i].Tags = b.Points[i].Tags.Copy() - if a.a.LevelTag != "" { - b.Points[i].Tags[a.a.LevelTag] = l.String() - } - if a.a.IdTag != "" { - b.Points[i].Tags[a.a.IdTag] = event.State.ID - } - } - if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" || a.a.MessageField != "" { - b.Points[i].Fields = b.Points[i].Fields.Copy() - if a.a.LevelField != "" { - b.Points[i].Fields[a.a.LevelField] = l.String() - } - if a.a.MessageField != "" { - b.Points[i].Fields[a.a.MessageField] = event.State.Message - } - if a.a.IdField != "" { - b.Points[i].Fields[a.a.IdField] = event.State.ID - } - if a.a.DurationField != "" { - b.Points[i].Fields[a.a.DurationField] = int64(duration) - } - } - } - if a.a.LevelTag != "" || a.a.IdTag != "" { - b.Tags = b.Tags.Copy() - if a.a.LevelTag != "" { - b.Tags[a.a.LevelTag] = l.String() - } - if a.a.IdTag != "" { - b.Tags[a.a.IdTag] = event.State.ID - } - } - } - a.timer.Pause() - for _, child := range a.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - a.timer.Resume() - } - a.timer.Stop() - } + if err := consumer.Consume(); err != nil { + return err } + // Close the anonymous topic. 
- a.et.tm.AlertService.CloseTopic(a.anonTopic) + n.et.tm.AlertService.CloseTopic(n.anonTopic) + // Deregister Handlers on topic - for _, h := range a.handlers { - a.et.tm.AlertService.DeregisterAnonHandler(a.anonTopic, h) + for _, h := range n.handlers { + n.et.tm.AlertService.DeregisterAnonHandler(n.anonTopic, h) } return nil } -func deleteAlertHook(anonTopic string) deleteHook { - return func(tm *TaskMaster) { - tm.AlertService.DeleteTopic(anonTopic) +func (n *AlertNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + id, err := n.renderID(first.Name(), first.GroupID(), first.Tags()) + if err != nil { + return nil, err } + t := first.Time() + + state := n.restoreEventState(id, t) + + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver( + n.timer, + state, + ), + ), nil } -func (a *AlertNode) hasAnonTopic() bool { - return len(a.handlers) > 0 +func (n *AlertNode) restoreEventState(id string, t time.Time) *alertState { + state := n.newAlertState() + currentLevel, triggered := n.restoreEvent(id) + if currentLevel != alert.OK { + // Add initial event + state.addEvent(t, currentLevel) + // Record triggered time + state.triggered(triggered) + } + return state } -func (a *AlertNode) hasTopic() bool { - return a.topic != "" + +func (n *AlertNode) newAlertState() *alertState { + return &alertState{ + history: make([]alert.Level, n.a.History), + n: n, + buffer: new(edge.BatchBuffer), + } } -func (a *AlertNode) restoreEventState(id string) (alert.Level, time.Time) { +func (n *AlertNode) restoreEvent(id string) (alert.Level, time.Time) { var topicState, anonTopicState alert.EventState var anonFound, topicFound bool // Check for previous state on anonTopic - if a.hasAnonTopic() { - if state, ok, err := a.et.tm.AlertService.EventState(a.anonTopic, id); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! failed to get event state for anonymous topic %s, event %s: %v", a.anonTopic, id, err) + if n.hasAnonTopic() { + if state, ok, err := n.et.tm.AlertService.EventState(n.anonTopic, id); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! failed to get event state for anonymous topic %s, event %s: %v", n.anonTopic, id, err) } else if ok { anonTopicState = state anonFound = true } } // Check for previous state on topic. - if a.hasTopic() { - if state, ok, err := a.et.tm.AlertService.EventState(a.topic, id); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! failed to get event state for topic %s, event %s: %v", a.topic, id, err) + if n.hasTopic() { + if state, ok, err := n.et.tm.AlertService.EventState(n.topic, id); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! failed to get event state for topic %s, event %s: %v", n.topic, id, err) } else if ok { topicState = state topicFound = true @@ -766,15 +577,15 @@ func (a *AlertNode) restoreEventState(id string) (alert.Level, time.Time) { if topicState.Level != anonTopicState.Level { if anonFound && topicFound { // Anon topic takes precedence - if err := a.et.tm.AlertService.UpdateEvent(a.topic, anonTopicState); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! failed to update topic %q event state for event %q", a.topic, id) + if err := n.et.tm.AlertService.UpdateEvent(n.topic, anonTopicState); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! 
failed to update topic %q event state for event %q", n.topic, id) } - } else if topicFound && a.hasAnonTopic() { + } else if topicFound && n.hasAnonTopic() { // Update event state for topic - if err := a.et.tm.AlertService.UpdateEvent(a.anonTopic, topicState); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! failed to update topic %q event state for event %q", a.topic, id) + if err := n.et.tm.AlertService.UpdateEvent(n.anonTopic, topicState); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! failed to update topic %q event state for event %q", n.topic, id) } } // else nothing was found, nothing to do } @@ -784,73 +595,86 @@ func (a *AlertNode) restoreEventState(id string) (alert.Level, time.Time) { return topicState.Level, topicState.Time } -func (a *AlertNode) handleEvent(event alert.Event) { - a.alertsTriggered.Add(1) +func deleteAlertHook(anonTopic string) deleteHook { + return func(tm *TaskMaster) { + tm.AlertService.DeleteTopic(anonTopic) + } +} + +func (n *AlertNode) hasAnonTopic() bool { + return len(n.handlers) > 0 +} +func (n *AlertNode) hasTopic() bool { + return n.topic != "" +} + +func (n *AlertNode) handleEvent(event alert.Event) { + n.alertsTriggered.Add(1) switch event.State.Level { case alert.OK: - a.oksTriggered.Add(1) + n.oksTriggered.Add(1) case alert.Info: - a.infosTriggered.Add(1) + n.infosTriggered.Add(1) case alert.Warning: - a.warnsTriggered.Add(1) + n.warnsTriggered.Add(1) case alert.Critical: - a.critsTriggered.Add(1) + n.critsTriggered.Add(1) } - a.logger.Printf("D! %v alert triggered id:%s msg:%s data:%v", event.State.Level, event.State.ID, event.State.Message, event.Data.Result.Series[0]) + n.logger.Printf("D! %v alert triggered id:%s msg:%s data:%v", event.State.Level, event.State.ID, event.State.Message, event.Data.Result.Series[0]) // If we have anon handlers, emit event to the anonTopic - if a.hasAnonTopic() { - event.Topic = a.anonTopic - err := a.et.tm.AlertService.Collect(event) + if n.hasAnonTopic() { + event.Topic = n.anonTopic + err := n.et.tm.AlertService.Collect(event) if err != nil { - a.eventsDropped.Add(1) - a.incrementErrorCount() - a.logger.Println("E!", err) + n.eventsDropped.Add(1) + n.incrementErrorCount() + n.logger.Println("E!", err) } } // If we have a user define topic, emit event to the topic. - if a.hasTopic() { - event.Topic = a.topic - err := a.et.tm.AlertService.Collect(event) + if n.hasTopic() { + event.Topic = n.topic + err := n.et.tm.AlertService.Collect(event) if err != nil { - a.eventsDropped.Add(1) - a.incrementErrorCount() - a.logger.Println("E!", err) + n.eventsDropped.Add(1) + n.incrementErrorCount() + n.logger.Println("E!", err) } } } -func (a *AlertNode) determineLevel(now time.Time, fields models.Fields, tags map[string]string, currentLevel alert.Level) alert.Level { - if higherLevel, found := a.findFirstMatchLevel(alert.Critical, currentLevel-1, now, fields, tags); found { +func (n *AlertNode) determineLevel(p edge.FieldsTagsTimeGetter, currentLevel alert.Level) alert.Level { + if higherLevel, found := n.findFirstMatchLevel(alert.Critical, currentLevel-1, p); found { return higherLevel } - if rse := a.levelResets[currentLevel]; rse != nil { - if pass, err := EvalPredicate(rse, a.lrScopePools[currentLevel], now, fields, tags); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! 
error evaluating reset expression for current level %v: %s", currentLevel, err) + if rse := n.levelResets[currentLevel]; rse != nil { + if pass, err := EvalPredicate(rse, n.lrScopePools[currentLevel], p); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! error evaluating reset expression for current level %v: %s", currentLevel, err) } else if !pass { return currentLevel } } - if newLevel, found := a.findFirstMatchLevel(currentLevel, alert.OK, now, fields, tags); found { + if newLevel, found := n.findFirstMatchLevel(currentLevel, alert.OK, p); found { return newLevel } return alert.OK } -func (a *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, now time.Time, fields models.Fields, tags map[string]string) (alert.Level, bool) { +func (n *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, p edge.FieldsTagsTimeGetter) (alert.Level, bool) { if stop < alert.OK { stop = alert.OK } for l := start; l > stop; l-- { - se := a.levels[l] + se := n.levels[l] if se == nil { continue } - if pass, err := EvalPredicate(se, a.scopePools[l], now, fields, tags); err != nil { - a.incrementErrorCount() - a.logger.Printf("E! error evaluating expression for level %v: %s", alert.Level(l), err) + if pass, err := EvalPredicate(se, n.scopePools[l], p); err != nil { + n.incrementErrorCount() + n.logger.Printf("E! error evaluating expression for level %v: %s", alert.Level(l), err) continue } else if pass { return alert.Level(l), true @@ -859,7 +683,7 @@ func (a *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, now return alert.OK, false } -func (a *AlertNode) event( +func (n *AlertNode) event( id, name string, group models.GroupID, tags models.Tags, @@ -867,14 +691,14 @@ func (a *AlertNode) event( level alert.Level, t time.Time, d time.Duration, - b models.Batch, + result models.Result, ) (alert.Event, error) { - msg, details, err := a.renderMessageAndDetails(id, name, t, group, tags, fields, level) + msg, details, err := n.renderMessageAndDetails(id, name, t, group, tags, fields, level) if err != nil { return alert.Event{}, err } event := alert.Event{ - Topic: a.anonTopic, + Topic: n.anonTopic, State: alert.EventState{ ID: id, Message: msg, @@ -885,21 +709,27 @@ func (a *AlertNode) event( }, Data: alert.EventData{ Name: name, - TaskName: a.et.Task.ID, + TaskName: n.et.Task.ID, Group: string(group), Tags: tags, Fields: fields, - Result: models.BatchToResult(b), + Result: result, }, } return event, nil } type alertState struct { - history []alert.Level - idx int + n *AlertNode + + buffer *edge.BatchBuffer + + history []alert.Level + idx int + flapping bool - changed bool + + changed bool // Time when first alert was triggered firstTriggered time.Time // Time when last alert was triggered. 
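The alertState methods added in the following hunk are one instance of the general receiver pattern this patch introduces everywhere: a node's run function builds a grouped consumer over its input edge, `NewGroup` returns a receiver per group, and each message kind (point, batch begin/point/end, barrier, group delete) is handled by its own method, which returns the message to forward downstream or nil to drop it. Below is a minimal sketch of that pattern, assembled only from constructors and signatures visible in this diff; `MyNode` and `myGroup` are illustrative names and are not part of the change.

```go
// Illustrative sketch only: MyNode and myGroup are hypothetical and show the
// message-passing pattern used by the real nodes in this patch. The embedded
// node supplies ins, outs, timer and statMap, as it does for AlertNode et al.
type MyNode struct {
	node
}

// runMyNode replaces the old `for p, ok := n.ins[0].NextPoint(); ...` loop:
// a grouped consumer reads the input edge and dispatches messages per group.
func (n *MyNode) runMyNode([]byte) error {
	consumer := edge.NewGroupedConsumer(n.ins[0], n)
	n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())
	return consumer.Consume()
}

// NewGroup is called once for each new group; the returned receiver is
// wrapped so that returned messages are timed and forwarded to n.outs.
func (n *MyNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {
	return edge.NewReceiverFromForwardReceiverWithStats(
		n.outs,
		edge.NewTimedForwardReceiver(n.timer, &myGroup{}),
	), nil
}

// myGroup handles one message type per method. Returning the message forwards
// it downstream; returning nil drops it. This no-op version passes everything
// through unchanged.
type myGroup struct{}

func (g *myGroup) BeginBatch(b edge.BeginBatchMessage) (edge.Message, error)   { return b, nil }
func (g *myGroup) BatchPoint(p edge.BatchPointMessage) (edge.Message, error)   { return p, nil }
func (g *myGroup) EndBatch(e edge.EndBatchMessage) (edge.Message, error)       { return e, nil }
func (g *myGroup) Point(p edge.PointMessage) (edge.Message, error)             { return p, nil }
func (g *myGroup) Barrier(b edge.BarrierMessage) (edge.Message, error)         { return b, nil }
func (g *myGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { return d, nil }
```

The alert node follows this shape exactly, except that its per-group receiver (alertState, below) buffers batch points and only emits when an alert event actually fires.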
@@ -908,6 +738,196 @@ type alertState struct { expired bool } +func (a *alertState) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + return nil, a.buffer.BeginBatch(begin) +} + +func (a *alertState) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return nil, a.buffer.BatchPoint(bp) +} + +func (a *alertState) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return a.BufferedBatch(a.buffer.BufferedBatchMessage(end)) +} + +func (a *alertState) BufferedBatch(b edge.BufferedBatchMessage) (edge.Message, error) { + begin := b.Begin() + id, err := a.n.renderID(begin.Name(), begin.GroupID(), begin.Tags()) + if err != nil { + return nil, err + } + if len(b.Points()) == 0 { + return nil, nil + } + // Keep track of lowest level for any point + lowestLevel := alert.Critical + // Keep track of highest level and point + highestLevel := alert.OK + var highestPoint edge.BatchPointMessage + + currentLevel := a.currentLevel() + for _, bp := range b.Points() { + l := a.n.determineLevel(bp, currentLevel) + if l < lowestLevel { + lowestLevel = l + } + if l > highestLevel || highestPoint == nil { + highestLevel = l + highestPoint = bp + } + } + + // Default the determined level to lowest. + l := lowestLevel + // Update determined level to highest if we don't care about all + if !a.n.a.AllFlag { + l = highestLevel + } + // Create alert Data + t := highestPoint.Time() + if a.n.a.AllFlag || l == alert.OK { + t = begin.Time() + } + + a.addEvent(t, l) + + // Trigger alert only if: + // l == OK and state.changed (aka recovery) + // OR + // l != OK and flapping/statechanges checkout + if !(a.changed && l == alert.OK || + (l != alert.OK && + !((a.n.a.UseFlapping && a.flapping) || + (a.n.a.IsStateChangesOnly && !a.changed && !a.expired)))) { + return nil, nil + } + + a.triggered(t) + + // Suppress the recovery event. + if a.n.a.NoRecoveriesFlag && l == alert.OK { + return nil, nil + } + + duration := a.duration() + event, err := a.n.event(id, begin.Name(), begin.GroupID(), begin.Tags(), highestPoint.Fields(), l, t, duration, b.ToResult()) + if err != nil { + return nil, err + } + + a.n.handleEvent(event) + + // Update tags or fields with event state + if a.n.a.LevelTag != "" || + a.n.a.LevelField != "" || + a.n.a.IdTag != "" || + a.n.a.IdField != "" || + a.n.a.DurationField != "" || + a.n.a.MessageField != "" { + + b = b.ShallowCopy() + points := make([]edge.BatchPointMessage, len(b.Points())) + for i, bp := range b.Points() { + bp = bp.ShallowCopy() + a.augmentTagsWithEventState(bp, event.State) + a.augmentFieldsWithEventState(bp, event.State) + points[i] = bp + } + b.SetPoints(points) + + newBegin := begin.ShallowCopy() + a.augmentTagsWithEventState(newBegin, event.State) + b.SetBegin(newBegin) + } + return b, nil +} + +func (a *alertState) Point(p edge.PointMessage) (edge.Message, error) { + id, err := a.n.renderID(p.Name(), p.GroupID(), p.Tags()) + if err != nil { + return nil, err + } + l := a.n.determineLevel(p, a.currentLevel()) + + a.addEvent(p.Time(), l) + + if (a.n.a.UseFlapping && a.flapping) || (a.n.a.IsStateChangesOnly && !a.changed && !a.expired) { + return nil, nil + } + // send alert if we are not OK or we are OK and state changed (i.e recovery) + if l != alert.OK || a.changed { + a.triggered(p.Time()) + // Suppress the recovery event. 
+ if a.n.a.NoRecoveriesFlag && l == alert.OK { + return nil, nil + } + // Create an alert event + duration := a.duration() + event, err := a.n.event( + id, + p.Name(), + p.GroupID(), + p.Tags(), + p.Fields(), + l, + p.Time(), + duration, + p.ToResult(), + ) + if err != nil { + return nil, err + } + + a.n.handleEvent(event) + + // Prepare an augmented point to return + p = p.ShallowCopy() + a.augmentTagsWithEventState(p, event.State) + a.augmentFieldsWithEventState(p, event.State) + return p, nil + } + return nil, nil +} + +func (a *alertState) augmentTagsWithEventState(p edge.TagSetter, eventState alert.EventState) { + if a.n.a.LevelTag != "" || a.n.a.IdTag != "" { + tags := p.Tags().Copy() + if a.n.a.LevelTag != "" { + tags[a.n.a.LevelTag] = eventState.Level.String() + } + if a.n.a.IdTag != "" { + tags[a.n.a.IdTag] = eventState.ID + } + p.SetTags(tags) + } +} + +func (a *alertState) augmentFieldsWithEventState(p edge.FieldSetter, eventState alert.EventState) { + if a.n.a.LevelField != "" || a.n.a.IdField != "" || a.n.a.DurationField != "" || a.n.a.MessageField != "" { + fields := p.Fields().Copy() + if a.n.a.LevelField != "" { + fields[a.n.a.LevelField] = eventState.Level.String() + } + if a.n.a.MessageField != "" { + fields[a.n.a.MessageField] = eventState.Message + } + if a.n.a.IdField != "" { + fields[a.n.a.IdField] = eventState.ID + } + if a.n.a.DurationField != "" { + fields[a.n.a.DurationField] = int64(eventState.Duration) + } + p.SetFields(fields) + } +} + +func (a *alertState) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (a *alertState) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + // Return the duration of the current alert state. func (a *alertState) duration() time.Duration { return a.lastTriggered.Sub(a.firstTriggered) @@ -928,10 +948,16 @@ func (a *alertState) triggered(t time.Time) { } // Record an event in the alert history. 
-func (a *alertState) addEvent(level alert.Level) { +func (a *alertState) addEvent(t time.Time, level alert.Level) { + // Check for changes a.changed = a.history[a.idx] != level + + // Add event to history a.idx = (a.idx + 1) % len(a.history) a.history[a.idx] = level + + a.updateFlapping() + a.updateExpired(t) } // Return current level of this state @@ -964,28 +990,20 @@ func (a *alertState) percentChange() float64 { return p } -func (a *AlertNode) updateState(t time.Time, level alert.Level, group models.GroupID) *alertState { - state, ok := a.getAlertState(group) - if !ok { - state = &alertState{ - history: make([]alert.Level, a.a.History), - } - a.statesMu.Lock() - a.states[group] = state - a.statesMu.Unlock() - } - state.addEvent(level) - - if a.a.UseFlapping { - p := state.percentChange() - if state.flapping && p < a.a.FlapLow { - state.flapping = false - } else if !state.flapping && p > a.a.FlapHigh { - state.flapping = true - } +func (a *alertState) updateFlapping() { + if !a.n.a.UseFlapping { + return + } + p := a.percentChange() + if a.flapping && p < a.n.a.FlapLow { + a.flapping = false + } else if !a.flapping && p > a.n.a.FlapHigh { + a.flapping = true } - state.expired = !state.changed && a.a.StateChangesOnlyDuration != 0 && t.Sub(state.lastTriggered) >= a.a.StateChangesOnlyDuration - return state +} + +func (a *alertState) updateExpired(t time.Time) { + a.expired = !a.changed && a.n.a.StateChangesOnlyDuration != 0 && t.Sub(a.lastTriggered) >= a.n.a.StateChangesOnlyDuration } type serverInfo struct { @@ -1034,40 +1052,40 @@ type detailsInfo struct { Message string } -func (a *AlertNode) serverInfo() serverInfo { +func (n *AlertNode) serverInfo() serverInfo { return serverInfo{ - Hostname: a.et.tm.ServerInfo.Hostname(), - ClusterID: a.et.tm.ServerInfo.ClusterID().String(), - ServerID: a.et.tm.ServerInfo.ServerID().String(), + Hostname: n.et.tm.ServerInfo.Hostname(), + ClusterID: n.et.tm.ServerInfo.ClusterID().String(), + ServerID: n.et.tm.ServerInfo.ServerID().String(), } } -func (a *AlertNode) renderID(name string, group models.GroupID, tags models.Tags) (string, error) { +func (n *AlertNode) renderID(name string, group models.GroupID, tags models.Tags) (string, error) { g := string(group) if group == models.NilGroup { g = "nil" } info := idInfo{ Name: name, - TaskName: a.et.Task.ID, + TaskName: n.et.Task.ID, Group: g, Tags: tags, - ServerInfo: a.serverInfo(), + ServerInfo: n.serverInfo(), } - id := a.bufPool.Get().(*bytes.Buffer) + id := n.bufPool.Get().(*bytes.Buffer) defer func() { id.Reset() - a.bufPool.Put(id) + n.bufPool.Put(id) }() - err := a.idTmpl.Execute(id, info) + err := n.idTmpl.Execute(id, info) if err != nil { return "", err } return id.String(), nil } -func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group models.GroupID, tags models.Tags, fields models.Fields, level alert.Level) (string, string, error) { +func (n *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group models.GroupID, tags models.Tags, fields models.Fields, level alert.Level) (string, string, error) { g := string(group) if group == models.NilGroup { g = "nil" @@ -1075,10 +1093,10 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group minfo := messageInfo{ idInfo: idInfo{ Name: name, - TaskName: a.et.Task.ID, + TaskName: n.et.Task.ID, Group: g, Tags: tags, - ServerInfo: a.serverInfo(), + ServerInfo: n.serverInfo(), }, ID: id, Fields: fields, @@ -1087,14 +1105,14 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t 
time.Time, group } // Grab a buffer for the message template and the details template - tmpBuffer := a.bufPool.Get().(*bytes.Buffer) + tmpBuffer := n.bufPool.Get().(*bytes.Buffer) defer func() { tmpBuffer.Reset() - a.bufPool.Put(tmpBuffer) + n.bufPool.Put(tmpBuffer) }() tmpBuffer.Reset() - err := a.messageTmpl.Execute(tmpBuffer, minfo) + err := n.messageTmpl.Execute(tmpBuffer, minfo) if err != nil { return "", "", err } @@ -1107,7 +1125,7 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group // Reuse the buffer, for the details template tmpBuffer.Reset() - err = a.detailsTmpl.Execute(tmpBuffer, dinfo) + err = n.detailsTmpl.Execute(tmpBuffer, dinfo) if err != nil { return "", "", err } @@ -1115,10 +1133,3 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group details := tmpBuffer.String() return msg, details, nil } - -func (a *AlertNode) getAlertState(id models.GroupID) (state *alertState, ok bool) { - a.statesMu.RLock() - state, ok = a.states[id] - a.statesMu.RUnlock() - return state, ok -} diff --git a/batch.go b/batch.go index 0d7502563..155a3fbe5 100644 --- a/batch.go +++ b/batch.go @@ -9,9 +9,9 @@ import ( "github.com/gorhill/cronexpr" "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/influxdb" - "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/pkg/errors" ) @@ -35,38 +35,38 @@ func newBatchNode(et *ExecutingTask, n *pipeline.BatchNode, l *log.Logger) (*Bat return sn, nil } -func (s *BatchNode) linkChild(c Node) error { +func (n *BatchNode) linkChild(c Node) error { // add child - if s.Provides() != c.Wants() { - return fmt.Errorf("cannot add child mismatched edges: %s -> %s", s.Provides(), c.Wants()) + if n.Provides() != c.Wants() { + return fmt.Errorf("cannot add child mismatched edges: %s -> %s", n.Provides(), c.Wants()) } - s.children = append(s.children, c) + n.children = append(n.children, c) // add parent - c.addParent(s) + c.addParent(n) return nil } -func (s *BatchNode) addParentEdge(in *Edge) { +func (n *BatchNode) addParentEdge(in edge.StatsEdge) { // Pass edges down to children - s.children[s.idx].addParentEdge(in) - s.idx++ + n.children[n.idx].addParentEdge(in) + n.idx++ } -func (s *BatchNode) start([]byte) { +func (n *BatchNode) start([]byte) { } -func (s *BatchNode) Wait() error { +func (n *BatchNode) Wait() error { return nil } // Return list of databases and retention policies // the batcher will query. 
-func (s *BatchNode) DBRPs() ([]DBRP, error) { +func (n *BatchNode) DBRPs() ([]DBRP, error) { var dbrps []DBRP - for _, b := range s.children { + for _, b := range n.children { d, err := b.(*QueryNode).DBRPs() if err != nil { return nil, err @@ -76,18 +76,18 @@ func (s *BatchNode) DBRPs() ([]DBRP, error) { return dbrps, nil } -func (s *BatchNode) Count() int { - return len(s.children) +func (n *BatchNode) Count() int { + return len(n.children) } -func (s *BatchNode) Start() { - for _, b := range s.children { +func (n *BatchNode) Start() { + for _, b := range n.children { b.(*QueryNode).Start() } } -func (s *BatchNode) Abort() { - for _, b := range s.children { +func (n *BatchNode) Abort() { + for _, b := range n.children { b.(*QueryNode).Abort() } } @@ -98,9 +98,9 @@ type BatchQueries struct { GroupByMeasurement bool } -func (s *BatchNode) Queries(start, stop time.Time) ([]BatchQueries, error) { - queries := make([]BatchQueries, len(s.children)) - for i, b := range s.children { +func (n *BatchNode) Queries(start, stop time.Time) ([]BatchQueries, error) { + queries := make([]BatchQueries, len(n.children)) + for i, b := range n.children { qn := b.(*QueryNode) qs, err := qn.Queries(start, stop) if err != nil { @@ -117,10 +117,10 @@ func (s *BatchNode) Queries(start, stop time.Time) ([]BatchQueries, error) { // Do not add the source batch node to the dot output // since its not really an edge. -func (s *BatchNode) edot(*bytes.Buffer, bool) {} +func (n *BatchNode) edot(*bytes.Buffer, bool) {} -func (s *BatchNode) collectedCount() (count int64) { - for _, child := range s.children { +func (n *BatchNode) collectedCount() (count int64) { + for _, child := range n.children { count += child.collectedCount() } return @@ -206,34 +206,34 @@ func newQueryNode(et *ExecutingTask, n *pipeline.QueryNode, l *log.Logger) (*Que return bn, nil } -func (b *QueryNode) GroupByMeasurement() bool { - return b.byName +func (n *QueryNode) GroupByMeasurement() bool { + return n.byName } // Return list of databases and retention policies // the batcher will query. 
-func (b *QueryNode) DBRPs() ([]DBRP, error) { - return b.query.DBRPs() +func (n *QueryNode) DBRPs() ([]DBRP, error) { + return n.query.DBRPs() } -func (b *QueryNode) Start() { - b.queryMu.Lock() - defer b.queryMu.Unlock() - b.queryErr = make(chan error, 1) +func (n *QueryNode) Start() { + n.queryMu.Lock() + defer n.queryMu.Unlock() + n.queryErr = make(chan error, 1) go func() { - b.queryErr <- b.doQuery() + n.queryErr <- n.doQuery(n.ins[0]) }() } -func (b *QueryNode) Abort() { - close(b.aborting) +func (n *QueryNode) Abort() { + close(n.aborting) } -func (b *QueryNode) Cluster() string { - return b.b.Cluster +func (n *QueryNode) Cluster() string { + return n.b.Cluster } -func (b *QueryNode) Queries(start, stop time.Time) ([]*Query, error) { +func (n *QueryNode) Queries(start, stop time.Time) ([]*Query, error) { now := time.Now() if stop.IsZero() { stop = now @@ -243,20 +243,20 @@ func (b *QueryNode) Queries(start, stop time.Time) ([]*Query, error) { current := start.Local() queries := make([]*Query, 0) for { - current = b.ticker.Next(current) + current = n.ticker.Next(current) if current.IsZero() || current.After(stop) { break } - qstop := current.Add(-1 * b.b.Offset) + qstop := current.Add(-1 * n.b.Offset) if qstop.After(now) { break } - q, err := b.query.Clone() + q, err := n.query.Clone() if err != nil { return nil, err } - q.SetStartTime(qstop.Add(-1 * b.b.Period)) + q.SetStartTime(qstop.Add(-1 * n.b.Period)) q.SetStopTime(qstop) queries = append(queries, q) } @@ -264,38 +264,38 @@ func (b *QueryNode) Queries(start, stop time.Time) ([]*Query, error) { } // Query InfluxDB and collect batches on batch collector. -func (b *QueryNode) doQuery() error { - defer b.ins[0].Close() - b.batchesQueried = &expvar.Int{} - b.pointsQueried = &expvar.Int{} +func (n *QueryNode) doQuery(in edge.Edge) error { + defer in.Close() + n.batchesQueried = &expvar.Int{} + n.pointsQueried = &expvar.Int{} - b.statMap.Set(statsBatchesQueried, b.batchesQueried) - b.statMap.Set(statsPointsQueried, b.pointsQueried) + n.statMap.Set(statsBatchesQueried, n.batchesQueried) + n.statMap.Set(statsPointsQueried, n.pointsQueried) - if b.et.tm.InfluxDBService == nil { + if n.et.tm.InfluxDBService == nil { return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query") } - con, err := b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster) + con, err := n.et.tm.InfluxDBService.NewNamedClient(n.b.Cluster) if err != nil { return errors.Wrap(err, "failed to get InfluxDB client") } - tickC := b.ticker.Start() + tickC := n.ticker.Start() for { select { - case <-b.closing: + case <-n.closing: return nil - case <-b.aborting: + case <-n.aborting: return errors.New("batch doQuery aborted") case now := <-tickC: - b.timer.Start() + n.timer.Start() // Update times for query - stop := now.Add(-1 * b.b.Offset) - b.query.SetStartTime(stop.Add(-1 * b.b.Period)) - b.query.SetStopTime(stop) + stop := now.Add(-1 * n.b.Offset) + n.query.SetStartTime(stop.Add(-1 * n.b.Period)) + n.query.SetStopTime(stop) - qStr := b.query.String() - b.logger.Println("D! starting next batch query:", qStr) + qStr := n.query.String() + n.logger.Println("D! 
starting next batch query:", qStr) // Execute query q := influxdb.Query{ @@ -303,41 +303,42 @@ func (b *QueryNode) doQuery() error { } resp, err := con.Query(q) if err != nil { - b.incrementErrorCount() - b.logger.Println("E!", err) - b.timer.Stop() + n.incrementErrorCount() + n.logger.Println("E!", err) + n.timer.Stop() break } // Collect batches for _, res := range resp.Results { - batches, err := models.ResultToBatches(res, b.byName) + batches, err := edge.ResultToBufferedBatches(res, n.byName) if err != nil { - b.incrementErrorCount() - b.logger.Println("E! failed to understand query result:", err) + n.incrementErrorCount() + n.logger.Println("E! failed to understand query result:", err) continue } for _, bch := range batches { // Set stop time based off query bounds - if bch.TMax.IsZero() || !b.query.IsGroupedByTime() { - bch.TMax = stop + if bch.Begin().Time().IsZero() || !n.query.IsGroupedByTime() { + bch.Begin().SetTime(stop) } - b.batchesQueried.Add(1) - b.pointsQueried.Add(int64(len(bch.Points))) - b.timer.Pause() - err := b.ins[0].CollectBatch(bch) - if err != nil { + + n.batchesQueried.Add(1) + n.pointsQueried.Add(int64(len(bch.Points()))) + + n.timer.Pause() + if err := in.Collect(bch); err != nil { return err } - b.timer.Resume() + n.timer.Resume() } } - b.timer.Stop() + n.timer.Stop() } } } -func (b *QueryNode) runBatch([]byte) error { +func (n *QueryNode) runBatch([]byte) error { errC := make(chan error, 1) go func() { defer func() { @@ -346,9 +347,9 @@ func (b *QueryNode) runBatch([]byte) error { errC <- fmt.Errorf("%v", err) } }() - for bt, ok := b.ins[0].NextBatch(); ok; bt, ok = b.ins[0].NextBatch() { - for _, child := range b.outs { - err := child.CollectBatch(bt) + for bt, ok := n.ins[0].Emit(); ok; bt, ok = n.ins[0].Emit() { + for _, child := range n.outs { + err := child.Collect(bt) if err != nil { errC <- err return @@ -358,22 +359,22 @@ func (b *QueryNode) runBatch([]byte) error { errC <- nil }() var queryErr error - b.queryMu.Lock() - if b.queryErr != nil { - b.queryMu.Unlock() + n.queryMu.Lock() + if n.queryErr != nil { + n.queryMu.Unlock() select { - case queryErr = <-b.queryErr: - case <-b.aborting: + case queryErr = <-n.queryErr: + case <-n.aborting: queryErr = errors.New("batch queryErr aborted") } } else { - b.queryMu.Unlock() + n.queryMu.Unlock() } var err error select { case err = <-errC: - case <-b.aborting: + case <-n.aborting: err = errors.New("batch run aborted") } if queryErr != nil { @@ -382,11 +383,11 @@ func (b *QueryNode) runBatch([]byte) error { return err } -func (b *QueryNode) stopBatch() { - if b.ticker != nil { - b.ticker.Stop() +func (n *QueryNode) stopBatch() { + if n.ticker != nil { + n.ticker.Stop() } - close(b.closing) + close(n.closing) } type ticker interface { diff --git a/combine.go b/combine.go index 5722ceebf..5e40fe4b2 100644 --- a/combine.go +++ b/combine.go @@ -3,11 +3,9 @@ package kapacitor import ( "fmt" "log" - "sort" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/tick/ast" @@ -18,11 +16,8 @@ type CombineNode struct { node c *pipeline.CombineNode - expressions []stateful.Expression - expressionsByGroup map[models.GroupID][]stateful.Expression - scopePools []stateful.ScopePool - - expressionsByGroupMu sync.RWMutex + expressions []stateful.Expression + scopePools []stateful.ScopePool combination combination } @@ -30,10 +25,9 @@ type CombineNode struct { // 
Create a new CombineNode, which combines a stream with itself dynamically. func newCombineNode(et *ExecutingTask, n *pipeline.CombineNode, l *log.Logger) (*CombineNode, error) { cn := &CombineNode{ - c: n, - node: node{Node: n, et: et, logger: l}, - expressionsByGroup: make(map[models.GroupID][]stateful.Expression), - combination: combination{max: n.Max}, + c: n, + node: node{Node: n, et: et, logger: l}, + combination: combination{max: n.Max}, } // Create stateful expressions @@ -51,174 +45,142 @@ func newCombineNode(et *ExecutingTask, n *pipeline.CombineNode, l *log.Logger) ( return cn, nil } -type buffer struct { - Time time.Time - Name string - Group models.GroupID - Dimensions models.Dimensions - Points []rawPoint +func (n *CombineNode) runCombine([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() } -type timeList []time.Time +func (n *CombineNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + expressions := make([]stateful.Expression, len(n.expressions)) + for i, expr := range n.expressions { + expressions[i] = expr.CopyReset() + } + return &combineBuffer{ + n: n, + time: first.Time(), + name: first.Name(), + groupInfo: group, + expressions: expressions, + c: n.combination, + }, nil +} -func (t timeList) Len() int { return len(t) } -func (t timeList) Less(i, j int) bool { return t[i].Before(t[j]) } -func (t timeList) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +type combineBuffer struct { + n *CombineNode + time time.Time + name string + groupInfo edge.GroupInfo + points []edge.FieldsTagsTimeSetter + expressions []stateful.Expression + c combination -func (n *CombineNode) runCombine([]byte) error { - valueF := func() int64 { - n.expressionsByGroupMu.RLock() - l := len(n.expressionsByGroup) - n.expressionsByGroupMu.RUnlock() - return int64(l) + begin edge.BeginBatchMessage +} + +func (b *combineBuffer) BeginBatch(begin edge.BeginBatchMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + + b.name = begin.Name() + b.time = time.Time{} + if s := begin.SizeHint(); s > cap(b.points) { + b.points = make([]edge.FieldsTagsTimeSetter, 0, s) } - n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - switch n.Wants() { - case pipeline.StreamEdge: - buffers := make(map[models.GroupID]*buffer) - for p, ok := n.ins[0].NextPoint(); ok; p, ok = n.ins[0].NextPoint() { - n.timer.Start() - t := p.Time.Round(n.c.Tolerance) - currentBuf, ok := buffers[p.Group] - if !ok { - currentBuf = &buffer{ - Time: t, - Name: p.Name, - Group: p.Group, - Dimensions: p.Dimensions, - } - buffers[p.Group] = currentBuf - } - rp := rawPoint{ - Time: t, - Fields: p.Fields, - Tags: p.Tags, - } - if t.Equal(currentBuf.Time) { - currentBuf.Points = append(currentBuf.Points, rp) - } else { - if err := n.combineBuffer(currentBuf); err != nil { - return err - } - currentBuf.Time = t - currentBuf.Name = p.Name - currentBuf.Group = p.Group - currentBuf.Dimensions = p.Dimensions - currentBuf.Points = currentBuf.Points[0:1] - currentBuf.Points[0] = rp - } - n.timer.Stop() - } - case pipeline.BatchEdge: - allBuffers := make(map[models.GroupID]map[time.Time]*buffer) - groupTimes := make(map[models.GroupID]time.Time) - for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() { - n.timer.Start() - t := b.TMax.Round(n.c.Tolerance) - buffers, ok := allBuffers[b.Group] - if !ok { - buffers = make(map[time.Time]*buffer) - allBuffers[b.Group] = buffers - 
groupTimes[b.Group] = t - } - groupTime := groupTimes[b.Group] - if !t.Equal(groupTime) { - // Set new groupTime - groupTimes[b.Group] = t - // Combine/Emit all old buffers - times := make(timeList, 0, len(buffers)) - for t := range buffers { - times = append(times, t) - } - sort.Sort(times) - for _, t := range times { - if err := n.combineBuffer(buffers[t]); err != nil { - return err - } - delete(buffers, t) - } - } - for _, p := range b.Points { - t := p.Time.Round(n.c.Tolerance) - currentBuf, ok := buffers[t] - if !ok { - currentBuf = &buffer{ - Time: t, - Name: b.Name, - Group: b.Group, - Dimensions: b.PointDimensions(), - } - buffers[t] = currentBuf - } - currentBuf.Points = append(currentBuf.Points, rawPoint{ - Time: t, - Fields: p.Fields, - Tags: p.Tags, - }) - } - n.timer.Stop() + return nil +} + +func (b *combineBuffer) BatchPoint(bp edge.BatchPointMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + bp = bp.ShallowCopy() + return b.addPoint(bp) +} + +func (b *combineBuffer) EndBatch(end edge.EndBatchMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + if err := b.combine(); err != nil { + return err + } + b.points = b.points[0:0] + return nil +} + +func (b *combineBuffer) Point(p edge.PointMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + p = p.ShallowCopy() + return b.addPoint(p) +} + +func (b *combineBuffer) addPoint(p edge.FieldsTagsTimeSetter) error { + t := p.Time().Round(b.n.c.Tolerance) + p.SetTime(t) + if t.Equal(b.time) { + b.points = append(b.points, p) + } else { + if err := b.combine(); err != nil { + return err } + b.time = t + b.points = b.points[0:1] + b.points[0] = p } return nil } -// Simple container for point data. -type rawPoint struct { - Time time.Time - Fields models.Fields - Tags models.Tags +func (b *combineBuffer) Barrier(barrier edge.BarrierMessage) error { + return edge.Forward(b.n.outs, barrier) +} +func (b *combineBuffer) DeleteGroup(d edge.DeleteGroupMessage) error { + return edge.Forward(b.n.outs, d) } // Combine a set of points into all their combinations. -func (n *CombineNode) combineBuffer(buf *buffer) error { - if len(buf.Points) == 0 { +func (b *combineBuffer) combine() error { + if len(b.points) == 0 { return nil } - l := len(n.expressions) - n.expressionsByGroupMu.RLock() - expressions, ok := n.expressionsByGroup[buf.Group] - n.expressionsByGroupMu.RUnlock() - if !ok { - expressions = make([]stateful.Expression, l) - for i, expr := range n.expressions { - expressions[i] = expr.CopyReset() - } - n.expressionsByGroupMu.Lock() - n.expressionsByGroup[buf.Group] = expressions - n.expressionsByGroupMu.Unlock() - } + + l := len(b.expressions) // Compute matching result for all points matches := make([]map[int]bool, l) for i := 0; i < l; i++ { - matches[i] = make(map[int]bool, len(buf.Points)) + matches[i] = make(map[int]bool, len(b.points)) } - for idx, p := range buf.Points { - for i := range expressions { - matched, err := EvalPredicate(expressions[i], n.scopePools[i], p.Time, p.Fields, p.Tags) + for idx, p := range b.points { + for i := range b.expressions { + matched, err := EvalPredicate(b.expressions[i], b.n.scopePools[i], p) if err != nil { - n.incrementErrorCount() - n.logger.Println("E! evaluating lambda expression:", err) + b.n.incrementErrorCount() + b.n.logger.Println("E! 
evaluating lambda expression:", err) } matches[i][idx] = matched } } - p := models.Point{ - Name: buf.Name, - Group: buf.Group, - Dimensions: buf.Dimensions, - } - dimensions := p.Dimensions.ToSet() - set := make([]rawPoint, l) - return n.combination.Do(len(buf.Points), l, func(indices []int) error { + p := edge.NewPointMessage( + b.name, "", "", + b.groupInfo.Dimensions, + nil, + nil, + time.Time{}, + ) + + dimensions := p.Dimensions().ToSet() + set := make([]edge.FieldsTagsTimeSetter, l) + return b.c.Do(len(b.points), l, func(indices []int) error { valid := true for s := 0; s < l; s++ { found := false for i := range indices { if matches[s][indices[i]] { - set[s] = buf.Points[indices[i]] + set[s] = b.points[indices[i]] indices = append(indices[0:i], indices[i+1:]...) found = true break @@ -230,48 +192,43 @@ func (n *CombineNode) combineBuffer(buf *buffer) error { } } if valid { - rp := n.merge(set, dimensions) + fields, tags, t := b.merge(set, dimensions) - p.Time = rp.Time.Round(n.c.Tolerance) - p.Fields = rp.Fields - p.Tags = rp.Tags + np := p.ShallowCopy() + np.SetFields(fields) + np.SetTags(tags) + np.SetTime(t.Round(b.n.c.Tolerance)) - n.timer.Pause() - for _, out := range n.outs { - err := out.CollectPoint(p) - if err != nil { - return err - } + b.n.timer.Pause() + err := edge.Forward(b.n.outs, np) + b.n.timer.Resume() + if err != nil { + return err } - n.timer.Resume() } return nil }) } // Merge a set of points into a single point. -func (n *CombineNode) merge(points []rawPoint, dimensions map[string]bool) rawPoint { - fields := make(models.Fields, len(points[0].Fields)*len(points)) - tags := make(models.Tags, len(points[0].Tags)*len(points)) +func (b *combineBuffer) merge(points []edge.FieldsTagsTimeSetter, dimensions map[string]bool) (models.Fields, models.Tags, time.Time) { + fields := make(models.Fields, len(points[0].Fields())*len(points)) + tags := make(models.Tags, len(points[0].Tags())*len(points)) for i, p := range points { - for field, value := range p.Fields { - fields[n.c.Names[i]+n.c.Delimiter+field] = value + for field, value := range p.Fields() { + fields[b.n.c.Names[i]+b.n.c.Delimiter+field] = value } - for tag, value := range p.Tags { + for tag, value := range p.Tags() { if !dimensions[tag] { - tags[n.c.Names[i]+n.c.Delimiter+tag] = value + tags[b.n.c.Names[i]+b.n.c.Delimiter+tag] = value } else { tags[tag] = value } } } - return rawPoint{ - Time: points[0].Time, - Fields: fields, - Tags: tags, - } + return fields, tags, points[0].Time() } // Type for performing actions on a set of combinations. diff --git a/default.go b/default.go index a8e92c1fb..3cba8b662 100644 --- a/default.go +++ b/default.go @@ -3,6 +3,7 @@ package kapacitor import ( "log" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -24,76 +25,85 @@ type DefaultNode struct { // Create a new DefaultNode which applies a transformation func to each point in a stream and returns a single point. 
func newDefaultNode(et *ExecutingTask, n *pipeline.DefaultNode, l *log.Logger) (*DefaultNode, error) { dn := &DefaultNode{ - node: node{Node: n, et: et, logger: l}, - d: n, + node: node{Node: n, et: et, logger: l}, + d: n, + fieldsDefaulted: new(expvar.Int), + tagsDefaulted: new(expvar.Int), } dn.node.runF = dn.runDefault return dn, nil } -func (e *DefaultNode) runDefault(snapshot []byte) error { - e.fieldsDefaulted = &expvar.Int{} - e.tagsDefaulted = &expvar.Int{} +func (n *DefaultNode) runDefault(snapshot []byte) error { + n.statMap.Set(statsFieldsDefaulted, n.fieldsDefaulted) + n.statMap.Set(statsTagsDefaulted, n.tagsDefaulted) - e.statMap.Set(statsFieldsDefaulted, e.fieldsDefaulted) - e.statMap.Set(statsTagsDefaulted, e.tagsDefaulted) - switch e.Provides() { - case pipeline.StreamEdge: - for p, ok := e.ins[0].NextPoint(); ok; p, ok = e.ins[0].NextPoint() { - e.timer.Start() - p.Fields, p.Tags = e.setDefaults(p.Fields, p.Tags) - p.UpdateGroup() - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() { - e.timer.Start() - b.Points = b.ShallowCopyPoints() - _, b.Tags = e.setDefaults(nil, b.Tags) - b.UpdateGroup() - for i := range b.Points { - b.Points[i].Fields, b.Points[i].Tags = e.setDefaults(b.Points[i].Fields, b.Points[i].Tags) - } - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } - } - return nil + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() +} + +func (n *DefaultNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + begin = begin.ShallowCopy() + _, tags := n.setDefaults(nil, begin.Tags()) + begin.SetTags(tags) + return begin, nil +} + +func (n *DefaultNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + bp = bp.ShallowCopy() + fields, tags := n.setDefaults(bp.Fields(), bp.Tags()) + bp.SetFields(fields) + bp.SetTags(tags) + return bp, nil +} + +func (n *DefaultNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (n *DefaultNode) Point(p edge.PointMessage) (edge.Message, error) { + p = p.ShallowCopy() + fields, tags := n.setDefaults(p.Fields(), p.Tags()) + p.SetFields(fields) + p.SetTags(tags) + return p, nil +} + +func (n *DefaultNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *DefaultNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } -func (d *DefaultNode) setDefaults(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) { +func (n *DefaultNode) setDefaults(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) { newFields := fields fieldsCopied := false - for field, value := range d.d.Fields { + for field, value := range n.d.Fields { if v := fields[field]; v == nil { if !fieldsCopied { newFields = newFields.Copy() fieldsCopied = true } - d.fieldsDefaulted.Add(1) + n.fieldsDefaulted.Add(1) newFields[field] = value } } newTags := tags tagsCopied := false - for tag, value := range d.d.Tags { + for tag, value := range n.d.Tags { if v := tags[tag]; v == "" { if !tagsCopied { newTags = newTags.Copy() tagsCopied = true } - d.tagsDefaulted.Add(1) + n.tagsDefaulted.Add(1) newTags[tag] = value } } diff --git a/delete.go b/delete.go 
index 066214466..a51002adc 100644 --- a/delete.go +++ b/delete.go @@ -3,6 +3,7 @@ package kapacitor import ( "log" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -25,101 +26,118 @@ type DeleteNode struct { // Create a new DeleteNode which applies a transformation func to each point in a stream and returns a single point. func newDeleteNode(et *ExecutingTask, n *pipeline.DeleteNode, l *log.Logger) (*DeleteNode, error) { - dn := &DeleteNode{ - node: node{Node: n, et: et, logger: l}, - d: n, - tags: make(map[string]bool), - } + tags := make(map[string]bool) for _, tag := range n.Tags { - dn.tags[tag] = true + tags[tag] = true + } + + dn := &DeleteNode{ + node: node{Node: n, et: et, logger: l}, + d: n, + fieldsDeleted: new(expvar.Int), + tagsDeleted: new(expvar.Int), + tags: tags, } dn.node.runF = dn.runDelete return dn, nil } -func (e *DeleteNode) runDelete(snapshot []byte) error { - e.fieldsDeleted = &expvar.Int{} - e.tagsDeleted = &expvar.Int{} - - e.statMap.Set(statsFieldsDeleted, e.fieldsDeleted) - e.statMap.Set(statsTagsDeleted, e.tagsDeleted) - switch e.Provides() { - case pipeline.StreamEdge: - for p, ok := e.ins[0].NextPoint(); ok; p, ok = e.ins[0].NextPoint() { - e.timer.Start() - p.Fields, p.Tags = e.doDeletes(p.Fields, p.Tags) - // Check if we deleted a group by dimension - updateDims := false - for _, dim := range p.Dimensions.TagNames { - if !e.tags[dim] { - updateDims = true - break - } - } - if updateDims { - newDims := make([]string, 0, len(p.Dimensions.TagNames)) - for _, dim := range p.Dimensions.TagNames { - if !e.tags[dim] { - newDims = append(newDims, dim) - } - } - p.Dimensions.TagNames = newDims - p.Group = models.ToGroupID(p.Name, p.Tags, p.Dimensions) - } - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } +func (n *DeleteNode) runDelete(snapshot []byte) error { + n.statMap.Set(statsFieldsDeleted, n.fieldsDeleted) + n.statMap.Set(statsTagsDeleted, n.tagsDeleted) + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() +} + +func (n *DeleteNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + begin = begin.ShallowCopy() + _, tags := n.doDeletes(nil, begin.Tags()) + begin.SetTags(tags) + return begin, nil +} + +func (n *DeleteNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + bp = bp.ShallowCopy() + fields, tags := n.doDeletes(bp.Fields(), bp.Tags()) + bp.SetFields(fields) + bp.SetTags(tags) + return bp, nil +} + +func (n *DeleteNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (n *DeleteNode) Point(p edge.PointMessage) (edge.Message, error) { + p = p.ShallowCopy() + fields, tags := n.doDeletes(p.Fields(), p.Tags()) + p.SetFields(fields) + p.SetTags(tags) + dims := p.Dimensions() + if n.checkForDeletedDimension(dims) { + p.SetDimensions(n.deleteDimensions(dims)) + } + return p, nil +} + +func (n *DeleteNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *DeleteNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + +// checkForDeletedDimension checks if we deleted a group by dimension +func (n *DeleteNode) checkForDeletedDimension(dimensions models.Dimensions) bool { + for _, dim := range 
dimensions.TagNames { + if n.tags[dim] { + return true } - case pipeline.BatchEdge: - for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() { - e.timer.Start() - b.Points = b.ShallowCopyPoints() - for i := range b.Points { - b.Points[i].Fields, b.Points[i].Tags = e.doDeletes(b.Points[i].Fields, b.Points[i].Tags) - } - _, newTags := e.doDeletes(nil, b.Tags) - if len(newTags) != len(b.Tags) { - b.Tags = newTags - b.Group = models.ToGroupID(b.Name, b.Tags, b.PointDimensions()) - } - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } + } + return false +} + +func (n *DeleteNode) deleteDimensions(dims models.Dimensions) models.Dimensions { + newTagNames := make([]string, 0, len(dims.TagNames)-1) + for _, dim := range dims.TagNames { + if !n.tags[dim] { + newTagNames = append(newTagNames, dim) } } - return nil + return models.Dimensions{ + TagNames: newTagNames, + ByName: dims.ByName, + } } -func (d *DeleteNode) doDeletes(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) { +func (n *DeleteNode) doDeletes(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) { newFields := fields fieldsCopied := false - for _, field := range d.d.Fields { + for _, field := range n.d.Fields { if _, ok := fields[field]; ok { if !fieldsCopied { newFields = newFields.Copy() fieldsCopied = true } - d.fieldsDeleted.Add(1) + n.fieldsDeleted.Add(1) delete(newFields, field) } } newTags := tags tagsCopied := false - for _, tag := range d.d.Tags { + for _, tag := range n.d.Tags { if _, ok := tags[tag]; ok { if !tagsCopied { newTags = newTags.Copy() tagsCopied = true } - d.tagsDeleted.Add(1) + n.tagsDeleted.Add(1) delete(newTags, tag) } } diff --git a/derivative.go b/derivative.go index 5de1709cc..ea0c4ed12 100644 --- a/derivative.go +++ b/derivative.go @@ -2,10 +2,9 @@ package kapacitor import ( "log" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" ) @@ -26,90 +25,111 @@ func newDerivativeNode(et *ExecutingTask, n *pipeline.DerivativeNode, l *log.Log return dn, nil } -func (d *DerivativeNode) runDerivative([]byte) error { - switch d.Provides() { - case pipeline.StreamEdge: - var mu sync.RWMutex - previous := make(map[models.GroupID]models.Point) - valueF := func() int64 { - mu.RLock() - l := len(previous) - mu.RUnlock() - return int64(l) - } - d.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - for p, ok := d.ins[0].NextPoint(); ok; p, ok = d.ins[0].NextPoint() { - d.timer.Start() - mu.RLock() - pr := previous[p.Group] - mu.RUnlock() - - value, store, emit := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time) - if store { - mu.Lock() - previous[p.Group] = p - mu.Unlock() - } - if emit { - fields := p.Fields.Copy() - fields[d.d.As] = value - p.Fields = fields - d.timer.Pause() - for _, child := range d.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - d.timer.Resume() - } - d.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := d.ins[0].NextBatch(); ok; b, ok = d.ins[0].NextBatch() { - d.timer.Start() - b.Points = b.ShallowCopyPoints() - var pr, p models.BatchPoint - for i := 0; i < len(b.Points); i++ { - p = b.Points[i] - value, store, emit := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time) - if store { - pr = p - } - if emit { - fields := p.Fields.Copy() - fields[d.d.As] = value - b.Points[i].Fields = fields - } else { - 
b.Points = append(b.Points[:i], b.Points[i+1:]...) - i-- - } - } - d.timer.Stop() - for _, child := range d.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } +func (n *DerivativeNode) runDerivative([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() +} + +func (n *DerivativeNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup()), + ), nil +} + +func (n *DerivativeNode) newGroup() *derivativeGroup { + return &derivativeGroup{ + n: n, + } +} + +type derivativeGroup struct { + n *DerivativeNode + previous edge.FieldsTagsTimeGetter +} + +func (g *derivativeGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + if s := begin.SizeHint(); s > 0 { + begin = begin.ShallowCopy() + begin.SetSizeHint(s - 1) + } + g.previous = nil + return begin, nil +} + +func (g *derivativeGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + np := bp.ShallowCopy() + emit := g.doDerivative(bp, np) + if emit { + return np, nil + } + return nil, nil +} + +func (g *derivativeGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *derivativeGroup) Point(p edge.PointMessage) (edge.Message, error) { + np := p.ShallowCopy() + emit := g.doDerivative(p, np) + if emit { + return np, nil + } + return nil, nil +} + +// doDerivative computes the derivative with respect to g.previous and p. +// The resulting derivative value will be set on n. +func (g *derivativeGroup) doDerivative(p edge.FieldsTagsTimeGetter, n edge.FieldsTagsTimeSetter) bool { + var prevFields, currFields models.Fields + var prevTime, currTime time.Time + if g.previous != nil { + prevFields = g.previous.Fields() + prevTime = g.previous.Time() } - return nil + currFields = p.Fields() + currTime = p.Time() + value, store, emit := g.n.derivative( + prevFields, currFields, + prevTime, currTime, + ) + if store { + g.previous = p + } + if !emit { + return false + } + + fields := n.Fields().Copy() + fields[g.n.d.As] = value + n.SetFields(fields) + return true +} + +func (g *derivativeGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *derivativeGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } // derivative calculates the derivative between prev and cur. // Return is the resulting derivative, whether the current point should be // stored as previous, and whether the point result should be emitted. -func (d *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime time.Time) (float64, bool, bool) { - f1, ok := numToFloat(curr[d.d.Field]) +func (n *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime time.Time) (float64, bool, bool) { + f1, ok := numToFloat(curr[n.d.Field]) if !ok { - d.incrementErrorCount() - d.logger.Printf("E! cannot apply derivative to type %T", curr[d.d.Field]) + n.incrementErrorCount() + n.logger.Printf("E! cannot apply derivative to type %T", curr[n.d.Field]) return 0, false, false } - f0, ok := numToFloat(prev[d.d.Field]) + f0, ok := numToFloat(prev[n.d.Field]) if !ok { // The only time this will fail to parse is if there is no previous. 
// Because we only return `store=true` if current parses successfully, we will @@ -119,17 +139,17 @@ func (d *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime elapsed := float64(currTime.Sub(prevTime)) if elapsed == 0 { - d.incrementErrorCount() - d.logger.Printf("E! cannot perform derivative elapsed time was 0") + n.incrementErrorCount() + n.logger.Printf("E! cannot perform derivative elapsed time was 0") return 0, true, false } diff := f1 - f0 // Drop negative values for non-negative derivatives - if d.d.NonNegativeFlag && diff < 0 { + if n.d.NonNegativeFlag && diff < 0 { return 0, true, false } - value := float64(diff) / (elapsed / float64(d.d.Unit)) + value := float64(diff) / (elapsed / float64(n.d.Unit)) return value, true, true } diff --git a/edge.go b/edge.go index 5dd69bbf6..46312f9b6 100644 --- a/edge.go +++ b/edge.go @@ -6,8 +6,8 @@ import ( "log" "sync" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" - "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/server/vars" ) @@ -21,34 +21,19 @@ const ( var ErrAborted = errors.New("edged aborted") -type StreamCollector interface { - CollectPoint(models.Point) error - Close() -} - -type BatchCollector interface { - CollectBatch(models.Batch) error - Close() -} - type Edge struct { + edge.StatsEdge + mu sync.Mutex closed bool - stream chan models.Point - batch chan models.Batch - - logger *log.Logger - aborted chan struct{} - statsKey string - collected *expvar.Int - emitted *expvar.Int - statMap *expvar.Map - groupMu sync.RWMutex - groupStats map[models.GroupID]*edgeStat + statsKey string + statMap *expvar.Map + logger *log.Logger } -func newEdge(taskName, parentName, childName string, t pipeline.EdgeType, size int, logService LogService) *Edge { +func newEdge(taskName, parentName, childName string, t pipeline.EdgeType, size int, logService LogService) edge.StatsEdge { + e := edge.NewStatsEdge(edge.NewChannelEdge(t, defaultEdgeBufferSize)) tags := map[string]string{ "task": taskName, "parent": parentName, @@ -56,184 +41,28 @@ func newEdge(taskName, parentName, childName string, t pipeline.EdgeType, size i "type": t.String(), } key, sm := vars.NewStatistic("edges", tags) - collected := &expvar.Int{} - emitted := &expvar.Int{} - sm.Set(statCollected, collected) - sm.Set(statEmitted, emitted) - e := &Edge{ - statsKey: key, - statMap: sm, - collected: collected, - emitted: emitted, - aborted: make(chan struct{}), - groupStats: make(map[models.GroupID]*edgeStat), - } + sm.Set(statCollected, e.CollectedVar()) + sm.Set(statEmitted, e.EmittedVar()) name := fmt.Sprintf("%s|%s->%s", taskName, parentName, childName) - e.logger = logService.NewLogger(fmt.Sprintf("[edge:%s] ", name), log.LstdFlags) - switch t { - case pipeline.StreamEdge: - e.stream = make(chan models.Point, size) - case pipeline.BatchEdge: - e.batch = make(chan models.Batch, size) - } - return e -} - -func (e *Edge) emittedCount() int64 { - return e.emitted.IntValue() -} - -func (e *Edge) collectedCount() int64 { - return e.collected.IntValue() -} - -// Stats for a given group for this edge -type edgeStat struct { - collected int64 - emitted int64 - tags models.Tags - dims models.Dimensions -} - -// Get a snapshot of the current group statistics for this edge -func (e *Edge) readGroupStats(f func(group models.GroupID, collected, emitted int64, tags models.Tags, dims models.Dimensions)) { - e.groupMu.RLock() - defer e.groupMu.RUnlock() - for group, stats := range 
e.groupStats { - f( - group, - stats.collected, - stats.emitted, - stats.tags, - stats.dims, - ) + return &Edge{ + StatsEdge: e, + statsKey: key, + statMap: sm, + logger: logService.NewLogger(fmt.Sprintf("[edge:%s] ", name), log.LstdFlags), } } -// Close the edge, this can only be called after all -// collect calls to the edge have finished. -// Can be called multiple times. -func (e *Edge) Close() { +func (e *Edge) Close() error { e.mu.Lock() defer e.mu.Unlock() if e.closed { - return + return nil } e.closed = true - e.logger.Printf( - "D! closing c: %d e: %d\n", - e.collected.IntValue(), - e.emitted.IntValue(), - ) - if e.stream != nil { - close(e.stream) - } - if e.batch != nil { - close(e.batch) - } vars.DeleteStatistic(e.statsKey) -} - -// Abort all next and collect calls. -// Items in flight may or may not be processed. -func (e *Edge) Abort() { - close(e.aborted) - e.logger.Printf( - "I! aborting c: %d e: %d\n", - e.collected.IntValue(), - e.emitted.IntValue(), + e.logger.Printf("D! closing c: %d e: %d", + e.Collected(), + e.Emitted(), ) -} - -func (e *Edge) Next() (p models.PointInterface, ok bool) { - if e.stream != nil { - return e.NextPoint() - } - return e.NextBatch() -} - -func (e *Edge) NextPoint() (p models.Point, ok bool) { - select { - case <-e.aborted: - case p, ok = <-e.stream: - if ok { - e.emitted.Add(1) - e.incEmitted(p.Group, p.Tags, p.Dimensions, 1) - } - } - return -} - -func (e *Edge) NextBatch() (b models.Batch, ok bool) { - select { - case <-e.aborted: - case b, ok = <-e.batch: - if ok { - e.emitted.Add(1) - e.incEmitted(b.Group, b.Tags, b.PointDimensions(), int64(len(b.Points))) - } - } - return -} - -func (e *Edge) CollectPoint(p models.Point) error { - e.collected.Add(1) - e.incCollected(p.Group, p.Tags, p.Dimensions, 1) - select { - case <-e.aborted: - return ErrAborted - case e.stream <- p: - return nil - } -} - -func (e *Edge) CollectBatch(b models.Batch) error { - e.collected.Add(1) - e.incCollected(b.Group, b.Tags, b.PointDimensions(), int64(len(b.Points))) - select { - case <-e.aborted: - return ErrAborted - case e.batch <- b: - return nil - } -} - -// Increment the emitted count of the group for this edge. -func (e *Edge) incEmitted(group models.GroupID, tags models.Tags, dims models.Dimensions, count int64) { - // we are "manually" calling Unlock() and not using defer, because this method is called - // in hot locations (NextPoint/CollectPoint) and defer have some performance penalty - e.groupMu.Lock() - - if stats, ok := e.groupStats[group]; ok { - stats.emitted += count - e.groupMu.Unlock() - } else { - stats = &edgeStat{ - emitted: count, - tags: tags, - dims: dims, - } - e.groupStats[group] = stats - e.groupMu.Unlock() - } -} - -// Increment the collected count of the group for this edge. 
-func (e *Edge) incCollected(group models.GroupID, tags models.Tags, dims models.Dimensions, count int64) { - // we are "manually" calling Unlock() and not using defer, because this method is called - // in hot locations (NextPoint/CollectPoint) and defer have some performance penalty - e.groupMu.Lock() - - if stats, ok := e.groupStats[group]; ok { - stats.collected += count - e.groupMu.Unlock() - } else { - stats = &edgeStat{ - collected: count, - tags: tags, - dims: dims, - } - e.groupStats[group] = stats - e.groupMu.Unlock() - } + return e.StatsEdge.Close() } diff --git a/edge/buffered.go b/edge/buffered.go new file mode 100644 index 000000000..ccf5431d7 --- /dev/null +++ b/edge/buffered.go @@ -0,0 +1,30 @@ +package edge + +type BufferedReceiver interface { + Receiver + // BufferedBatch processes an entire buffered batch. + // Do not modify the batch or the slice of Points as it is shared. + BufferedBatch(batch BufferedBatchMessage) error +} + +// BatchBuffer buffers batch messages into a BufferedBatchMessage. +type BatchBuffer struct { + begin BeginBatchMessage + points []BatchPointMessage +} + +func (r *BatchBuffer) BeginBatch(begin BeginBatchMessage) error { + r.begin = begin.ShallowCopy() + r.points = make([]BatchPointMessage, 0, begin.SizeHint()) + return nil +} + +func (r *BatchBuffer) BatchPoint(bp BatchPointMessage) error { + r.points = append(r.points, bp) + return nil +} + +func (r *BatchBuffer) BufferedBatchMessage(end EndBatchMessage) BufferedBatchMessage { + r.begin.SetSizeHint(len(r.points)) + return NewBufferedBatchMessage(r.begin, r.points, end) +} diff --git a/edge/consumer.go b/edge/consumer.go new file mode 100644 index 000000000..13569339a --- /dev/null +++ b/edge/consumer.go @@ -0,0 +1,203 @@ +package edge + +import ( + "fmt" +) + +// Consumer reads messages off an edge and passes them to a receiver. +type Consumer interface { + // Consume reads messages off an edge until the edge is closed or aborted. + // An error is returned if either the edge or receiver errors. + Consume() error +} + +// Receiver handles messages as they arrive via a consumer. +type Receiver interface { + BeginBatch(begin BeginBatchMessage) error + BatchPoint(bp BatchPointMessage) error + EndBatch(end EndBatchMessage) error + Point(p PointMessage) error + Barrier(b BarrierMessage) error + DeleteGroup(d DeleteGroupMessage) error +} + +type consumer struct { + edge Edge + r Receiver +} + +// NewConsumerWithReceiver creates a new consumer for the edge e and receiver r. +func NewConsumerWithReceiver(e Edge, r Receiver) Consumer { + return &consumer{ + edge: e, + r: r, + } +} + +func (ec *consumer) Consume() error { + for msg, ok := ec.edge.Emit(); ok; msg, ok = ec.edge.Emit() { + switch m := msg.(type) { + case BeginBatchMessage: + if err := ec.r.BeginBatch(m); err != nil { + return err + } + case BatchPointMessage: + if err := ec.r.BatchPoint(m); err != nil { + return err + } + case EndBatchMessage: + if err := ec.r.EndBatch(m); err != nil { + return err + } + case BufferedBatchMessage: + err := receiveBufferedBatch(ec.r, m) + if err != nil { + return err + } + case PointMessage: + if err := ec.r.Point(m); err != nil { + return err + } + case BarrierMessage: + if err := ec.r.Barrier(m); err != nil { + return err + } + default: + return fmt.Errorf("unexpected message of type %T", msg) + } + } + return nil +} + +func receiveBufferedBatch(r Receiver, batch BufferedBatchMessage) error { + b, ok := r.(BufferedReceiver) + // If we have a buffered receiver pass the batch straight through. 
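The Consumer/Receiver pair introduced in edge/consumer.go is the core of the new message-passing API: a node hands an edge and a Receiver to a Consumer, and the Consumer drives the receiver callbacks until the edge closes or aborts. A minimal sketch of that flow outside of Kapacitor's node machinery is shown below; `countingReceiver` and `consumeAll` are illustrative names and are not part of this change.

```go
import "github.com/influxdata/kapacitor/edge"

// countingReceiver is a hypothetical Receiver that simply tallies
// the points and batches it sees.
type countingReceiver struct {
	points  int
	batches int
}

func (r *countingReceiver) BeginBatch(begin edge.BeginBatchMessage) error { return nil }
func (r *countingReceiver) BatchPoint(bp edge.BatchPointMessage) error    { return nil }
func (r *countingReceiver) EndBatch(end edge.EndBatchMessage) error {
	r.batches++
	return nil
}
func (r *countingReceiver) Point(p edge.PointMessage) error {
	r.points++
	return nil
}
func (r *countingReceiver) Barrier(b edge.BarrierMessage) error         { return nil }
func (r *countingReceiver) DeleteGroup(d edge.DeleteGroupMessage) error { return nil }

// consumeAll blocks until the edge is closed or aborted, then reports
// how many points were received.
func consumeAll(e edge.Edge) (int, error) {
	r := &countingReceiver{}
	if err := edge.NewConsumerWithReceiver(e, r).Consume(); err != nil {
		return 0, err
	}
	return r.points, nil
}
```

If the receiver also implements BufferedReceiver, the consumer hands it whole buffered batches instead of replaying begin/point/end messages.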
+ if ok { + return b.BufferedBatch(batch) + } + + // Pass the batch non buffered. + if err := r.BeginBatch(batch.Begin()); err != nil { + return err + } + for _, bp := range batch.Points() { + if err := r.BatchPoint(bp); err != nil { + return err + } + } + return r.EndBatch(batch.End()) +} + +type MultiReceiver interface { + BufferedBatch(src int, batch BufferedBatchMessage) error + Point(src int, p PointMessage) error + Barrier(src int, b BarrierMessage) error + Finish() error +} + +func NewMultiConsumerWithStats(ins []StatsEdge, r MultiReceiver) Consumer { + edges := make([]Edge, len(ins)) + for i := range ins { + edges[i] = ins[i] + } + return NewMultiConsumer(edges, r) +} + +func NewMultiConsumer(ins []Edge, r MultiReceiver) Consumer { + return &multiConsumer{ + ins: ins, + r: r, + messages: make(chan srcMessage), + } +} + +type multiConsumer struct { + ins []Edge + + r MultiReceiver + + messages chan srcMessage +} + +type srcMessage struct { + Src int + Msg Message +} + +func (c *multiConsumer) Consume() error { + errC := make(chan error, len(c.ins)) + for i, in := range c.ins { + go func(src int, in Edge) { + errC <- c.readEdge(src, in) + }(i, in) + } + + firstErr := make(chan error, 1) + go func() { + for range c.ins { + err := <-errC + if err != nil { + firstErr <- err + } + } + // Close messages now that all readEdge goroutines have finished. + close(c.messages) + }() + +LOOP: + for { + select { + case err := <-firstErr: + // One of the parents errored out, return the error. + return err + case m, ok := <-c.messages: + if !ok { + break LOOP + } + switch msg := m.Msg.(type) { + case BufferedBatchMessage: + if err := c.r.BufferedBatch(m.Src, msg); err != nil { + return err + } + case PointMessage: + if err := c.r.Point(m.Src, msg); err != nil { + return err + } + case BarrierMessage: + if err := c.r.Barrier(m.Src, msg); err != nil { + return err + } + } + } + } + + return c.r.Finish() +} + +func (c *multiConsumer) readEdge(src int, in Edge) error { + batchBuffer := new(BatchBuffer) + for m, ok := in.Emit(); ok; m, ok = in.Emit() { + switch msg := m.(type) { + case BeginBatchMessage: + if err := batchBuffer.BeginBatch(msg); err != nil { + return err + } + case BatchPointMessage: + if err := batchBuffer.BatchPoint(msg); err != nil { + return err + } + case EndBatchMessage: + batch := batchBuffer.BufferedBatchMessage(msg) + c.messages <- srcMessage{ + Src: src, + Msg: batch, + } + default: + c.messages <- srcMessage{ + Src: src, + Msg: msg, + } + } + } + return nil +} diff --git a/edge/doc.go b/edge/doc.go new file mode 100644 index 000000000..705bc941e --- /dev/null +++ b/edge/doc.go @@ -0,0 +1,5 @@ +/* + Package edge provides mechanisms for message passing along edges. + Several composable interfaces are defined to aid in implementing a node which consumes messages from an edge. +*/ +package edge diff --git a/edge/edge.go b/edge/edge.go new file mode 100644 index 000000000..837785319 --- /dev/null +++ b/edge/edge.go @@ -0,0 +1,98 @@ +package edge + +import ( + "errors" + "sync" + + "github.com/influxdata/kapacitor/pipeline" +) + +// Edge represents the connection between two nodes that communicate via messages. +// Edge communication is unidirectional and asynchronous. +// Edges are safe for concurrent use. +type Edge interface { + // Collect instructs the edge to accept a new message. + Collect(Message) error + // Emit blocks until a message is available and returns it or returns false if the edge has been closed or aborted. 
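MultiReceiver and NewMultiConsumer above serve nodes with several parent edges: each parent is read in its own goroutine, batches are buffered per parent, and all messages are funneled to a single receiver tagged with the source index. A rough pass-through sketch is below; `unionReceiver` and `runUnion` are hypothetical and only illustrate the wiring.

```go
import "github.com/influxdata/kapacitor/edge"

// unionReceiver is a hypothetical MultiReceiver that forwards every
// message from any parent edge into one output edge.
type unionReceiver struct {
	out edge.Edge
}

func (u *unionReceiver) BufferedBatch(src int, batch edge.BufferedBatchMessage) error {
	return u.out.Collect(batch)
}
func (u *unionReceiver) Point(src int, p edge.PointMessage) error {
	return u.out.Collect(p)
}
func (u *unionReceiver) Barrier(src int, b edge.BarrierMessage) error {
	return u.out.Collect(b)
}
func (u *unionReceiver) Finish() error {
	// Nothing buffered to flush in this sketch.
	return nil
}

// runUnion consumes from all parent edges concurrently until they are all
// closed, then closes the output edge.
func runUnion(ins []edge.Edge, out edge.Edge) error {
	u := &unionReceiver{out: out}
	if err := edge.NewMultiConsumer(ins, u).Consume(); err != nil {
		return err
	}
	return out.Close()
}
```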
+ Emit() (Message, bool) + // Close stops the edge, all messages currently buffered will be processed. + // Future calls to Collect will panic. + Close() error + // Abort immediately stops the edge and all currently buffered messages are dropped. + // Future calls to Collect return the error ErrAborted. + Abort() + // Type indicates whether the edge will emit stream or batch data. + Type() pipeline.EdgeType +} + +type edgeState int + +const ( + edgeOpen edgeState = iota + edgeClosed + edgeAborted +) + +// channelEdge is an implementation of Edge using channels. +type channelEdge struct { + aborting chan struct{} + messages chan Message + + typ pipeline.EdgeType + + mu sync.Mutex + state edgeState +} + +// NewChannelEdge returns a new edge that uses channels as the underlying transport. +func NewChannelEdge(typ pipeline.EdgeType, size int) Edge { + return &channelEdge{ + aborting: make(chan struct{}), + messages: make(chan Message, size), + state: edgeOpen, + typ: typ, + } +} + +func (e *channelEdge) Collect(m Message) error { + select { + case e.messages <- m: + return nil + case <-e.aborting: + return ErrAborted + } +} + +func (e *channelEdge) Emit() (m Message, ok bool) { + select { + case m, ok = <-e.messages: + case <-e.aborting: + } + return +} + +func (e *channelEdge) Close() error { + e.mu.Lock() + defer e.mu.Unlock() + if e.state != edgeOpen { + return errors.New("edge not open cannot close") + } + close(e.messages) + e.state = edgeClosed + return nil +} + +func (e *channelEdge) Abort() { + e.mu.Lock() + defer e.mu.Unlock() + if e.state == edgeAborted { + //nothing to do, already aborted + return + } + close(e.aborting) + e.state = edgeAborted +} + +func (e *channelEdge) Type() pipeline.EdgeType { + return e.typ +} diff --git a/edge/edge_test.go b/edge/edge_test.go new file mode 100644 index 000000000..54be7af05 --- /dev/null +++ b/edge/edge_test.go @@ -0,0 +1,187 @@ +package edge_test + +import ( + "reflect" + "testing" + "time" + + "github.com/influxdata/kapacitor/edge" + "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/pipeline" +) + +const defaultEdgeBufferSize = 1000 + +var name = "edge_test" +var db = "mydb" +var rp = "myrp" +var now = time.Now() +var groupTags = models.Tags{ + "tag1": "value1", + "tag2": "value2", +} +var groupDims = models.Dimensions{TagNames: []string{"tag1", "tag2"}} + +var point = edge.NewPointMessage( + name, + db, + rp, + groupDims, + models.Fields{ + "field1": 42, + "field2": 4.2, + "field3": 49, + "field4": 4.9, + }, + models.Tags{ + "tag1": "value1", + "tag2": "value2", + "tag3": "value3", + "tag4": "value4", + }, + now, +) + +var batch = edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + name, + groupTags, + groupDims.ByName, + now, + 2, + ), + []edge.BatchPointMessage{ + edge.NewBatchPointMessage( + models.Fields{ + "field1": 42, + "field2": 4.2, + "field3": 49, + "field4": 4.9, + }, + models.Tags{ + "tag1": "value1", + "tag2": "value2", + "tag3": "first", + "tag4": "first", + }, + now, + ), + edge.NewBatchPointMessage( + models.Fields{ + "field1": 42, + "field2": 4.2, + "field3": 49, + "field4": 4.9, + }, + models.Tags{ + "tag1": "value1", + "tag2": "value2", + "tag3": "second", + "tag4": "second", + }, + now, + ), + }, + edge.NewEndBatchMessage(), +) + +func TestEdge_CollectPoint(t *testing.T) { + e := edge.NewChannelEdge(pipeline.StreamEdge, defaultEdgeBufferSize) + + e.Collect(point) + msg, ok := e.Emit() + if !ok { + t.Fatal("did not get point back out of edge") + } + if !reflect.DeepEqual(msg, point) { + 
t.Errorf("unexpected point after passing through edge:\ngot:\n%v\nexp:\n%v\n", msg, point) + } +} + +func TestEdge_CollectBatch(t *testing.T) { + e := edge.NewChannelEdge(pipeline.BatchEdge, defaultEdgeBufferSize) + e.Collect(batch) + msg, ok := e.Emit() + if !ok { + t.Fatal("did not get batch back out of edge") + } + if !reflect.DeepEqual(batch, msg) { + t.Errorf("unexpected batch after passing through edge:\ngot:\n%v\nexp:\n%v\n", msg, batch) + } +} + +var emittedMsg edge.Message +var emittedOK bool + +func BenchmarkCollectPoint(b *testing.B) { + e := edge.NewChannelEdge(pipeline.StreamEdge, defaultEdgeBufferSize) + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + e.Collect(point) + emittedMsg, emittedOK = e.Emit() + } + }) +} + +func BenchmarkCollectBatch(b *testing.B) { + e := edge.NewChannelEdge(pipeline.StreamEdge, defaultEdgeBufferSize) + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + e.Collect(batch) + emittedMsg, emittedOK = e.Emit() + } + }) +} + +type noopReceiver struct{} + +func (r noopReceiver) BeginBatch(begin edge.BeginBatchMessage) error { + return nil +} + +func (r noopReceiver) BatchPoint(bp edge.BatchPointMessage) error { + return nil +} + +func (r noopReceiver) EndBatch(end edge.EndBatchMessage) error { + return nil +} + +func (r noopReceiver) Point(p edge.PointMessage) error { + return nil +} + +func (r noopReceiver) Barrier(b edge.BarrierMessage) error { + return nil +} +func (r noopReceiver) DeleteGroup(d edge.DeleteGroupMessage) error { + return nil +} + +func BenchmarkConsumer(b *testing.B) { + var msg edge.Message + msg = batch + count := defaultEdgeBufferSize * 10 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + e := edge.NewChannelEdge(pipeline.StreamEdge, defaultEdgeBufferSize) + consumer := edge.NewConsumerWithReceiver(e, noopReceiver{}) + go func() { + for i := 0; i < count; i++ { + e.Collect(msg) + } + e.Close() + }() + b.StartTimer() + err := consumer.Consume() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/edge/error.go b/edge/error.go new file mode 100644 index 000000000..01356c1ef --- /dev/null +++ b/edge/error.go @@ -0,0 +1,8 @@ +package edge + +import ( + "errors" +) + +// ErrAborted is returned from the Edge interface when operations are performed on the edge after it has been aborted. +var ErrAborted = errors.New("edge aborted") diff --git a/edge/forwarding.go b/edge/forwarding.go new file mode 100644 index 000000000..d3024fdc6 --- /dev/null +++ b/edge/forwarding.go @@ -0,0 +1,103 @@ +package edge + +// ForwardReceiver handles messages as they arrive and can return a message to be forwarded to output edges. +// If a returned messages is nil, no message is forwarded. +type ForwardReceiver interface { + BeginBatch(begin BeginBatchMessage) (Message, error) + BatchPoint(bp BatchPointMessage) (Message, error) + EndBatch(end EndBatchMessage) (Message, error) + Point(p PointMessage) (Message, error) + Barrier(b BarrierMessage) (Message, error) + DeleteGroup(d DeleteGroupMessage) (Message, error) +} + +// ForwardBufferedReceiver handles messages as they arrive and can return a message to be forwarded to output edges. +// If a returned messages is nil, no message is forwarded. +type ForwardBufferedReceiver interface { + ForwardReceiver + BufferedBatch(batch BufferedBatchMessage) (Message, error) +} + +// NewReceiverFromForwardReceiverWithStats creates a new receiver from the provided list of stats edges and forward receiver. 
+func NewReceiverFromForwardReceiverWithStats(outs []StatsEdge, r ForwardReceiver) Receiver { + os := make([]Edge, len(outs)) + for i := range outs { + os[i] = outs[i] + } + return NewReceiverFromForwardReceiver(os, r) +} + +// NewReceiverFromForwardReceiver creates a new receiver from the provided list of edges and forward receiver. +func NewReceiverFromForwardReceiver(outs []Edge, r ForwardReceiver) Receiver { + b, ok := r.(ForwardBufferedReceiver) + if ok { + return &forwardingBufferedReceiver{ + forwardingReceiver: forwardingReceiver{ + outs: outs, + r: r, + }, + b: b, + } + } + return &forwardingReceiver{ + outs: outs, + r: r, + } +} + +type forwardingReceiver struct { + outs []Edge + r ForwardReceiver +} + +type forwardingBufferedReceiver struct { + forwardingReceiver + b ForwardBufferedReceiver +} + +func (fr *forwardingReceiver) BeginBatch(begin BeginBatchMessage) error { + return fr.forward(fr.r.BeginBatch(begin)) +} +func (fr *forwardingReceiver) BatchPoint(bp BatchPointMessage) error { + return fr.forward(fr.r.BatchPoint(bp)) +} +func (fr *forwardingReceiver) EndBatch(end EndBatchMessage) error { + return fr.forward(fr.r.EndBatch(end)) +} + +func (fr *forwardingBufferedReceiver) BufferedBatch(batch BufferedBatchMessage) error { + return fr.forward(fr.b.BufferedBatch(batch)) +} + +func (fr *forwardingReceiver) Point(p PointMessage) error { + return fr.forward(fr.r.Point(p)) +} +func (fr *forwardingReceiver) Barrier(b BarrierMessage) error { + return fr.forward(fr.r.Barrier(b)) +} +func (fr *forwardingReceiver) DeleteGroup(d DeleteGroupMessage) error { + return fr.forward(fr.r.DeleteGroup(d)) +} + +func (fr *forwardingReceiver) forward(msg Message, err error) error { + if err != nil { + return err + } + if msg != nil { + for _, out := range fr.outs { + if err := out.Collect(msg); err != nil { + return err + } + } + } + return nil +} + +func Forward(outs []StatsEdge, msg Message) error { + for _, out := range outs { + if err := out.Collect(msg); err != nil { + return err + } + } + return nil +} diff --git a/edge/grouped.go b/edge/grouped.go new file mode 100644 index 000000000..5cbb73698 --- /dev/null +++ b/edge/grouped.go @@ -0,0 +1,129 @@ +package edge + +import ( + "errors" + + "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/models" +) + +// GroupedConsumer reads messages off an edge and passes them by group to receivers created from a grouped receiver. +type GroupedConsumer interface { + Consumer + // CardinalityVar is an exported var that indicates the current number of groups being managed. + CardinalityVar() expvar.IntVar +} + +// GroupedReceiver creates and deletes receivers as groups are created and deleted. +type GroupedReceiver interface { + // NewGroup signals that a new group has been discovered in the data. + // Information on the group and the message that first triggered its creation are provided. + NewGroup(group GroupInfo, first PointMeta) (Receiver, error) +} + +// GroupInfo identifies and contians information about a specific group. +type GroupInfo struct { + ID models.GroupID + Tags models.Tags + Dimensions models.Dimensions +} + +type groupedConsumer struct { + consumer Consumer + gr GroupedReceiver + groups map[models.GroupID]Receiver + current Receiver + cardinality *expvar.Int +} + +// NewGroupedConsumer creates a new grouped consumer for edge e and grouped receiver r. 
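The ForwardReceiver wrappers in edge/forwarding.go let a node express its logic as "take a message, return the message to emit (or nil to drop it)" and leave fan-out to the output edges to the framework. A small sketch of that shape is below; `renameReceiver` and `runRename` are invented for illustration and rename every point before forwarding it.

```go
import "github.com/influxdata/kapacitor/edge"

// renameReceiver is a hypothetical ForwardReceiver that rewrites the
// measurement name of every point and batch and forwards everything else unchanged.
type renameReceiver struct {
	name string
}

func (r *renameReceiver) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {
	begin = begin.ShallowCopy() // never mutate a shared message in place
	begin.SetName(r.name)
	return begin, nil
}
func (r *renameReceiver) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { return bp, nil }
func (r *renameReceiver) EndBatch(end edge.EndBatchMessage) (edge.Message, error)    { return end, nil }
func (r *renameReceiver) Point(p edge.PointMessage) (edge.Message, error) {
	p = p.ShallowCopy()
	p.SetName(r.name)
	return p, nil
}
func (r *renameReceiver) Barrier(b edge.BarrierMessage) (edge.Message, error)         { return b, nil }
func (r *renameReceiver) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { return d, nil }

// runRename wires the forward receiver so whatever it returns is collected
// into every out edge.
func runRename(in edge.Edge, outs []edge.Edge, name string) error {
	recv := edge.NewReceiverFromForwardReceiver(outs, &renameReceiver{name: name})
	return edge.NewConsumerWithReceiver(in, recv).Consume()
}
```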
+func NewGroupedConsumer(e Edge, r GroupedReceiver) GroupedConsumer { + gc := &groupedConsumer{ + gr: r, + groups: make(map[models.GroupID]Receiver), + cardinality: new(expvar.Int), + } + gc.consumer = NewConsumerWithReceiver(e, gc) + return gc +} + +func (c *groupedConsumer) Consume() error { + return c.consumer.Consume() +} +func (c *groupedConsumer) CardinalityVar() expvar.IntVar { + return c.cardinality +} + +func (c *groupedConsumer) getOrCreateGroup(group GroupInfo, first PointMeta) (Receiver, error) { + r, ok := c.groups[group.ID] + if !ok { + c.cardinality.Add(1) + recv, err := c.gr.NewGroup(group, first) + if err != nil { + return nil, err + } + c.groups[group.ID] = recv + r = recv + } + return r, nil +} + +func (c *groupedConsumer) BeginBatch(begin BeginBatchMessage) error { + r, err := c.getOrCreateGroup(begin.GroupInfo(), begin) + if err != nil { + return err + } + c.current = r + return r.BeginBatch(begin) +} + +func (c *groupedConsumer) BatchPoint(p BatchPointMessage) error { + if c.current == nil { + return errors.New("received batch point without batch") + } + return c.current.BatchPoint(p) +} + +func (c *groupedConsumer) EndBatch(end EndBatchMessage) error { + err := c.current.EndBatch(end) + c.current = nil + return err +} + +func (c *groupedConsumer) BufferedBatch(batch BufferedBatchMessage) error { + begin := batch.Begin() + r, err := c.getOrCreateGroup(begin.GroupInfo(), begin) + if err != nil { + return err + } + return receiveBufferedBatch(r, batch) +} + +func (c *groupedConsumer) Point(p PointMessage) error { + r, err := c.getOrCreateGroup(p.GroupInfo(), p) + if err != nil { + return err + } + return r.Point(p) +} + +func (c *groupedConsumer) Barrier(b BarrierMessage) error { + // Barriers messages apply to all gorups + for _, r := range c.groups { + if err := r.Barrier(b); err != nil { + return err + } + } + return nil +} + +func (c *groupedConsumer) DeleteGroup(d DeleteGroupMessage) error { + id := d.GroupID() + r, ok := c.groups[id] + if ok { + delete(c.groups, id) + c.cardinality.Add(-1) + return r.DeleteGroup(d) + } + return nil +} diff --git a/edge/log.go b/edge/log.go new file mode 100644 index 000000000..f1cfb1d44 --- /dev/null +++ b/edge/log.go @@ -0,0 +1,51 @@ +// +build debug + +package edge + +import ( + "log" + + "github.com/influxdata/kapacitor/pipeline" +) + +type logEdge struct { + e Edge + logger *log.Logger +} + +// NewLogEdge creates an edge that logs the type of all collected and emitted messages. +// +// This edge should only be used during debug sessions and not in production code. +// As such by default build tags exclude this file from being compiled. +// Add the `-tags debug` arguments to build or test commands in order to include this file for compilation. +func NewLogEdge(l *log.Logger, e Edge) Edge { + return &logEdge{ + e: e, + logger: l, + } +} + +func (e *logEdge) Collect(m Message) error { + e.logger.Println("D! collect:", m.Type()) + return e.e.Collect(m) +} + +func (e *logEdge) Emit() (m Message, ok bool) { + m, ok = e.e.Emit() + if ok { + e.logger.Println("D! 
emit:", m.Type()) + } + return +} + +func (e *logEdge) Close() error { + return e.e.Close() +} + +func (e *logEdge) Abort() { + e.e.Abort() +} + +func (e *logEdge) Type() pipeline.EdgeType { + return e.e.Type() +} diff --git a/edge/messages.go b/edge/messages.go new file mode 100644 index 000000000..3dbb25ea6 --- /dev/null +++ b/edge/messages.go @@ -0,0 +1,924 @@ +package edge + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "sort" + "strconv" + "time" + + imodels "github.com/influxdata/influxdb/models" + "github.com/influxdata/kapacitor/influxdb" + "github.com/influxdata/kapacitor/models" +) + +// Message represents data to be passed along an edge. +// Messages can be shared across many contexts. +// +// All messages implement their own ShallowCopy method. +// All ShallowCopy methods create a copy of the message but does not +// deeply copy any reference types. +// +// Never mutate a reference type returned from a message without first directly copying +// the reference type. +type Message interface { + // Type returns the type of the message. + Type() MessageType + //TODO(nathanielc): Explore adding a shared flag to Messages to check when they have been shared. + // Then we can enforce shared messages cannot be mutated. + //markShared() +} + +type MessageType int + +const ( + BeginBatch MessageType = iota + BatchPoint + EndBatch + BufferedBatch + Point + Barrier + DeleteGroup +) + +type GroupIDGetter interface { + GroupID() models.GroupID +} + +type GroupInfoer interface { + GroupIDGetter + GroupInfo() GroupInfo +} + +type NameGetter interface { + Name() string +} +type NameSetter interface { + NameGetter + SetName(string) +} + +type DimensionGetter interface { + Dimensions() models.Dimensions +} +type DimensionSetter interface { + DimensionGetter + SetDimensions(models.Dimensions) +} + +type TimeGetter interface { + Time() time.Time +} +type TimeSetter interface { + TimeGetter + SetTime(time.Time) +} + +type FieldGetter interface { + Fields() models.Fields +} +type FieldSetter interface { + FieldGetter + SetFields(models.Fields) +} + +type TagGetter interface { + Tags() models.Tags +} +type TagSetter interface { + TagGetter + SetTags(models.Tags) +} + +type FieldsTagsTimeSetter interface { + FieldSetter + TagSetter + TimeSetter +} + +type FieldsTagsTimeGetter interface { + FieldGetter + TagGetter + TimeGetter +} + +type FieldsTagsTimeGetterMessage interface { + Message + FieldsTagsTimeGetter +} + +// PointMeta is the common read interfaces of point and batch messages. +type PointMeta interface { + NameGetter + GroupInfoer + DimensionGetter + TagGetter + TimeGetter +} + +func (m MessageType) String() string { + switch m { + case BeginBatch: + return "begin_batch" + case BatchPoint: + return "batch_point" + case EndBatch: + return "end_batch" + case BufferedBatch: + return "buffered_batch" + case Point: + return "point" + case Barrier: + return "barrier" + default: + return fmt.Sprintf("unknown message type %d", int(m)) + } +} + +// PointMessage is a single point. 
+type PointMessage interface { + Message + + ShallowCopy() PointMessage + + NameSetter + + Database() string + SetDatabase(string) + RetentionPolicy() string + SetRetentionPolicy(string) + + GroupInfoer + + DimensionSetter + SetTagsAndDimensions(models.Tags, models.Dimensions) + + FieldsTagsTimeSetter + + Bytes(precision string) []byte + + ToResult() models.Result + ToRow() *models.Row +} + +type pointMessage struct { + name string + database string + retentionPolicy string + + groupID models.GroupID + dimensions models.Dimensions + + tags models.Tags + + fields models.Fields + + time time.Time +} + +func NewPointMessage( + name, + database, + retentionPolicy string, + dimensions models.Dimensions, + fields models.Fields, + tags models.Tags, + time time.Time) PointMessage { + pm := &pointMessage{ + name: name, + database: database, + retentionPolicy: retentionPolicy, + dimensions: dimensions, + groupID: models.ToGroupID(name, tags, dimensions), + tags: tags, + fields: fields, + time: time, + } + return pm +} + +func (pm *pointMessage) ShallowCopy() PointMessage { + c := new(pointMessage) + *c = *pm + return c +} + +func (*pointMessage) Type() MessageType { + return Point +} + +func (pm *pointMessage) Name() string { + return pm.name +} +func (pm *pointMessage) SetName(name string) { + pm.name = name + pm.groupID = models.ToGroupID(pm.name, pm.tags, pm.dimensions) +} +func (pm *pointMessage) Database() string { + return pm.database +} +func (pm *pointMessage) SetDatabase(database string) { + pm.database = database +} +func (pm *pointMessage) RetentionPolicy() string { + return pm.retentionPolicy +} +func (pm *pointMessage) SetRetentionPolicy(retentionPolicy string) { + pm.retentionPolicy = retentionPolicy +} +func (pm *pointMessage) GroupID() models.GroupID { + return pm.groupID +} +func (pm *pointMessage) GroupInfo() GroupInfo { + tags := make(models.Tags, len(pm.dimensions.TagNames)) + for _, t := range pm.dimensions.TagNames { + tags[t] = pm.tags[t] + } + return GroupInfo{ + ID: pm.groupID, + Tags: tags, + Dimensions: pm.dimensions, + } +} + +func (pm *pointMessage) Dimensions() models.Dimensions { + return pm.dimensions +} +func (pm *pointMessage) SetDimensions(dimensions models.Dimensions) { + if !pm.dimensions.Equal(dimensions) { + pm.dimensions = dimensions + pm.groupID = models.ToGroupID(pm.name, pm.tags, pm.dimensions) + } +} +func (pm *pointMessage) Tags() models.Tags { + return pm.tags +} +func (pm *pointMessage) SetTags(tags models.Tags) { + pm.tags = tags + pm.groupID = models.ToGroupID(pm.name, pm.tags, pm.dimensions) +} + +func (pm *pointMessage) SetTagsAndDimensions(tags models.Tags, dimensions models.Dimensions) { + pm.dimensions = dimensions + pm.tags = tags + pm.groupID = models.ToGroupID(pm.name, pm.tags, pm.dimensions) +} +func (pm *pointMessage) Fields() models.Fields { + return pm.fields +} +func (pm *pointMessage) SetFields(fields models.Fields) { + pm.fields = fields +} +func (pm *pointMessage) Time() time.Time { + return pm.time +} +func (pm *pointMessage) SetTime(time time.Time) { + pm.time = time +} + +// Returns byte array of a line protocol representation of the point +func (pm *pointMessage) Bytes(precision string) []byte { + key := imodels.MakeKey([]byte(pm.name), imodels.NewTags(pm.tags)) + fields := imodels.Fields(pm.fields).MarshalBinary() + kl := len(key) + fl := len(fields) + var bytes []byte + + if pm.time.IsZero() { + bytes = make([]byte, fl+kl+1) + copy(bytes, key) + bytes[kl] = ' ' + copy(bytes[kl+1:], fields) + } else { + timeStr := 
strconv.FormatInt(pm.time.UnixNano()/imodels.GetPrecisionMultiplier(precision), 10) + tl := len(timeStr) + bytes = make([]byte, fl+kl+tl+2) + copy(bytes, key) + bytes[kl] = ' ' + copy(bytes[kl+1:], fields) + bytes[kl+fl+1] = ' ' + copy(bytes[kl+fl+2:], []byte(timeStr)) + } + + return bytes +} + +func (pm *pointMessage) ToResult() models.Result { + return models.Result{ + Series: models.Rows{pm.ToRow()}, + } +} +func (pm *pointMessage) ToRow() *models.Row { + row := &models.Row{ + Name: pm.name, + Tags: pm.tags, + } + row.Columns = make([]string, len(pm.fields)+1) + row.Columns[0] = "time" + i := 1 + for f := range pm.fields { + row.Columns[i] = f + i++ + } + // Sort all columns but leave time as first + sort.Strings(row.Columns[1:]) + + row.Values = make([][]interface{}, 1) + row.Values[0] = make([]interface{}, len(row.Columns)) + row.Values[0][0] = pm.time + for i, c := range row.Columns[1:] { + if v, ok := pm.fields[c]; ok { + row.Values[0][i+1] = v + } + } + return row +} + +type pointMessageJSON struct { + Name string `json:"name,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Group models.GroupID `json:"group,omitempty"` + Dimensions models.Dimensions `json:"dimensions,omitempty"` + Fields models.Fields `json:"fields,omitempty"` + Tags models.Tags `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` +} + +func (pm *pointMessage) MarshalJSON() ([]byte, error) { + p := pointMessageJSON{ + Name: pm.name, + Database: pm.database, + RetentionPolicy: pm.retentionPolicy, + Group: pm.groupID, + Dimensions: pm.dimensions, + Fields: pm.fields, + Tags: pm.tags, + Time: pm.time, + } + return json.Marshal(p) +} + +// BeginBatchMessage marks the beginning of a batch of points. +// Once a BeginBatchMessage is received all subsequent message will be BatchPointMessages until an EndBatchMessage is received. +type BeginBatchMessage interface { + Message + + ShallowCopy() BeginBatchMessage + + NameSetter + + GroupInfoer + TagSetter + DimensionSetter + SetTagsAndDimensions(models.Tags, models.Dimensions) + + // Time is the maximum time of any point in the batch + TimeSetter + + // SizeHint provides a hint about the size of the batch to come. + // If non-zero expect a batch with SizeHint points, + // otherwise an unknown number of points are coming. + SizeHint() int + SetSizeHint(int) +} + +type beginBatchMessage struct { + name string + groupID models.GroupID + tags models.Tags + dimensions models.Dimensions + tmax time.Time + // If non-zero expect a batch with SizeHint points, + // otherwise an unknown number of points are coming. 
+ sizeHint int +} + +func NewBeginBatchMessage( + name string, + tags models.Tags, + byName bool, + tmax time.Time, + sizeHint int, +) BeginBatchMessage { + dimensions := models.Dimensions{ + TagNames: models.SortedKeys(tags), + ByName: byName, + } + groupID := models.ToGroupID(name, tags, dimensions) + bb := &beginBatchMessage{ + name: name, + tags: tags, + dimensions: dimensions, + groupID: groupID, + tmax: tmax, + sizeHint: sizeHint, + } + return bb +} + +func (beginBatchMessage) Type() MessageType { + return BeginBatch +} + +func (bb *beginBatchMessage) ShallowCopy() BeginBatchMessage { + c := new(beginBatchMessage) + *c = *bb + return c +} + +func (bb *beginBatchMessage) Name() string { + return bb.name +} +func (bb *beginBatchMessage) SetName(name string) { + bb.name = name + bb.groupID = models.ToGroupID(bb.name, bb.tags, bb.dimensions) +} +func (bb *beginBatchMessage) GroupID() models.GroupID { + return bb.groupID +} +func (bb *beginBatchMessage) GroupInfo() GroupInfo { + return GroupInfo{ + ID: bb.groupID, + Tags: bb.tags, + Dimensions: bb.dimensions, + } +} +func (bb *beginBatchMessage) Tags() models.Tags { + return bb.tags +} + +// SetTags updates the tags on the message. +// The dimensions are also updated to reflect the new tags. +func (bb *beginBatchMessage) SetTags(tags models.Tags) { + bb.tags = tags + bb.dimensions.TagNames = models.SortedKeys(tags) + bb.groupID = models.ToGroupID(bb.name, bb.tags, bb.dimensions) +} + +func (bb *beginBatchMessage) Dimensions() models.Dimensions { + return bb.dimensions +} + +// SetDimensions updates the dimensions on the message. +// The tags are updated to reflect the new dimensions. +// If new dimensions are being added use SetTags instead as the dimensions will be automatically updated. +func (bb *beginBatchMessage) SetDimensions(dimensions models.Dimensions) { + if !bb.dimensions.Equal(dimensions) { + bb.SetTagsAndDimensions(bb.tags, dimensions) + } +} + +// SetTagsAndDimensions updates both tags and dimensions at the same time. +// The tags will be updated to make sure they match the new dimensions. +func (bb *beginBatchMessage) SetTagsAndDimensions(tags models.Tags, dimensions models.Dimensions) { + newTags := make(models.Tags, len(tags)) + for _, dim := range dimensions.TagNames { + newTags[dim] = tags[dim] + } + bb.tags = newTags + bb.dimensions = dimensions + bb.groupID = models.ToGroupID(bb.name, bb.tags, bb.dimensions) +} +func (bb *beginBatchMessage) Time() time.Time { + return bb.tmax +} +func (bb *beginBatchMessage) SetTime(tmax time.Time) { + bb.tmax = tmax +} + +func (bb *beginBatchMessage) SizeHint() int { + return bb.sizeHint +} +func (bb *beginBatchMessage) SetSizeHint(sizeHint int) { + bb.sizeHint = sizeHint +} + +// BatchPointMessage is a single point in a batch of data. 
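The message types are designed to be shared across nodes, which is why each one carries a ShallowCopy method and why the package warns against mutating returned reference types directly. The usual pattern, visible in derivativeGroup above, is ShallowCopy the message, then Copy any map you intend to change. A hedged helper illustrating that pattern (the function itself is not part of this change):

```go
import "github.com/influxdata/kapacitor/edge"

// addStatusField returns a copy of p with an extra field set, without
// mutating the original message, which may be shared with other nodes.
// The field name "status" is purely illustrative.
func addStatusField(p edge.PointMessage, status string) edge.PointMessage {
	np := p.ShallowCopy()        // copy the message header
	fields := np.Fields().Copy() // copy the shared fields map before editing
	fields["status"] = status
	np.SetFields(fields)
	return np
}
```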
+type BatchPointMessage interface { + Message + + ShallowCopy() BatchPointMessage + + FieldsTagsTimeSetter +} + +type batchPointMessage struct { + fields models.Fields + tags models.Tags + time time.Time +} + +func NewBatchPointMessage( + fields models.Fields, + tags models.Tags, + time time.Time, +) BatchPointMessage { + return &batchPointMessage{ + fields: fields, + tags: tags, + time: time, + } +} + +func (*batchPointMessage) Type() MessageType { + return BatchPoint +} +func (bp *batchPointMessage) ShallowCopy() BatchPointMessage { + c := new(batchPointMessage) + *c = *bp + return c +} + +func (bp *batchPointMessage) Fields() models.Fields { + return bp.fields +} +func (bp *batchPointMessage) SetFields(fields models.Fields) { + bp.fields = fields +} +func (bp *batchPointMessage) Tags() models.Tags { + return bp.tags +} +func (bp *batchPointMessage) SetTags(tags models.Tags) { + bp.tags = tags +} +func (bp *batchPointMessage) Time() time.Time { + return bp.time +} +func (bp *batchPointMessage) SetTime(time time.Time) { + bp.time = time +} + +func BatchPointFromPoint(p PointMessage) BatchPointMessage { + return NewBatchPointMessage( + p.Fields(), + p.Tags(), + p.Time(), + ) +} + +// EndBatchMessage indicates that all points for a batch have arrived. +type EndBatchMessage interface { + Message + + ShallowCopy() EndBatchMessage +} + +type endBatchMessage struct { +} + +func NewEndBatchMessage() EndBatchMessage { + return &endBatchMessage{} +} + +func (*endBatchMessage) Type() MessageType { + return EndBatch +} +func (eb *endBatchMessage) ShallowCopy() EndBatchMessage { + c := new(endBatchMessage) + *c = *eb + return c +} + +// BufferedBatchMessage is a message containing all data for a single batch. +type BufferedBatchMessage interface { + Message + + ShallowCopy() BufferedBatchMessage + + Begin() BeginBatchMessage + SetBegin(BeginBatchMessage) + + // Expose common read interfaces of begin and point messages. 
+ PointMeta + + Points() []BatchPointMessage + SetPoints([]BatchPointMessage) + + End() EndBatchMessage + SetEnd(EndBatchMessage) + + ToResult() models.Result + ToRow() *models.Row +} + +type bufferedBatchMessage struct { + begin BeginBatchMessage + points []BatchPointMessage + end EndBatchMessage +} + +func NewBufferedBatchMessage( + begin BeginBatchMessage, + points []BatchPointMessage, + end EndBatchMessage, +) BufferedBatchMessage { + return &bufferedBatchMessage{ + begin: begin, + points: points, + end: end, + } +} + +func (*bufferedBatchMessage) Type() MessageType { + return BufferedBatch +} +func (bb *bufferedBatchMessage) ShallowCopy() BufferedBatchMessage { + c := new(bufferedBatchMessage) + *c = *bb + return c +} +func (bb *bufferedBatchMessage) Begin() BeginBatchMessage { + return bb.begin +} +func (bb *bufferedBatchMessage) SetBegin(begin BeginBatchMessage) { + bb.begin = begin +} + +func (bb *bufferedBatchMessage) Name() string { + return bb.begin.Name() +} +func (bb *bufferedBatchMessage) GroupID() models.GroupID { + return bb.begin.GroupID() +} +func (bb *bufferedBatchMessage) GroupInfo() GroupInfo { + return bb.begin.GroupInfo() +} +func (bb *bufferedBatchMessage) Dimensions() models.Dimensions { + return bb.begin.Dimensions() +} +func (bb *bufferedBatchMessage) Tags() models.Tags { + return bb.begin.Tags() +} +func (bb *bufferedBatchMessage) Time() time.Time { + return bb.begin.Time() +} + +func (bb *bufferedBatchMessage) Points() []BatchPointMessage { + return bb.points +} +func (bb *bufferedBatchMessage) SetPoints(points []BatchPointMessage) { + bb.points = points +} +func (bb *bufferedBatchMessage) End() EndBatchMessage { + return bb.end +} +func (bb *bufferedBatchMessage) SetEnd(end EndBatchMessage) { + bb.end = end +} + +func (bb *bufferedBatchMessage) ToResult() models.Result { + return models.Result{ + Series: models.Rows{bb.ToRow()}, + } +} +func (bb *bufferedBatchMessage) ToRow() (row *models.Row) { + row = &models.Row{ + Name: bb.begin.Name(), + Tags: bb.begin.Tags(), + } + if len(bb.points) == 0 { + return + } + row.Columns = []string{"time"} + p := bb.points[0] + for f := range p.Fields() { + row.Columns = append(row.Columns, f) + } + // Append tags that are not on the batch + for t := range p.Tags() { + if _, ok := bb.begin.Tags()[t]; !ok { + row.Columns = append(row.Columns, t) + } + } + // Sort all columns but leave time as first + sort.Strings(row.Columns[1:]) + row.Values = make([][]interface{}, len(bb.points)) + for i, p := range bb.points { + row.Values[i] = make([]interface{}, len(row.Columns)) + row.Values[i][0] = p.Time() + for j, c := range row.Columns[1:] { + if v, ok := p.Fields()[c]; ok { + row.Values[i][j+1] = v + } else if v, ok := p.Tags()[c]; ok { + row.Values[i][j+1] = v + } + } + } + return +} + +type bufferedBatchMessageJSON struct { + Name string `json:"name,omitempty"` + TMax time.Time `json:"tmax,omitempty"` + Group models.GroupID `json:"group,omitempty"` + ByName bool `json:"byname,omitempty"` + Tags models.Tags `json:"tags,omitempty"` + Points []batchPointMessageJSON `json:"points,omitempty"` +} + +type batchPointMessageJSON struct { + Fields models.Fields `json:"fields"` + Tags models.Tags `json:"tags"` + Time time.Time `json:"time"` +} + +type BufferedBatchMessageDecoder interface { + Decode() (BufferedBatchMessage, error) + More() bool +} + +type bufferedBatchMessageDecoder struct { + dec *json.Decoder +} + +func (d *bufferedBatchMessageDecoder) More() bool { + return d.dec.More() +} + +func (d *bufferedBatchMessageDecoder) Decode() 
(BufferedBatchMessage, error) { + bb := &bufferedBatchMessage{ + begin: new(beginBatchMessage), + end: new(endBatchMessage), + } + err := d.dec.Decode(bb) + return bb, err +} + +func NewBufferedBatchMessageDecoder(r io.Reader) BufferedBatchMessageDecoder { + return &bufferedBatchMessageDecoder{ + dec: json.NewDecoder(r), + } +} + +func (bb *bufferedBatchMessage) MarshalJSON() ([]byte, error) { + b := &bufferedBatchMessageJSON{ + Name: bb.begin.Name(), + TMax: bb.begin.Time(), + Group: bb.begin.GroupID(), + ByName: bb.begin.Dimensions().ByName, + Tags: bb.begin.Tags(), + Points: make([]batchPointMessageJSON, len(bb.points)), + } + for i := range b.Points { + b.Points[i] = batchPointMessageJSON{ + Fields: bb.points[i].Fields(), + Tags: bb.points[i].Tags(), + Time: bb.points[i].Time(), + } + } + return json.Marshal(b) +} + +func (bb *bufferedBatchMessage) UnmarshalJSON(data []byte) error { + b := new(bufferedBatchMessageJSON) + json.Unmarshal(data, &b) + bb.begin.SetName(b.Name) + bb.begin.SetTags(b.Tags) + dims := bb.begin.Dimensions() + dims.ByName = b.ByName + bb.begin.SetDimensions(dims) + bb.begin.SetTime(b.TMax.UTC()) + bb.begin.SetSizeHint(len(b.Points)) + bb.points = make([]BatchPointMessage, len(b.Points)) + for i := range bb.points { + tags := b.Points[i].Tags + if len(tags) == 0 { + tags = b.Tags + } + bb.points[i] = NewBatchPointMessage( + b.Points[i].Fields, + tags, + b.Points[i].Time.UTC(), + ) + } + return nil +} + +func ResultToBufferedBatches(res influxdb.Result, groupByName bool) ([]BufferedBatchMessage, error) { + if res.Err != "" { + return nil, errors.New(res.Err) + } + batches := make([]BufferedBatchMessage, 0, len(res.Series)) + for _, series := range res.Series { + b := NewBufferedBatchMessage( + NewBeginBatchMessage( + series.Name, + series.Tags, + groupByName, + time.Time{}, + len(series.Values), + ), + make([]BatchPointMessage, 0, len(series.Values)), + NewEndBatchMessage(), + ) + points := b.Points() + + for _, v := range series.Values { + fields := make(models.Fields) + var t time.Time + for i, c := range series.Columns { + if c == "time" { + tStr, ok := v[i].(string) + if !ok { + return nil, fmt.Errorf("unexpected time value: %v", v[i]) + } + var err error + t, err = time.Parse(time.RFC3339Nano, tStr) + if err != nil { + t, err = time.Parse(time.RFC3339, tStr) + if err != nil { + return nil, fmt.Errorf("unexpected time format: %v", err) + } + } + } else { + value := v[i] + if n, ok := value.(json.Number); ok { + f, err := n.Float64() + if err == nil { + value = f + } + } + if value == nil { + continue + } + fields[c] = value + } + } + if len(fields) > 0 { + if t.After(b.Begin().Time()) { + b.Begin().SetTime(t.UTC()) + } + points = append( + points, + NewBatchPointMessage( + fields, + series.Tags, + t.UTC(), + ), + ) + } + } + b.Begin().SetSizeHint(len(points)) + b.SetPoints(points) + batches = append(batches, b) + } + return batches, nil +} + +type BatchPointMessages []BatchPointMessage + +func (l BatchPointMessages) Len() int { return len(l) } +func (l BatchPointMessages) Less(i int, j int) bool { return l[i].Time().Before(l[j].Time()) } +func (l BatchPointMessages) Swap(i int, j int) { l[i], l[j] = l[j], l[i] } + +// BarrierMessage indicates that no data older than the barrier time will arrive. 
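BufferedBatchMessage also round-trips through JSON via MarshalJSON/UnmarshalJSON, and the decoder above reads a stream of such batches. A minimal usage sketch, assuming the input reader carries JSON in the format produced by MarshalJSON (`readBatches` is an invented helper):

```go
import (
	"io"

	"github.com/influxdata/kapacitor/edge"
)

// readBatches decodes a stream of JSON-encoded buffered batches from r.
func readBatches(r io.Reader) ([]edge.BufferedBatchMessage, error) {
	dec := edge.NewBufferedBatchMessageDecoder(r)
	var batches []edge.BufferedBatchMessage
	for dec.More() {
		b, err := dec.Decode()
		if err != nil {
			return nil, err
		}
		batches = append(batches, b)
	}
	return batches, nil
}
```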
+type BarrierMessage interface { + Message + ShallowCopy() BarrierMessage + TimeSetter +} +type barrierMessage struct { + time time.Time +} + +func NewBarrierMessage(time time.Time) BarrierMessage { + return &barrierMessage{ + time: time, + } +} + +func (b *barrierMessage) ShallowCopy() BarrierMessage { + c := new(barrierMessage) + *c = *b + return c +} + +func (*barrierMessage) Type() MessageType { + return Barrier +} +func (b *barrierMessage) Time() time.Time { + return b.time +} +func (b *barrierMessage) SetTime(time time.Time) { + b.time = time +} + +type DeleteGroupMessage interface { + Message + GroupIDGetter +} + +type deleteGroupMessage struct { + groupID models.GroupID +} + +func (d *deleteGroupMessage) Type() MessageType { + return DeleteGroup +} + +func (d *deleteGroupMessage) GroupID() models.GroupID { + return d.groupID +} diff --git a/edge/stats.go b/edge/stats.go new file mode 100644 index 000000000..9a6925c62 --- /dev/null +++ b/edge/stats.go @@ -0,0 +1,230 @@ +package edge + +import ( + "sync" + + expvar "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/pipeline" +) + +// StatsEdge is an edge that tracks various statistics about message passing through the edge. +type StatsEdge interface { + Edge + // Collected returns the number of messages collected by this edge. + Collected() int64 + // Emitted returns the number of messages emitted by this edge. + Emitted() int64 + // CollectedVar is an exported var the represents the number of messages collected by this edge. + CollectedVar() expvar.IntVar + // EmittedVar is an exported var the represents the number of messages emitted by this edge. + EmittedVar() expvar.IntVar + // ReadGroupStats allows for the reading of the current statistics by group. + ReadGroupStats(func(*GroupStats)) +} + +// GroupStats represents the statistics for a specific group. +type GroupStats struct { + GroupInfo GroupInfo + Collected int64 + Emitted int64 +} + +// NewStatsEdge creates an edge that tracks statistics about the message passing through the edge. +func NewStatsEdge(e Edge) StatsEdge { + switch e.Type() { + case pipeline.StreamEdge: + return &streamStatsEdge{ + statsEdge: statsEdge{ + edge: e, + groupStats: make(map[models.GroupID]*GroupStats), + collected: new(expvar.Int), + emitted: new(expvar.Int), + }, + } + case pipeline.BatchEdge: + return &batchStatsEdge{ + statsEdge: statsEdge{ + edge: e, + groupStats: make(map[models.GroupID]*GroupStats), + collected: new(expvar.Int), + emitted: new(expvar.Int), + }, + } + } + return nil +} + +type statsEdge struct { + edge Edge + + collected *expvar.Int + emitted *expvar.Int + + mu sync.RWMutex + groupStats map[models.GroupID]*GroupStats +} + +func (e *statsEdge) Collected() int64 { + return e.collected.IntValue() +} +func (e *statsEdge) Emitted() int64 { + return e.emitted.IntValue() +} + +func (e *statsEdge) CollectedVar() expvar.IntVar { + return e.collected +} +func (e *statsEdge) EmittedVar() expvar.IntVar { + return e.emitted +} + +func (e *statsEdge) Close() error { + return e.edge.Close() +} +func (e *statsEdge) Abort() { + e.edge.Abort() +} + +// ReadGroupStats calls f for each of the group stats. 
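StatsEdge decorates any Edge with collected/emitted counters and per-group statistics, which is how the rewritten kapacitor.Edge exposes its stats. A short sketch of wrapping a channel edge and reading the counters back (helper names and the buffer size are illustrative):

```go
import (
	"fmt"

	"github.com/influxdata/kapacitor/edge"
	"github.com/influxdata/kapacitor/pipeline"
)

// statsFor wraps a plain channel edge with a StatsEdge.
func statsFor(typ pipeline.EdgeType) edge.StatsEdge {
	return edge.NewStatsEdge(edge.NewChannelEdge(typ, 1000))
}

// dumpStats prints the totals and the per-group breakdown for an edge.
func dumpStats(e edge.StatsEdge) {
	fmt.Printf("collected=%d emitted=%d\n", e.Collected(), e.Emitted())
	e.ReadGroupStats(func(gs *edge.GroupStats) {
		fmt.Printf("  group=%v collected=%d emitted=%d\n",
			gs.GroupInfo.ID, gs.Collected, gs.Emitted)
	})
}
```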
+func (e *statsEdge) ReadGroupStats(f func(groupStat *GroupStats)) { + e.mu.RLock() + defer e.mu.RUnlock() + for _, stats := range e.groupStats { + f(stats) + } +} + +func (e *statsEdge) incCollected(group models.GroupID, infoF func() GroupInfo, count int64) { + // Manually unlock below as defer was too much of a performance hit + e.mu.Lock() + + if stats, ok := e.groupStats[group]; ok { + stats.Collected += count + } else { + stats = &GroupStats{ + Collected: count, + GroupInfo: infoF(), + } + e.groupStats[group] = stats + } + e.mu.Unlock() +} + +// Increment the emitted count of the group for this edge. +func (e *statsEdge) incEmitted(group models.GroupID, infoF func() GroupInfo, count int64) { + // Manually unlock below as defer was too much of a performance hit + e.mu.Lock() + + if stats, ok := e.groupStats[group]; ok { + stats.Emitted += count + } else { + stats = &GroupStats{ + Emitted: count, + GroupInfo: infoF(), + } + e.groupStats[group] = stats + } + e.mu.Unlock() +} + +type batchStatsEdge struct { + statsEdge + + currentCollectGroup GroupInfo + currentEmitGroup GroupInfo + collectSize int64 + emitSize int64 +} + +func (e *batchStatsEdge) Collect(m Message) error { + if err := e.edge.Collect(m); err != nil { + return err + } + switch b := m.(type) { + case BeginBatchMessage: + g := b.GroupInfo() + e.currentCollectGroup = g + e.collectSize = 0 + case BatchPointMessage: + e.collectSize++ + case EndBatchMessage: + e.collected.Add(1) + e.incCollected( + e.currentCollectGroup.ID, + func() GroupInfo { return e.currentCollectGroup }, + e.collectSize, + ) + case BufferedBatchMessage: + e.collected.Add(1) + begin := b.Begin() + e.incCollected(begin.GroupID(), begin.GroupInfo, int64(len(b.Points()))) + default: + // Do not count other messages + // TODO(nathanielc): How should we count other messages? + } + return nil +} + +func (e *batchStatsEdge) Emit() (m Message, ok bool) { + m, ok = e.edge.Emit() + if ok { + switch b := m.(type) { + case BeginBatchMessage: + g := b.GroupInfo() + e.currentEmitGroup = g + e.emitSize = 0 + case BatchPointMessage: + e.emitSize++ + case EndBatchMessage: + e.emitted.Add(1) + e.incEmitted( + e.currentEmitGroup.ID, + func() GroupInfo { return e.currentEmitGroup }, + e.emitSize, + ) + case BufferedBatchMessage: + e.emitted.Add(1) + begin := b.Begin() + e.incEmitted(begin.GroupID(), begin.GroupInfo, int64(len(b.Points()))) + default: + // Do not count other messages + // TODO(nathanielc): How should we count other messages? 
+ } + } + return +} + +func (e *batchStatsEdge) Type() pipeline.EdgeType { + return e.edge.Type() +} + +type streamStatsEdge struct { + statsEdge +} + +func (e *streamStatsEdge) Collect(m Message) error { + if err := e.edge.Collect(m); err != nil { + return err + } + if m.Type() == Point { + e.collected.Add(1) + p := m.(GroupInfoer) + e.incCollected(p.GroupID(), p.GroupInfo, 1) + } + return nil +} + +func (e *streamStatsEdge) Emit() (m Message, ok bool) { + m, ok = e.edge.Emit() + if ok && m.Type() == Point { + e.emitted.Add(1) + p := m.(GroupInfoer) + e.incEmitted(p.GroupID(), p.GroupInfo, 1) + } + return +} + +func (e *streamStatsEdge) Type() pipeline.EdgeType { + return e.edge.Type() +} diff --git a/edge/timed.go b/edge/timed.go new file mode 100644 index 000000000..aa3ccb8a0 --- /dev/null +++ b/edge/timed.go @@ -0,0 +1,78 @@ +package edge + +import "github.com/influxdata/kapacitor/timer" + +type timedForwardReceiver struct { + timer timer.Timer + r ForwardReceiver +} +type timedForwardBufferedReceiver struct { + timedForwardReceiver + b ForwardBufferedReceiver +} + +// NewTimedForwardReceiver creates a forward receiver which times the time spent in r. +func NewTimedForwardReceiver(t timer.Timer, r ForwardReceiver) ForwardReceiver { + b, ok := r.(ForwardBufferedReceiver) + if ok { + return &timedForwardBufferedReceiver{ + timedForwardReceiver: timedForwardReceiver{ + timer: t, + r: r, + }, + b: b, + } + } + return &timedForwardReceiver{ + timer: t, + r: r, + } +} + +func (tr *timedForwardReceiver) BeginBatch(begin BeginBatchMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.BeginBatch(begin) + tr.timer.Stop() + return +} + +func (tr *timedForwardReceiver) BatchPoint(bp BatchPointMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.BatchPoint(bp) + tr.timer.Stop() + return +} + +func (tr *timedForwardReceiver) EndBatch(end EndBatchMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.EndBatch(end) + tr.timer.Stop() + return +} + +func (tr *timedForwardBufferedReceiver) BufferedBatch(batch BufferedBatchMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.b.BufferedBatch(batch) + tr.timer.Stop() + return +} + +func (tr *timedForwardReceiver) Point(p PointMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.Point(p) + tr.timer.Stop() + return +} + +func (tr *timedForwardReceiver) Barrier(b BarrierMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.Barrier(b) + tr.timer.Stop() + return +} +func (tr *timedForwardReceiver) DeleteGroup(d DeleteGroupMessage) (m Message, err error) { + tr.timer.Start() + m, err = tr.r.DeleteGroup(d) + tr.timer.Stop() + return +} diff --git a/edge_test.go b/edge_test.go deleted file mode 100644 index 2a112e871..000000000 --- a/edge_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package kapacitor - -import ( - "log" - "os" - "testing" - - "github.com/influxdata/kapacitor/models" - "github.com/influxdata/kapacitor/pipeline" - "github.com/influxdata/wlog" -) - -func BenchmarkCollectPoint(b *testing.B) { - name := "point" - b.ReportAllocs() - ls := &logService{} - e := newEdge("BCollectPoint", "parent", "child", pipeline.StreamEdge, defaultEdgeBufferSize, ls) - p := models.Point{ - Name: name, - Tags: models.Tags{ - "tag1": "value1", - "tag2": "value2", - "tag3": "value3", - "tag4": "value4", - }, - Group: models.ToGroupID(name, nil, models.Dimensions{}), - Fields: models.Fields{ - "field1": 42, - "field2": 4.2, - "field3": 49, - "field4": 4.9, - }, - } - - b.ResetTimer() - 
b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - e.CollectPoint(p) - e.NextPoint() - } - }) -} - -type logService struct{} - -func (l *logService) NewLogger(prefix string, flag int) *log.Logger { - return wlog.New(os.Stderr, prefix, flag) -} diff --git a/eval.go b/eval.go index a68373595..a3ee5dbc7 100644 --- a/eval.go +++ b/eval.go @@ -4,9 +4,8 @@ import ( "errors" "fmt" "log" - "sync" - "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -16,14 +15,11 @@ import ( type EvalNode struct { node - e *pipeline.EvalNode - expressions []stateful.Expression - expressionsByGroup map[models.GroupID][]stateful.Expression - refVarList [][]string - scopePool stateful.ScopePool - tags map[string]bool - - expressionsByGroupMu sync.RWMutex + e *pipeline.EvalNode + expressions []stateful.Expression + refVarList [][]string + scopePool stateful.ScopePool + tags map[string]bool evalErrors *expvar.Int } @@ -34,9 +30,8 @@ func newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalN return nil, errors.New("must provide one name per expression via the 'As' property") } en := &EvalNode{ - node: node{Node: n, et: et, logger: l}, - e: n, - expressionsByGroup: make(map[models.GroupID][]stateful.Expression), + node: node{Node: n, et: et, logger: l}, + e: n, } // Create stateful expressions @@ -68,155 +63,166 @@ func newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalN return en, nil } -func (e *EvalNode) runEval(snapshot []byte) error { - valueF := func() int64 { - e.expressionsByGroupMu.RLock() - l := len(e.expressionsByGroup) - e.expressionsByGroupMu.RUnlock() - return int64(l) +func (n *EvalNode) runEval(snapshot []byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + + return consumer.Consume() + +} + +func (n *EvalNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup()), + ), nil +} + +func (n *EvalNode) newGroup() *evalGroup { + expressions := make([]stateful.Expression, len(n.expressions)) + for i, exp := range n.expressions { + expressions[i] = exp.CopyReset() } - e.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - switch e.Provides() { - case pipeline.StreamEdge: - var err error - for p, ok := e.ins[0].NextPoint(); ok; p, ok = e.ins[0].NextPoint() { - e.timer.Start() - p.Fields, p.Tags, err = e.eval(p.Time, p.Group, p.Fields, p.Tags) - if err != nil { - e.incrementErrorCount() - if !e.e.QuietFlag { - e.logger.Println("E!", err) - } - e.timer.Stop() - // Skip bad point - continue - } - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - var err error - for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() { - e.timer.Start() - b.Points = b.ShallowCopyPoints() - for i := 0; i < len(b.Points); { - p := b.Points[i] - b.Points[i].Fields, b.Points[i].Tags, err = e.eval(p.Time, b.Group, p.Fields, p.Tags) - if err != nil { - e.incrementErrorCount() - if !e.e.QuietFlag { - e.logger.Println("E!", err) - } - // Remove bad point - b.Points = append(b.Points[:i], b.Points[i+1:]...) 
- } else { - i++ - } - } - e.timer.Stop() - for _, child := range e.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } + return &evalGroup{ + n: n, + expressions: expressions, } - return nil } -func (e *EvalNode) eval(now time.Time, group models.GroupID, fields models.Fields, tags models.Tags) (models.Fields, models.Tags, error) { - vars := e.scopePool.Get() - defer e.scopePool.Put(vars) - e.expressionsByGroupMu.RLock() - expressions, ok := e.expressionsByGroup[group] - e.expressionsByGroupMu.RUnlock() - if !ok { - expressions = make([]stateful.Expression, len(e.expressions)) - for i, exp := range e.expressions { - expressions[i] = exp.CopyReset() - } - e.expressionsByGroupMu.Lock() - e.expressionsByGroup[group] = expressions - e.expressionsByGroupMu.Unlock() - } +func (n *EvalNode) eval(expressions []stateful.Expression, p edge.FieldsTagsTimeSetter) error { + + vars := n.scopePool.Get() + defer n.scopePool.Put(vars) + for i, expr := range expressions { - err := fillScope(vars, e.refVarList[i], now, fields, tags) + err := fillScope(vars, n.refVarList[i], p) if err != nil { - return nil, nil, err + return err } v, err := expr.Eval(vars) if err != nil { - return nil, nil, err + return err } - name := e.e.AsList[i] + name := n.e.AsList[i] vars.Set(name, v) } + fields := p.Fields() + tags := p.Tags() newTags := tags - if len(e.tags) > 0 { + if len(n.tags) > 0 { newTags = newTags.Copy() - for tag := range e.tags { + for tag := range n.tags { v, err := vars.Get(tag) if err != nil { - return nil, nil, err + return err } if s, ok := v.(string); !ok { - return nil, nil, fmt.Errorf("result of a tag expression must be of type string, got %T", v) + return fmt.Errorf("result of a tag expression must be of type string, got %T", v) } else { newTags[tag] = s } } } var newFields models.Fields - if e.e.KeepFlag { - if l := len(e.e.KeepList); l != 0 { + if n.e.KeepFlag { + if l := len(n.e.KeepList); l != 0 { newFields = make(models.Fields, l) - for _, f := range e.e.KeepList { + for _, f := range n.e.KeepList { // Try the vars scope first if vars.Has(f) { v, err := vars.Get(f) if err != nil { - return nil, nil, err + return err } newFields[f] = v } else if v, ok := fields[f]; ok { // Try the raw fields next, since it may not have been a referenced var. 
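+				// So a KeepList entry may name either a computed value or an
+				// untouched original field.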
newFields[f] = v } else { - return nil, nil, fmt.Errorf("cannot keep field %q, field does not exist", f) + return fmt.Errorf("cannot keep field %q, field does not exist", f) } } } else { - newFields = make(models.Fields, len(fields)+len(e.e.AsList)) + newFields = make(models.Fields, len(fields)+len(n.e.AsList)) for f, v := range fields { newFields[f] = v } - for _, f := range e.e.AsList { + for _, f := range n.e.AsList { v, err := vars.Get(f) if err != nil { - return nil, nil, err + return err } newFields[f] = v } } } else { - newFields = make(models.Fields, len(e.e.AsList)-len(e.tags)) - for _, f := range e.e.AsList { - if e.tags[f] { + newFields = make(models.Fields, len(n.e.AsList)-len(n.tags)) + for _, f := range n.e.AsList { + if n.tags[f] { continue } v, err := vars.Get(f) if err != nil { - return nil, nil, err + return err } newFields[f] = v } } - return newFields, newTags, nil + p.SetFields(newFields) + p.SetTags(newTags) + return nil +} + +type evalGroup struct { + n *EvalNode + expressions []stateful.Expression +} + +func (g *evalGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + begin = begin.ShallowCopy() + begin.SetSizeHint(0) + return begin, nil +} + +func (g *evalGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + bp = bp.ShallowCopy() + if g.doEval(bp) { + return bp, nil + } + return nil, nil +} + +func (g *evalGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *evalGroup) Point(p edge.PointMessage) (edge.Message, error) { + p = p.ShallowCopy() + if g.doEval(p) { + return p, nil + } + return nil, nil +} + +func (g *evalGroup) doEval(p edge.FieldsTagsTimeSetter) bool { + err := g.n.eval(g.expressions, p) + if err != nil { + g.n.incrementErrorCount() + if !g.n.e.QuietFlag { + g.n.logger.Println("E!", err) + } + // Skip bad point + return false + } + return true +} + +func (g *evalGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *evalGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } diff --git a/expr.go b/expr.go index 169a50c02..1419e0b43 100644 --- a/expr.go +++ b/expr.go @@ -2,18 +2,17 @@ package kapacitor import ( "fmt" - "time" - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/tick/ast" "github.com/influxdata/kapacitor/tick/stateful" ) // EvalPredicate - Evaluate a given expression as a boolean predicate against a set of fields and tags -func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, now time.Time, fields models.Fields, tags models.Tags) (bool, error) { +func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, p edge.FieldsTagsTimeGetter) (bool, error) { vars := scopePool.Get() defer scopePool.Put(vars) - err := fillScope(vars, scopePool.ReferenceVariables(), now, fields, tags) + err := fillScope(vars, scopePool.ReferenceVariables(), p) if err != nil { return false, err } @@ -27,7 +26,10 @@ func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, now tim } // fillScope - given a scope and reference variables, we fill the exact variables from the now, fields and tags. 
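+// With the message-passing API the point itself satisfies FieldsTagsTimeGetter,
+// so the time, fields, and tags are read from the message rather than passed in
+// as separate arguments.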
-func fillScope(vars *stateful.Scope, referenceVariables []string, now time.Time, fields models.Fields, tags models.Tags) error { +func fillScope(vars *stateful.Scope, referenceVariables []string, p edge.FieldsTagsTimeGetter) error { + now := p.Time() + fields := p.Fields() + tags := p.Tags() for _, refVariableName := range referenceVariables { if refVariableName == "time" { vars.Set("time", now.Local()) diff --git a/expvar/expvar.go b/expvar/expvar.go index 00485aede..b6a23e626 100644 --- a/expvar/expvar.go +++ b/expvar/expvar.go @@ -16,14 +16,17 @@ import ( ) type IntVar interface { + expvar.Var IntValue() int64 } type FloatVar interface { + expvar.Var FloatValue() float64 } type StringVar interface { + expvar.Var StringValue() string } diff --git a/flatten.go b/flatten.go index 51f7bbefb..3189f9812 100644 --- a/flatten.go +++ b/flatten.go @@ -3,11 +3,10 @@ package kapacitor import ( "bytes" "log" - "sort" "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" ) @@ -32,171 +31,160 @@ func newFlattenNode(et *ExecutingTask, n *pipeline.FlattenNode, l *log.Logger) ( return fn, nil } -type flattenStreamBuffer struct { - Time time.Time - Name string - Group models.GroupID - Dimensions models.Dimensions - Tags models.Tags - Points []models.RawPoint +func (n *FlattenNode) runFlatten([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() } -type flattenBatchBuffer struct { - Time time.Time - Name string - Group models.GroupID - Tags models.Tags - Points map[time.Time][]models.RawPoint +func (n *FlattenNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + t := first.Time().Round(n.f.Tolerance) + return &flattenBuffer{ + n: n, + time: t, + name: first.Name(), + groupInfo: group, + }, nil } -func (n *FlattenNode) runFlatten([]byte) error { - var mu sync.RWMutex - switch n.Wants() { - case pipeline.StreamEdge: - flattenBuffers := make(map[models.GroupID]*flattenStreamBuffer) - valueF := func() int64 { - mu.RLock() - l := len(flattenBuffers) - mu.RUnlock() - return int64(l) - } - n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - for p, ok := n.ins[0].NextPoint(); ok; p, ok = n.ins[0].NextPoint() { - n.timer.Start() - t := p.Time.Round(n.f.Tolerance) - mu.RLock() - currentBuf, ok := flattenBuffers[p.Group] - mu.RUnlock() - if !ok { - currentBuf = &flattenStreamBuffer{ - Time: t, - Name: p.Name, - Group: p.Group, - Dimensions: p.Dimensions, - Tags: p.PointTags(), - } - mu.Lock() - flattenBuffers[p.Group] = currentBuf - mu.Unlock() - } - rp := models.RawPoint{ - Time: t, - Fields: p.Fields, - Tags: p.Tags, - } - if t.Equal(currentBuf.Time) { - currentBuf.Points = append(currentBuf.Points, rp) - } else { - if fields, err := n.flatten(currentBuf.Points); err != nil { - return err - } else { - // Emit point - flatP := models.Point{ - Time: currentBuf.Time, - Name: currentBuf.Name, - Group: currentBuf.Group, - Dimensions: currentBuf.Dimensions, - Tags: currentBuf.Tags, - Fields: fields, - } - n.timer.Pause() - for _, out := range n.outs { - err := out.CollectPoint(flatP) - if err != nil { - return err - } - } - n.timer.Resume() - } - // Update currentBuf with new time and initial point - currentBuf.Time = t - currentBuf.Points = currentBuf.Points[0:1] - currentBuf.Points[0] = rp - } - n.timer.Stop() +type 
flattenBuffer struct { + n *FlattenNode + time time.Time + name string + groupInfo edge.GroupInfo + points []edge.FieldsTagsTimeGetter +} + +func (b *flattenBuffer) BeginBatch(begin edge.BeginBatchMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + + b.name = begin.Name() + b.time = time.Time{} + if s := begin.SizeHint(); s > cap(b.points) { + b.points = make([]edge.FieldsTagsTimeGetter, 0, s) + } + + begin = begin.ShallowCopy() + begin.SetSizeHint(0) + b.n.timer.Pause() + err := edge.Forward(b.n.outs, begin) + b.n.timer.Resume() + return err +} + +func (b *flattenBuffer) BatchPoint(bp edge.BatchPointMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + + t := bp.Time().Round(b.n.f.Tolerance) + bp = bp.ShallowCopy() + bp.SetTime(t) + + t, fields, err := b.addPoint(bp) + if err != nil { + return err + } + if len(fields) == 0 { + return nil + } + + return b.emitBatchPoint(t, fields) +} + +func (b *flattenBuffer) emitBatchPoint(t time.Time, fields models.Fields) error { + // Emit batch point + flatP := edge.NewBatchPointMessage( + fields, + b.groupInfo.Tags, + t, + ) + b.n.timer.Pause() + err := edge.Forward(b.n.outs, flatP) + b.n.timer.Resume() + return err +} + +func (b *flattenBuffer) EndBatch(end edge.EndBatchMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + + if len(b.points) > 0 { + fields, err := b.n.flatten(b.points) + if err != nil { + return err } - case pipeline.BatchEdge: - allBuffers := make(map[models.GroupID]*flattenBatchBuffer) - valueF := func() int64 { - mu.RLock() - l := len(allBuffers) - mu.RUnlock() - return int64(l) + if err := b.emitBatchPoint(b.time, fields); err != nil { + return err } - n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() { - n.timer.Start() - t := b.TMax.Round(n.f.Tolerance) - mu.RLock() - currentBuf, ok := allBuffers[b.Group] - mu.RUnlock() - if !ok { - currentBuf = &flattenBatchBuffer{ - Time: t, - Name: b.Name, - Group: b.Group, - Tags: b.Tags, - Points: make(map[time.Time][]models.RawPoint), - } - mu.Lock() - allBuffers[b.Group] = currentBuf - mu.Unlock() - } - if !t.Equal(currentBuf.Time) { - // Flatten/Emit old buffer - times := make(timeList, 0, len(currentBuf.Points)) - for t := range currentBuf.Points { - times = append(times, t) - } - sort.Sort(times) - flatBatch := models.Batch{ - TMax: currentBuf.Time, - Name: currentBuf.Name, - Group: currentBuf.Group, - ByName: b.ByName, - Tags: currentBuf.Tags, - } - for _, t := range times { - if fields, err := n.flatten(currentBuf.Points[t]); err != nil { - return err - } else { - flatBatch.Points = append(flatBatch.Points, models.BatchPoint{ - Time: t, - Tags: currentBuf.Tags, - Fields: fields, - }) - } - delete(currentBuf.Points, t) - } - n.timer.Pause() - for _, out := range n.outs { - err := out.CollectBatch(flatBatch) - if err != nil { - return err - } - } - n.timer.Resume() - // Update currentBuf with new time - currentBuf.Time = t - } - for _, p := range b.Points { - t := p.Time.Round(n.f.Tolerance) - currentBuf.Points[t] = append(currentBuf.Points[t], models.RawPoint{ - Time: t, - Fields: p.Fields, - Tags: p.Tags, - }) + b.points = b.points[0:0] + } + + b.n.timer.Pause() + err := edge.Forward(b.n.outs, end) + b.n.timer.Resume() + return err +} + +func (b *flattenBuffer) Point(p edge.PointMessage) error { + b.n.timer.Start() + defer b.n.timer.Stop() + + t := p.Time().Round(b.n.f.Tolerance) + p = p.ShallowCopy() + p.SetTime(t) + + t, fields, err := b.addPoint(p) + if err 
!= nil { + return err + } + if len(fields) == 0 { + return nil + } + + // Emit point + flatP := edge.NewPointMessage( + b.name, "", "", + b.groupInfo.Dimensions, + fields, + b.groupInfo.Tags, + t, + ) + b.n.timer.Pause() + err = edge.Forward(b.n.outs, flatP) + b.n.timer.Resume() + return err +} + +func (b *flattenBuffer) addPoint(p edge.FieldsTagsTimeGetter) (next time.Time, fields models.Fields, err error) { + t := p.Time() + if !t.Equal(b.time) { + if len(b.points) > 0 { + fields, err = b.n.flatten(b.points) + if err != nil { + return } - n.timer.Stop() + next = b.time + b.points = b.points[0:0] } + // Update buffer with new time + b.time = t } - return nil + b.points = append(b.points, p) + return +} +func (b *flattenBuffer) Barrier(barrier edge.BarrierMessage) error { + return edge.Forward(b.n.outs, barrier) +} +func (b *flattenBuffer) DeleteGroup(d edge.DeleteGroupMessage) error { + return edge.Forward(b.n.outs, d) } -func (n *FlattenNode) flatten(points []models.RawPoint) (models.Fields, error) { +func (n *FlattenNode) flatten(points []edge.FieldsTagsTimeGetter) (models.Fields, error) { fields := make(models.Fields) if len(points) == 0 { return fields, nil @@ -205,8 +193,9 @@ func (n *FlattenNode) flatten(points []models.RawPoint) (models.Fields, error) { defer n.bufPool.Put(fieldPrefix) POINTS: for _, p := range points { + tags := p.Tags() for i, tag := range n.f.Dimensions { - if v, ok := p.Tags[tag]; ok { + if v, ok := tags[tag]; ok { if i > 0 { fieldPrefix.WriteString(n.f.Delimiter) } @@ -218,7 +207,7 @@ POINTS: } } l := fieldPrefix.Len() - for fname, value := range p.Fields { + for fname, value := range p.Fields() { if !n.f.DropOriginalFieldNameFlag { if l > 0 { fieldPrefix.WriteString(n.f.Delimiter) diff --git a/group_by.go b/group_by.go index 6efc36a42..8e12d6cb5 100644 --- a/group_by.go +++ b/group_by.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -14,10 +15,19 @@ import ( type GroupByNode struct { node - g *pipeline.GroupByNode - dimensions []string + g *pipeline.GroupByNode + + byName bool + tagNames []string + + begin edge.BeginBatchMessage + dimensions models.Dimensions + allDimensions bool - byName bool + + mu sync.RWMutex + lastTime time.Time + groups map[models.GroupID]edge.BufferedBatchMessage } // Create a new GroupByNode which splits the stream dynamically based on the specified dimensions. 
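// Several of the node conversions in this diff (EvalNode above, HTTPOutNode and
// HTTPPostNode below) share the same shape: runX wraps the input edge in a
// grouped consumer and the per-group work is done by a ForwardReceiver. A
// minimal sketch of that shape, assuming only the edge helpers already used in
// this diff; PassThroughNode and passThroughGroup are illustrative names, not
// part of the change. (GroupByNode just below is the exception: it consumes
// ungrouped data via edge.NewConsumerWithReceiver because it is the node that
// establishes the groups.)
//
//	func (n *PassThroughNode) runPassThrough([]byte) error {
//		consumer := edge.NewGroupedConsumer(n.ins[0], n)
//		n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())
//		return consumer.Consume()
//	}
//
//	func (n *PassThroughNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {
//		return edge.NewReceiverFromForwardReceiverWithStats(
//			n.outs,
//			edge.NewTimedForwardReceiver(n.timer, passThroughGroup{}),
//		), nil
//	}
//
//	// passThroughGroup forwards every message unchanged; returning a nil
//	// message instead would drop it (compare evalGroup.doEval above).
//	type passThroughGroup struct{}
//
//	func (g passThroughGroup) BeginBatch(b edge.BeginBatchMessage) (edge.Message, error)    { return b, nil }
//	func (g passThroughGroup) BatchPoint(p edge.BatchPointMessage) (edge.Message, error)    { return p, nil }
//	func (g passThroughGroup) EndBatch(e edge.EndBatchMessage) (edge.Message, error)        { return e, nil }
//	func (g passThroughGroup) Point(p edge.PointMessage) (edge.Message, error)              { return p, nil }
//	func (g passThroughGroup) Barrier(b edge.BarrierMessage) (edge.Message, error)          { return b, nil }
//	func (g passThroughGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error)  { return d, nil }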
@@ -25,101 +35,130 @@ func newGroupByNode(et *ExecutingTask, n *pipeline.GroupByNode, l *log.Logger) ( gn := &GroupByNode{ node: node{Node: n, et: et, logger: l}, g: n, - byName: n.ByMeasurementFlag, + groups: make(map[models.GroupID]edge.BufferedBatchMessage), } gn.node.runF = gn.runGroupBy - gn.allDimensions, gn.dimensions = determineDimensions(n.Dimensions) + gn.allDimensions, gn.tagNames = determineTagNames(n.Dimensions, n.ExcludedDimensions) + gn.byName = n.ByMeasurementFlag return gn, nil } -func (g *GroupByNode) runGroupBy([]byte) error { - dims := models.Dimensions{ - ByName: g.g.ByMeasurementFlag, +func (n *GroupByNode) runGroupBy([]byte) error { + valueF := func() int64 { + n.mu.RLock() + l := len(n.groups) + n.mu.RUnlock() + return int64(l) } - switch g.Wants() { - case pipeline.StreamEdge: - dims.TagNames = g.dimensions - for pt, ok := g.ins[0].NextPoint(); ok; pt, ok = g.ins[0].NextPoint() { - g.timer.Start() - pt = setGroupOnPoint(pt, g.allDimensions, dims, g.g.ExcludedDimensions) - g.timer.Stop() - for _, child := range g.outs { - err := child.CollectPoint(pt) - if err != nil { - return err - } - } - } - default: - var mu sync.RWMutex - var lastTime time.Time - groups := make(map[models.GroupID]*models.Batch) - valueF := func() int64 { - mu.RLock() - l := len(groups) - mu.RUnlock() - return int64(l) - } - g.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - for b, ok := g.ins[0].NextBatch(); ok; b, ok = g.ins[0].NextBatch() { - g.timer.Start() - if !b.TMax.Equal(lastTime) { - lastTime = b.TMax - // Emit all groups - mu.RLock() - for id, group := range groups { - for _, child := range g.outs { - err := child.CollectBatch(*group) - if err != nil { - return err - } - } - mu.RUnlock() - mu.Lock() - // Remove from groups - delete(groups, id) - mu.Unlock() - mu.RLock() - } - mu.RUnlock() - } - for _, p := range b.Points { - if g.allDimensions { - dims.TagNames = filterExcludedDimensions(p.Tags, dims, g.g.ExcludedDimensions) - } else { - dims.TagNames = g.dimensions - } - groupID := models.ToGroupID(b.Name, p.Tags, dims) - mu.RLock() - group, ok := groups[groupID] - mu.RUnlock() - if !ok { - tags := make(map[string]string, len(dims.TagNames)) - for _, dim := range dims.TagNames { - tags[dim] = p.Tags[dim] - } - group = &models.Batch{ - Name: b.Name, - Group: groupID, - TMax: b.TMax, - ByName: b.ByName, - Tags: tags, - } - mu.Lock() - groups[groupID] = group - mu.Unlock() - } - group.Points = append(group.Points, p) + n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) + + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + n, + ) + return consumer.Consume() +} + +func (n *GroupByNode) Point(p edge.PointMessage) error { + p = p.ShallowCopy() + n.timer.Start() + dims := p.Dimensions() + dims.ByName = dims.ByName || n.byName + dims.TagNames = computeTagNames(p.Tags(), n.allDimensions, n.tagNames, n.g.ExcludedDimensions) + p.SetDimensions(dims) + n.timer.Stop() + if err := edge.Forward(n.outs, p); err != nil { + return err + } + return nil +} + +func (n *GroupByNode) BeginBatch(begin edge.BeginBatchMessage) error { + n.timer.Start() + defer n.timer.Stop() + + n.emit(begin.Time()) + + n.begin = begin + n.dimensions = begin.Dimensions() + n.dimensions.ByName = n.dimensions.ByName || n.byName + + return nil +} + +func (n *GroupByNode) BatchPoint(bp edge.BatchPointMessage) error { + n.timer.Start() + defer n.timer.Stop() + + n.dimensions.TagNames = computeTagNames(bp.Tags(), n.allDimensions, n.tagNames, n.g.ExcludedDimensions) + groupID := 
models.ToGroupID(n.begin.Name(), bp.Tags(), n.dimensions) + group, ok := n.groups[groupID] + if !ok { + // Create new begin message + newBegin := n.begin.ShallowCopy() + newBegin.SetTagsAndDimensions(bp.Tags(), n.dimensions) + + // Create buffer for group batch + group = edge.NewBufferedBatchMessage( + newBegin, + make([]edge.BatchPointMessage, 0, newBegin.SizeHint()), + edge.NewEndBatchMessage(), + ) + n.mu.Lock() + n.groups[groupID] = group + n.mu.Unlock() + } + group.SetPoints(append(group.Points(), bp)) + + return nil +} + +func (n *GroupByNode) EndBatch(end edge.EndBatchMessage) error { + return nil +} + +func (n *GroupByNode) Barrier(b edge.BarrierMessage) error { + n.timer.Start() + err := n.emit(b.Time()) + n.timer.Stop() + if err != nil { + return err + } + return edge.Forward(n.outs, b) +} +func (n *GroupByNode) DeleteGroup(d edge.DeleteGroupMessage) error { + return edge.Forward(n.outs, d) +} + +// emit sends all groups before time t to children nodes. +// The node timer must be started when calling this method. +func (n *GroupByNode) emit(t time.Time) error { + // TODO: ensure this time comparison works with barrier messages + if !t.Equal(n.lastTime) { + n.lastTime = t + // Emit all groups + for id, group := range n.groups { + // Update SizeHint since we know the final point count + group.Begin().SetSizeHint(len(group.Points())) + // Sort points since we didn't guarantee insertion order was sorted + sort.Sort(edge.BatchPointMessages(group.Points())) + // Send group batch to all children + n.timer.Pause() + if err := edge.Forward(n.outs, group); err != nil { + return err } - g.timer.Stop() + n.timer.Resume() + n.mu.Lock() + // Remove from group + delete(n.groups, id) + n.mu.Unlock() } } return nil } -func determineDimensions(dimensions []interface{}) (allDimensions bool, realDimensions []string) { +func determineTagNames(dimensions []interface{}, excluded []string) (allDimensions bool, realDimensions []string) { for _, dim := range dimensions { switch d := dim.(type) { case string: @@ -129,13 +168,13 @@ func determineDimensions(dimensions []interface{}) (allDimensions bool, realDime } } sort.Strings(realDimensions) + realDimensions = filterExcludedTagNames(realDimensions, excluded) return } -func filterExcludedDimensions(tags models.Tags, dimensions models.Dimensions, excluded []string) []string { - dimensions.TagNames = models.SortedKeys(tags) - filtered := dimensions.TagNames[0:0] - for _, t := range dimensions.TagNames { +func filterExcludedTagNames(tagNames, excluded []string) []string { + filtered := tagNames[0:0] + for _, t := range tagNames { found := false for _, x := range excluded { if x == t { @@ -150,11 +189,9 @@ func filterExcludedDimensions(tags models.Tags, dimensions models.Dimensions, ex return filtered } -func setGroupOnPoint(p models.Point, allDimensions bool, dimensions models.Dimensions, excluded []string) models.Point { +func computeTagNames(tags models.Tags, allDimensions bool, tagNames, excluded []string) []string { if allDimensions { - dimensions.TagNames = filterExcludedDimensions(p.Tags, dimensions, excluded) + return filterExcludedTagNames(models.SortedKeys(tags), excluded) } - p.Group = models.ToGroupID(p.Name, p.Tags, dimensions) - p.Dimensions = dimensions - return p + return tagNames } diff --git a/http_out.go b/http_out.go index d34ba1083..002f75fa9 100644 --- a/http_out.go +++ b/http_out.go @@ -7,7 +7,7 @@ import ( "path" "sync" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" 
"github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/services/httpd" @@ -15,21 +15,22 @@ import ( type HTTPOutNode struct { node - c *pipeline.HTTPOutNode - result *models.Result - groupSeriesIdx map[models.GroupID]int - endpoint string - routes []httpd.Route - mu sync.RWMutex + c *pipeline.HTTPOutNode + + endpoint string + + mu sync.RWMutex + routes []httpd.Route + result *models.Result + indexes []*httpOutGroup } // Create a new HTTPOutNode which caches the most recent item and exposes it over the HTTP API. func newHTTPOutNode(et *ExecutingTask, n *pipeline.HTTPOutNode, l *log.Logger) (*HTTPOutNode, error) { hn := &HTTPOutNode{ - node: node{Node: n, et: et, logger: l}, - c: n, - groupSeriesIdx: make(map[models.GroupID]int), - result: new(models.Result), + node: node{Node: n, et: et, logger: l}, + c: n, + result: new(models.Result), } et.registerOutput(hn.c.Endpoint, hn) hn.node.runF = hn.runOut @@ -37,24 +38,16 @@ func newHTTPOutNode(et *ExecutingTask, n *pipeline.HTTPOutNode, l *log.Logger) ( return hn, nil } -func (h *HTTPOutNode) Endpoint() string { - return h.endpoint +func (n *HTTPOutNode) Endpoint() string { + return n.endpoint } -func (h *HTTPOutNode) runOut([]byte) error { - valueF := func() int64 { - h.mu.RLock() - l := len(h.groupSeriesIdx) - h.mu.RUnlock() - return int64(l) - } - h.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - +func (n *HTTPOutNode) runOut([]byte) error { hndl := func(w http.ResponseWriter, req *http.Request) { - h.mu.RLock() - defer h.mu.RUnlock() + n.mu.RLock() + defer n.mu.RUnlock() - if b, err := json.Marshal(h.result); err != nil { + if b, err := json.Marshal(n.result); err != nil { httpd.HttpError( w, err.Error(), @@ -66,7 +59,7 @@ func (h *HTTPOutNode) runOut([]byte) error { } } - p := path.Join("/tasks/", h.et.Task.ID, h.c.Endpoint) + p := path.Join("/tasks/", n.et.Task.ID, n.c.Endpoint) r := []httpd.Route{{ Method: "GET", @@ -74,65 +67,110 @@ func (h *HTTPOutNode) runOut([]byte) error { HandlerFunc: hndl, }} - h.endpoint = h.et.tm.HTTPDService.URL() + p - func() { - h.mu.Lock() - defer h.mu.Unlock() - h.routes = r - }() + n.endpoint = n.et.tm.HTTPDService.URL() + p + n.mu.Lock() + n.routes = r + n.mu.Unlock() - err := h.et.tm.HTTPDService.AddRoutes(r) + err := n.et.tm.HTTPDService.AddRoutes(r) if err != nil { return err } - switch h.Wants() { - case pipeline.StreamEdge: - for p, ok := h.ins[0].NextPoint(); ok; p, ok = h.ins[0].NextPoint() { - h.timer.Start() - row := models.PointToRow(p) - h.updateResultWithRow(p.Group, row) - h.timer.Stop() - for _, child := range h.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := h.ins[0].NextBatch(); ok; b, ok = h.ins[0].NextBatch() { - h.timer.Start() - row := models.BatchToRow(b) - h.updateResultWithRow(b.Group, row) - h.timer.Stop() - for _, child := range h.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } - } - return nil + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + + return consumer.Consume() } // Update the result structure with a row. 
-func (h *HTTPOutNode) updateResultWithRow(group models.GroupID, row *models.Row) { - h.mu.Lock() - defer h.mu.Unlock() - idx, ok := h.groupSeriesIdx[group] - if !ok { - idx = len(h.result.Series) - h.groupSeriesIdx[group] = idx - h.result.Series = append(h.result.Series, row) - } else { - h.result.Series[idx] = row +func (n *HTTPOutNode) updateResultWithRow(idx int, row *models.Row) { + n.mu.Lock() + defer n.mu.Unlock() + if idx >= len(n.result.Series) { + n.incrementErrorCount() + n.logger.Printf("E! index out of range for row update %d", idx) + return + } + n.result.Series[idx] = row +} + +func (n *HTTPOutNode) stopOut() { + n.mu.Lock() + defer n.mu.Unlock() + n.et.tm.HTTPDService.DelRoutes(n.routes) +} + +func (n *HTTPOutNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup(group.ID)), + ), nil +} + +func (n *HTTPOutNode) newGroup(groupID models.GroupID) *httpOutGroup { + n.mu.Lock() + defer n.mu.Unlock() + + idx := len(n.result.Series) + n.result.Series = append(n.result.Series, nil) + g := &httpOutGroup{ + n: n, + idx: idx, + buffer: new(edge.BatchBuffer), } + n.indexes = append(n.indexes, g) + return g } +func (n *HTTPOutNode) deleteGroup(idx int) { + n.mu.Lock() + defer n.mu.Unlock() -func (h *HTTPOutNode) stopOut() { - h.mu.Lock() - defer h.mu.Unlock() - h.et.tm.HTTPDService.DelRoutes(h.routes) + for _, g := range n.indexes[idx+1:] { + g.idx-- + } + n.indexes = append(n.indexes[0:idx], n.indexes[idx+1:]...) + n.result.Series = append(n.result.Series[0:idx], n.result.Series[idx+1:]...) +} + +type httpOutGroup struct { + n *HTTPOutNode + id models.GroupID + idx int + buffer *edge.BatchBuffer +} + +func (g *httpOutGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + return nil, g.buffer.BeginBatch(begin) +} + +func (g *httpOutGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return nil, g.buffer.BatchPoint(bp) +} + +func (g *httpOutGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return g.BufferedBatch(g.buffer.BufferedBatchMessage(end)) +} + +func (g *httpOutGroup) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) { + row := batch.ToRow() + g.n.updateResultWithRow(g.idx, row) + return batch, nil +} + +func (g *httpOutGroup) Point(p edge.PointMessage) (edge.Message, error) { + row := p.ToRow() + g.n.updateResultWithRow(g.idx, row) + return p, nil +} + +func (g *httpOutGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *httpOutGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + g.n.deleteGroup(g.idx) + return d, nil } diff --git a/http_post.go b/http_post.go index 7fdc4cf55..c7a520e1b 100644 --- a/http_post.go +++ b/http_post.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/influxdata/kapacitor/bufpool" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/services/httppost" @@ -50,68 +51,92 @@ func newHTTPPostNode(et *ExecutingTask, n *pipeline.HTTPPostNode, l *log.Logger) return hn, nil } -func (h *HTTPPostNode) runPost([]byte) error { - switch h.Wants() { - case pipeline.StreamEdge: - for p, ok := h.ins[0].NextPoint(); ok; p, ok = h.ins[0].NextPoint() { - h.timer.Start() - row := models.PointToRow(p) - h.postRow(p.Group, row) - h.timer.Stop() - for _, child := range h.outs { - err := 
child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := h.ins[0].NextBatch(); ok; b, ok = h.ins[0].NextBatch() { - h.timer.Start() - row := models.BatchToRow(b) - h.postRow(b.Group, row) - h.timer.Stop() - for _, child := range h.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } +func (n *HTTPPostNode) runPost([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + + return consumer.Consume() + +} + +func (n *HTTPPostNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + g := &httpPostGroup{ + n: n, + buffer: new(edge.BatchBuffer), } - return nil + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, g), + ), nil +} + +type httpPostGroup struct { + n *HTTPPostNode + buffer *edge.BatchBuffer +} + +func (g *httpPostGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + return nil, g.buffer.BeginBatch(begin) +} + +func (g *httpPostGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return nil, g.buffer.BatchPoint(bp) +} + +func (g *httpPostGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return g.BufferedBatch(g.buffer.BufferedBatchMessage(end)) } -// Update the result structure with a row. -func (h *HTTPPostNode) postRow(group models.GroupID, row *models.Row) { +func (g *httpPostGroup) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) { + row := batch.ToRow() + g.n.postRow(row) + return batch, nil +} + +func (g *httpPostGroup) Point(p edge.PointMessage) (edge.Message, error) { + row := p.ToRow() + g.n.postRow(row) + return p, nil +} + +func (g *httpPostGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *httpPostGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + +func (n *HTTPPostNode) postRow(row *models.Row) { result := new(models.Result) result.Series = []*models.Row{row} - body := h.bp.Get() - defer h.bp.Put(body) + body := n.bp.Get() + defer n.bp.Put(body) err := json.NewEncoder(body).Encode(result) if err != nil { - h.incrementErrorCount() - h.logger.Printf("E! failed to marshal row data json: %v", err) + n.incrementErrorCount() + n.logger.Printf("E! failed to marshal row data json: %v", err) return } - req, err := h.endpoint.NewHTTPRequest(body) + req, err := n.endpoint.NewHTTPRequest(body) if err != nil { - h.incrementErrorCount() - h.logger.Printf("E! failed to marshal row data json: %v", err) + n.incrementErrorCount() + n.logger.Printf("E! failed to marshal row data json: %v", err) return } req.Header.Set("Content-Type", "application/json") - for k, v := range h.c.Headers { + for k, v := range n.c.Headers { req.Header.Set(k, v) } resp, err := http.DefaultClient.Do(req) if err != nil { - h.incrementErrorCount() - h.logger.Printf("E! failed to POST row data: %v", err) + n.incrementErrorCount() + n.logger.Printf("E! 
failed to POST row data: %v", err) return } resp.Body.Close() - } diff --git a/influxdb_out.go b/influxdb_out.go index 3f805a8ee..17a333039 100644 --- a/influxdb_out.go +++ b/influxdb_out.go @@ -7,9 +7,9 @@ import ( "time" "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/influxdb" - "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/pkg/errors" ) @@ -26,6 +26,8 @@ type InfluxDBOutNode struct { pointsWritten *expvar.Int writeErrors *expvar.Int + + batchBuffer *edge.BatchBuffer } func newInfluxDBOutNode(et *ExecutingTask, n *pipeline.InfluxDBOutNode, l *log.Logger) (*InfluxDBOutNode, error) { @@ -37,9 +39,10 @@ func newInfluxDBOutNode(et *ExecutingTask, n *pipeline.InfluxDBOutNode, l *log.L return nil, errors.Wrap(err, "failed to get InfluxDB client") } in := &InfluxDBOutNode{ - node: node{Node: n, et: et, logger: l}, - i: n, - wb: newWriteBuffer(int(n.Buffer), n.FlushInterval, cli), + node: node{Node: n, et: et, logger: l}, + i: n, + wb: newWriteBuffer(int(n.Buffer), n.FlushInterval, cli), + batchBuffer: new(edge.BatchBuffer), } in.node.runF = in.runOut in.node.stopF = in.stopOut @@ -47,29 +50,29 @@ func newInfluxDBOutNode(et *ExecutingTask, n *pipeline.InfluxDBOutNode, l *log.L return in, nil } -func (i *InfluxDBOutNode) runOut([]byte) error { - i.pointsWritten = &expvar.Int{} - i.writeErrors = &expvar.Int{} +func (n *InfluxDBOutNode) runOut([]byte) error { + n.pointsWritten = &expvar.Int{} + n.writeErrors = &expvar.Int{} - i.statMap.Set(statsInfluxDBPointsWritten, i.pointsWritten) - i.statMap.Set(statsInfluxDBWriteErrors, i.writeErrors) + n.statMap.Set(statsInfluxDBPointsWritten, n.pointsWritten) + n.statMap.Set(statsInfluxDBWriteErrors, n.writeErrors) // Start the write buffer - i.wb.start() + n.wb.start() // Create the database and retention policy - if i.i.CreateFlag { + if n.i.CreateFlag { err := func() error { - cli, err := i.et.tm.InfluxDBService.NewNamedClient(i.i.Cluster) + cli, err := n.et.tm.InfluxDBService.NewNamedClient(n.i.Cluster) if err != nil { return err } var createDb bytes.Buffer createDb.WriteString("CREATE DATABASE ") - createDb.WriteString(influxql.QuoteIdent(i.i.Database)) - if i.i.RetentionPolicy != "" { + createDb.WriteString(influxql.QuoteIdent(n.i.Database)) + if n.i.RetentionPolicy != "" { createDb.WriteString(" WITH NAME ") - createDb.WriteString(influxql.QuoteIdent(i.i.RetentionPolicy)) + createDb.WriteString(influxql.QuoteIdent(n.i.RetentionPolicy)) } _, err = cli.Query(influxdb.Query{Command: createDb.String()}) if err != nil { @@ -78,86 +81,113 @@ func (i *InfluxDBOutNode) runOut([]byte) error { return nil }() if err != nil { - i.incrementErrorCount() - i.logger.Printf("E! failed to create database %q on cluster %q: %v", i.i.Database, i.i.Cluster, err) + n.incrementErrorCount() + n.logger.Printf("E! 
failed to create database %q on cluster %q: %v", n.i.Database, n.i.Cluster, err) } } - switch i.Wants() { - case pipeline.StreamEdge: - for p, ok := i.ins[0].NextPoint(); ok; p, ok = i.ins[0].NextPoint() { - i.timer.Start() - batch := models.Batch{ - Name: p.Name, - Group: p.Group, - Tags: p.Tags, - ByName: p.Dimensions.ByName, - Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, - } - err := i.write(p.Database, p.RetentionPolicy, batch) - if err != nil { - return err - } - i.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := i.ins[0].NextBatch(); ok; b, ok = i.ins[0].NextBatch() { - i.timer.Start() - err := i.write("", "", b) - if err != nil { - return err - } - i.timer.Stop() - } - } - return nil + // Setup consumer + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() +} + +func (n *InfluxDBOutNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + return nil, n.batchBuffer.BeginBatch(begin) +} + +func (n *InfluxDBOutNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return nil, n.batchBuffer.BatchPoint(bp) +} + +func (n *InfluxDBOutNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return n.BufferedBatch(n.batchBuffer.BufferedBatchMessage(end)) +} + +func (n *InfluxDBOutNode) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) { + n.write("", "", batch) + return batch, nil +} + +func (n *InfluxDBOutNode) Point(p edge.PointMessage) (edge.Message, error) { + batch := edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + p.Name(), + p.Tags(), + p.Dimensions().ByName, + p.Time(), + 1, + ), + []edge.BatchPointMessage{ + edge.NewBatchPointMessage( + p.Fields(), + p.Tags(), + p.Time(), + ), + }, + edge.NewEndBatchMessage(), + ) + n.write(p.Database(), p.RetentionPolicy(), batch) + return p, nil +} + +func (n *InfluxDBOutNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *InfluxDBOutNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } -func (i *InfluxDBOutNode) stopOut() { - i.wb.flush() - i.wb.abort() +func (n *InfluxDBOutNode) stopOut() { + n.wb.flush() + n.wb.abort() } -func (i *InfluxDBOutNode) write(db, rp string, batch models.Batch) error { - if i.i.Database != "" { - db = i.i.Database +func (n *InfluxDBOutNode) write(db, rp string, batch edge.BufferedBatchMessage) error { + if n.i.Database != "" { + db = n.i.Database } - if i.i.RetentionPolicy != "" { - rp = i.i.RetentionPolicy + if n.i.RetentionPolicy != "" { + rp = n.i.RetentionPolicy } - name := i.i.Measurement + name := n.i.Measurement if name == "" { - name = batch.Name + name = batch.Name() } - points := make([]influxdb.Point, len(batch.Points)) - for j, p := range batch.Points { + points := make([]influxdb.Point, len(batch.Points())) + for j, p := range batch.Points() { var tags map[string]string - if len(i.i.Tags) > 0 { - tags = make(map[string]string, len(p.Tags)+len(i.i.Tags)) - for k, v := range p.Tags { + if len(n.i.Tags) > 0 { + tags = make(map[string]string, len(p.Tags())+len(n.i.Tags)) + for k, v := range p.Tags() { tags[k] = v } - for k, v := range i.i.Tags { + for k, v := range n.i.Tags { tags[k] = v } } else { - tags = p.Tags + tags = p.Tags() } points[j] = influxdb.Point{ Name: name, Tags: tags, - Fields: p.Fields, - Time: p.Time, + Fields: p.Fields(), + Time: p.Time(), } } bpc := influxdb.BatchPointsConfig{ Database: 
db, RetentionPolicy: rp, - WriteConsistency: i.i.WriteConsistency, - Precision: i.i.Precision, + WriteConsistency: n.i.WriteConsistency, + Precision: n.i.Precision, } - i.wb.enqueue(bpc, points) + n.wb.enqueue(bpc, points) return nil } diff --git a/influxql.gen.go b/influxql.gen.go index d6f1b6900..50415e667 100644 --- a/influxql.gen.go +++ b/influxql.gen.go @@ -12,147 +12,67 @@ import ( "time" "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" ) -type floatPointAggregator struct { - field string - topBottomInfo *pipeline.TopBottomCallInfo - isSimpleSelector bool - aggregator influxql.FloatPointAggregator -} - -func floatPopulateAuxFieldsAndTags(ap *influxql.FloatPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { - ap.Aux = make([]interface{}, len(fieldsAndTags)) - for i, name := range fieldsAndTags { - if f, ok := fields[name]; ok { - ap.Aux[i] = f - } else { - ap.Aux[i] = tags[name] - } - } -} - -func (a *floatPointAggregator) AggregateBatch(b *models.Batch) error { - for _, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(float64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp float64", a.field, value) - } - ap := &influxql.FloatPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} - } - - a.aggregator.AggregateFloat(ap) - } - return nil -} - -func (a *floatPointAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] +func convertFloatPoint( + name string, + p edge.FieldsTagsTimeGetter, + field string, + isSimpleSelector bool, + topBottomInfo *pipeline.TopBottomCallInfo, +) (*influxql.FloatPoint, error) { + value, ok := p.Fields()[field] if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) + return nil, fmt.Errorf("field %s missing from point cannot aggregate", field) } typed, ok := value.(float64) if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp float64", a.field, value) + return nil, fmt.Errorf("field %s has wrong type: got %T exp float64", field, value) } ap := &influxql.FloatPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), + Name: name, + Tags: influxql.NewTags(p.Tags()), + Time: p.Time().UnixNano(), Value: typed, } - if a.topBottomInfo != nil { + if topBottomInfo != nil { // We need to populate the Aux fields - floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + floatPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags()) } - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} + if isSimpleSelector { + ap.Aux = []interface{}{p.Tags(), p.Fields()} } - a.aggregator.AggregateFloat(ap) - return nil + return ap, nil } -type floatPointBulkAggregator struct { +type floatPointAggregator struct { field string topBottomInfo *pipeline.TopBottomCallInfo isSimpleSelector bool - aggregator pipeline.FloatBulkPointAggregator + aggregator influxql.FloatPointAggregator } -func (a *floatPointBulkAggregator) AggregateBatch(b *models.Batch) error { - slice := 
make([]influxql.FloatPoint, len(b.Points)) - for i, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(float64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp float64", a.field, value) - } - slice[i] = influxql.FloatPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - floatPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - slice[i].Aux = []interface{}{p.Tags, p.Fields} +func floatPopulateAuxFieldsAndTags(ap *influxql.FloatPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] } } - a.aggregator.AggregateFloatBulk(slice) - return nil } -func (a *floatPointBulkAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(float64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp float64", a.field, value) - } - ap := &influxql.FloatPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} +func (a *floatPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error { + ap, err := convertFloatPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo) + if err != nil { + return nil } - a.aggregator.AggregateFloat(ap) return nil } @@ -164,10 +84,10 @@ type floatPointEmitter struct { byName bool } -func (e *floatPointEmitter) EmitPoint() (models.Point, error) { +func (e *floatPointEmitter) EmitPoint() (edge.PointMessage, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, ErrEmptyEmit + return nil, nil } ap := slice[0] var t time.Time @@ -192,30 +112,29 @@ func (e *floatPointEmitter) EmitPoint() (models.Point, error) { delete(fields, e.field) } } else { - tags = e.tags + tags = e.groupInfo.Tags fields = map[string]interface{}{e.as: ap.Value} } - return models.Point{ - Name: e.name, - Time: t, - Group: e.group, - Dimensions: e.dimensions, - Tags: tags, - Fields: fields, - }, nil + return edge.NewPointMessage( + e.name, "", "", + e.groupInfo.Dimensions, + fields, + tags, + t, + ), nil } -func (e *floatPointEmitter) EmitBatch() models.Batch { +func (e *floatPointEmitter) EmitBatch() edge.BufferedBatchMessage { slice := e.emitter.Emit() - b := models.Batch{ - Name: e.name, - TMax: e.time, - Group: e.group, - ByName: e.dimensions.ByName, - Tags: e.tags, - Points: make([]models.BatchPoint, len(slice)), - } + begin := edge.NewBeginBatchMessage( + e.name, + e.groupInfo.Tags, + e.groupInfo.Dimensions.ByName, + e.time, + len(slice), + ) + points := make([]edge.BatchPointMessage, len(slice)) var t time.Time for i, ap := range slice { if e.pointTimes { @@ -230,165 +149,88 @@ func (e *floatPointEmitter) EmitBatch() models.Batch { var tags models.Tags if l := len(ap.Tags.KeyValues()); l > 0 { // Merge batch and point specific 
tags - tags = make(models.Tags, len(e.tags)+l) - for k, v := range e.tags { + tags = make(models.Tags, len(e.groupInfo.Tags)+l) + for k, v := range e.groupInfo.Tags { tags[k] = v } for k, v := range ap.Tags.KeyValues() { tags[k] = v } } else { - tags = e.tags - } - b.Points[i] = models.BatchPoint{ - Time: t, - Tags: tags, - Fields: map[string]interface{}{e.as: ap.Value}, - } - if t.After(b.TMax) { - b.TMax = t - } - } - return b -} - -type integerPointAggregator struct { - field string - topBottomInfo *pipeline.TopBottomCallInfo - isSimpleSelector bool - aggregator influxql.IntegerPointAggregator -} - -func integerPopulateAuxFieldsAndTags(ap *influxql.IntegerPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { - ap.Aux = make([]interface{}, len(fieldsAndTags)) - for i, name := range fieldsAndTags { - if f, ok := fields[name]; ok { - ap.Aux[i] = f - } else { - ap.Aux[i] = tags[name] - } - } -} - -func (a *integerPointAggregator) AggregateBatch(b *models.Batch) error { - for _, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(int64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp int64", a.field, value) - } - ap := &influxql.IntegerPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} - } - - a.aggregator.AggregateInteger(ap) - } - return nil -} - -func (a *integerPointAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] + tags = e.groupInfo.Tags + } + points[i] = edge.NewBatchPointMessage( + models.Fields{e.as: ap.Value}, + tags, + t, + ) + if t.After(begin.Time()) { + begin.SetTime(t) + } + } + return edge.NewBufferedBatchMessage( + begin, + points, + edge.NewEndBatchMessage(), + ) +} + +func convertIntegerPoint( + name string, + p edge.FieldsTagsTimeGetter, + field string, + isSimpleSelector bool, + topBottomInfo *pipeline.TopBottomCallInfo, +) (*influxql.IntegerPoint, error) { + value, ok := p.Fields()[field] if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) + return nil, fmt.Errorf("field %s missing from point cannot aggregate", field) } typed, ok := value.(int64) if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp int64", a.field, value) + return nil, fmt.Errorf("field %s has wrong type: got %T exp int64", field, value) } ap := &influxql.IntegerPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), + Name: name, + Tags: influxql.NewTags(p.Tags()), + Time: p.Time().UnixNano(), Value: typed, } - if a.topBottomInfo != nil { + if topBottomInfo != nil { // We need to populate the Aux fields - integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + integerPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags()) } - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} + if isSimpleSelector { + ap.Aux = []interface{}{p.Tags(), p.Fields()} } - a.aggregator.AggregateInteger(ap) - return nil + return ap, nil } -type integerPointBulkAggregator struct { +type integerPointAggregator struct { field string topBottomInfo *pipeline.TopBottomCallInfo isSimpleSelector bool - aggregator 
pipeline.IntegerBulkPointAggregator + aggregator influxql.IntegerPointAggregator } -func (a *integerPointBulkAggregator) AggregateBatch(b *models.Batch) error { - slice := make([]influxql.IntegerPoint, len(b.Points)) - for i, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(int64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp int64", a.field, value) - } - slice[i] = influxql.IntegerPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - integerPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - slice[i].Aux = []interface{}{p.Tags, p.Fields} +func integerPopulateAuxFieldsAndTags(ap *influxql.IntegerPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] } } - a.aggregator.AggregateIntegerBulk(slice) - return nil } -func (a *integerPointBulkAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(int64) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp int64", a.field, value) - } - ap := &influxql.IntegerPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} +func (a *integerPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error { + ap, err := convertIntegerPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo) + if err != nil { + return nil } - a.aggregator.AggregateInteger(ap) return nil } @@ -400,10 +242,10 @@ type integerPointEmitter struct { byName bool } -func (e *integerPointEmitter) EmitPoint() (models.Point, error) { +func (e *integerPointEmitter) EmitPoint() (edge.PointMessage, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, ErrEmptyEmit + return nil, nil } ap := slice[0] var t time.Time @@ -428,30 +270,29 @@ func (e *integerPointEmitter) EmitPoint() (models.Point, error) { delete(fields, e.field) } } else { - tags = e.tags + tags = e.groupInfo.Tags fields = map[string]interface{}{e.as: ap.Value} } - return models.Point{ - Name: e.name, - Time: t, - Group: e.group, - Dimensions: e.dimensions, - Tags: tags, - Fields: fields, - }, nil + return edge.NewPointMessage( + e.name, "", "", + e.groupInfo.Dimensions, + fields, + tags, + t, + ), nil } -func (e *integerPointEmitter) EmitBatch() models.Batch { +func (e *integerPointEmitter) EmitBatch() edge.BufferedBatchMessage { slice := e.emitter.Emit() - b := models.Batch{ - Name: e.name, - TMax: e.time, - Group: e.group, - ByName: e.dimensions.ByName, - Tags: e.tags, - Points: make([]models.BatchPoint, len(slice)), - } + begin := edge.NewBeginBatchMessage( + e.name, + e.groupInfo.Tags, + e.groupInfo.Dimensions.ByName, + e.time, + len(slice), + ) + points := make([]edge.BatchPointMessage, len(slice)) var t time.Time for i, ap := range 
slice { if e.pointTimes { @@ -466,165 +307,88 @@ func (e *integerPointEmitter) EmitBatch() models.Batch { var tags models.Tags if l := len(ap.Tags.KeyValues()); l > 0 { // Merge batch and point specific tags - tags = make(models.Tags, len(e.tags)+l) - for k, v := range e.tags { + tags = make(models.Tags, len(e.groupInfo.Tags)+l) + for k, v := range e.groupInfo.Tags { tags[k] = v } for k, v := range ap.Tags.KeyValues() { tags[k] = v } } else { - tags = e.tags - } - b.Points[i] = models.BatchPoint{ - Time: t, - Tags: tags, - Fields: map[string]interface{}{e.as: ap.Value}, - } - if t.After(b.TMax) { - b.TMax = t - } - } - return b -} - -type stringPointAggregator struct { - field string - topBottomInfo *pipeline.TopBottomCallInfo - isSimpleSelector bool - aggregator influxql.StringPointAggregator -} - -func stringPopulateAuxFieldsAndTags(ap *influxql.StringPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { - ap.Aux = make([]interface{}, len(fieldsAndTags)) - for i, name := range fieldsAndTags { - if f, ok := fields[name]; ok { - ap.Aux[i] = f - } else { - ap.Aux[i] = tags[name] - } - } -} - -func (a *stringPointAggregator) AggregateBatch(b *models.Batch) error { - for _, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(string) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp string", a.field, value) - } - ap := &influxql.StringPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - stringPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} - } - - a.aggregator.AggregateString(ap) - } - return nil -} - -func (a *stringPointAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] + tags = e.groupInfo.Tags + } + points[i] = edge.NewBatchPointMessage( + models.Fields{e.as: ap.Value}, + tags, + t, + ) + if t.After(begin.Time()) { + begin.SetTime(t) + } + } + return edge.NewBufferedBatchMessage( + begin, + points, + edge.NewEndBatchMessage(), + ) +} + +func convertStringPoint( + name string, + p edge.FieldsTagsTimeGetter, + field string, + isSimpleSelector bool, + topBottomInfo *pipeline.TopBottomCallInfo, +) (*influxql.StringPoint, error) { + value, ok := p.Fields()[field] if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) + return nil, fmt.Errorf("field %s missing from point cannot aggregate", field) } typed, ok := value.(string) if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp string", a.field, value) + return nil, fmt.Errorf("field %s has wrong type: got %T exp string", field, value) } ap := &influxql.StringPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), + Name: name, + Tags: influxql.NewTags(p.Tags()), + Time: p.Time().UnixNano(), Value: typed, } - if a.topBottomInfo != nil { + if topBottomInfo != nil { // We need to populate the Aux fields - stringPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + stringPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags()) } - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} + if isSimpleSelector { + ap.Aux = []interface{}{p.Tags(), p.Fields()} } - a.aggregator.AggregateString(ap) - return nil + 
return ap, nil } -type stringPointBulkAggregator struct { +type stringPointAggregator struct { field string topBottomInfo *pipeline.TopBottomCallInfo isSimpleSelector bool - aggregator pipeline.StringBulkPointAggregator + aggregator influxql.StringPointAggregator } -func (a *stringPointBulkAggregator) AggregateBatch(b *models.Batch) error { - slice := make([]influxql.StringPoint, len(b.Points)) - for i, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(string) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp string", a.field, value) - } - slice[i] = influxql.StringPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - stringPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - slice[i].Aux = []interface{}{p.Tags, p.Fields} +func stringPopulateAuxFieldsAndTags(ap *influxql.StringPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] } } - a.aggregator.AggregateStringBulk(slice) - return nil } -func (a *stringPointBulkAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(string) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp string", a.field, value) - } - ap := &influxql.StringPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - stringPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} +func (a *stringPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error { + ap, err := convertStringPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo) + if err != nil { + return nil } - a.aggregator.AggregateString(ap) return nil } @@ -636,10 +400,10 @@ type stringPointEmitter struct { byName bool } -func (e *stringPointEmitter) EmitPoint() (models.Point, error) { +func (e *stringPointEmitter) EmitPoint() (edge.PointMessage, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, ErrEmptyEmit + return nil, nil } ap := slice[0] var t time.Time @@ -664,30 +428,29 @@ func (e *stringPointEmitter) EmitPoint() (models.Point, error) { delete(fields, e.field) } } else { - tags = e.tags + tags = e.groupInfo.Tags fields = map[string]interface{}{e.as: ap.Value} } - return models.Point{ - Name: e.name, - Time: t, - Group: e.group, - Dimensions: e.dimensions, - Tags: tags, - Fields: fields, - }, nil + return edge.NewPointMessage( + e.name, "", "", + e.groupInfo.Dimensions, + fields, + tags, + t, + ), nil } -func (e *stringPointEmitter) EmitBatch() models.Batch { +func (e *stringPointEmitter) EmitBatch() edge.BufferedBatchMessage { slice := e.emitter.Emit() - b := models.Batch{ - Name: e.name, - TMax: e.time, - Group: e.group, - ByName: e.dimensions.ByName, - Tags: e.tags, - Points: make([]models.BatchPoint, len(slice)), - } + begin := edge.NewBeginBatchMessage( + e.name, + 
e.groupInfo.Tags, + e.groupInfo.Dimensions.ByName, + e.time, + len(slice), + ) + points := make([]edge.BatchPointMessage, len(slice)) var t time.Time for i, ap := range slice { if e.pointTimes { @@ -702,165 +465,88 @@ func (e *stringPointEmitter) EmitBatch() models.Batch { var tags models.Tags if l := len(ap.Tags.KeyValues()); l > 0 { // Merge batch and point specific tags - tags = make(models.Tags, len(e.tags)+l) - for k, v := range e.tags { + tags = make(models.Tags, len(e.groupInfo.Tags)+l) + for k, v := range e.groupInfo.Tags { tags[k] = v } for k, v := range ap.Tags.KeyValues() { tags[k] = v } } else { - tags = e.tags - } - b.Points[i] = models.BatchPoint{ - Time: t, - Tags: tags, - Fields: map[string]interface{}{e.as: ap.Value}, - } - if t.After(b.TMax) { - b.TMax = t - } - } - return b -} - -type booleanPointAggregator struct { - field string - topBottomInfo *pipeline.TopBottomCallInfo - isSimpleSelector bool - aggregator influxql.BooleanPointAggregator -} - -func booleanPopulateAuxFieldsAndTags(ap *influxql.BooleanPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { - ap.Aux = make([]interface{}, len(fieldsAndTags)) - for i, name := range fieldsAndTags { - if f, ok := fields[name]; ok { - ap.Aux[i] = f - } else { - ap.Aux[i] = tags[name] - } - } -} - -func (a *booleanPointAggregator) AggregateBatch(b *models.Batch) error { - for _, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(bool) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp bool", a.field, value) - } - ap := &influxql.BooleanPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - booleanPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} - } - - a.aggregator.AggregateBoolean(ap) - } - return nil -} - -func (a *booleanPointAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] + tags = e.groupInfo.Tags + } + points[i] = edge.NewBatchPointMessage( + models.Fields{e.as: ap.Value}, + tags, + t, + ) + if t.After(begin.Time()) { + begin.SetTime(t) + } + } + return edge.NewBufferedBatchMessage( + begin, + points, + edge.NewEndBatchMessage(), + ) +} + +func convertBooleanPoint( + name string, + p edge.FieldsTagsTimeGetter, + field string, + isSimpleSelector bool, + topBottomInfo *pipeline.TopBottomCallInfo, +) (*influxql.BooleanPoint, error) { + value, ok := p.Fields()[field] if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) + return nil, fmt.Errorf("field %s missing from point cannot aggregate", field) } typed, ok := value.(bool) if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp bool", a.field, value) + return nil, fmt.Errorf("field %s has wrong type: got %T exp bool", field, value) } ap := &influxql.BooleanPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), + Name: name, + Tags: influxql.NewTags(p.Tags()), + Time: p.Time().UnixNano(), Value: typed, } - if a.topBottomInfo != nil { + if topBottomInfo != nil { // We need to populate the Aux fields - booleanPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + booleanPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags()) } - if a.isSimpleSelector 
{ - ap.Aux = []interface{}{p.Tags, p.Fields} + if isSimpleSelector { + ap.Aux = []interface{}{p.Tags(), p.Fields()} } - a.aggregator.AggregateBoolean(ap) - return nil + return ap, nil } -type booleanPointBulkAggregator struct { +type booleanPointAggregator struct { field string topBottomInfo *pipeline.TopBottomCallInfo isSimpleSelector bool - aggregator pipeline.BooleanBulkPointAggregator + aggregator influxql.BooleanPointAggregator } -func (a *booleanPointBulkAggregator) AggregateBatch(b *models.Batch) error { - slice := make([]influxql.BooleanPoint, len(b.Points)) - for i, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(bool) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp bool", a.field, value) - } - slice[i] = influxql.BooleanPoint{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - booleanPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - slice[i].Aux = []interface{}{p.Tags, p.Fields} +func booleanPopulateAuxFieldsAndTags(ap *influxql.BooleanPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] } } - a.aggregator.AggregateBooleanBulk(slice) - return nil } -func (a *booleanPointBulkAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.(bool) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp bool", a.field, value) - } - ap := &influxql.BooleanPoint{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - booleanPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{p.Tags, p.Fields} +func (a *booleanPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error { + ap, err := convertBooleanPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo) + if err != nil { + return nil } - a.aggregator.AggregateBoolean(ap) return nil } @@ -872,10 +558,10 @@ type booleanPointEmitter struct { byName bool } -func (e *booleanPointEmitter) EmitPoint() (models.Point, error) { +func (e *booleanPointEmitter) EmitPoint() (edge.PointMessage, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, ErrEmptyEmit + return nil, nil } ap := slice[0] var t time.Time @@ -900,30 +586,29 @@ func (e *booleanPointEmitter) EmitPoint() (models.Point, error) { delete(fields, e.field) } } else { - tags = e.tags + tags = e.groupInfo.Tags fields = map[string]interface{}{e.as: ap.Value} } - return models.Point{ - Name: e.name, - Time: t, - Group: e.group, - Dimensions: e.dimensions, - Tags: tags, - Fields: fields, - }, nil + return edge.NewPointMessage( + e.name, "", "", + e.groupInfo.Dimensions, + fields, + tags, + t, + ), nil } -func (e *booleanPointEmitter) EmitBatch() models.Batch { +func (e *booleanPointEmitter) EmitBatch() edge.BufferedBatchMessage { slice := e.emitter.Emit() - b := models.Batch{ - Name: e.name, - 
TMax: e.time, - Group: e.group, - ByName: e.dimensions.ByName, - Tags: e.tags, - Points: make([]models.BatchPoint, len(slice)), - } + begin := edge.NewBeginBatchMessage( + e.name, + e.groupInfo.Tags, + e.groupInfo.Dimensions.ByName, + e.time, + len(slice), + ) + points := make([]edge.BatchPointMessage, len(slice)) var t time.Time for i, ap := range slice { if e.pointTimes { @@ -938,26 +623,30 @@ func (e *booleanPointEmitter) EmitBatch() models.Batch { var tags models.Tags if l := len(ap.Tags.KeyValues()); l > 0 { // Merge batch and point specific tags - tags = make(models.Tags, len(e.tags)+l) - for k, v := range e.tags { + tags = make(models.Tags, len(e.groupInfo.Tags)+l) + for k, v := range e.groupInfo.Tags { tags[k] = v } for k, v := range ap.Tags.KeyValues() { tags[k] = v } } else { - tags = e.tags + tags = e.groupInfo.Tags } - b.Points[i] = models.BatchPoint{ - Time: t, - Tags: tags, - Fields: map[string]interface{}{e.as: ap.Value}, - } - if t.After(b.TMax) { - b.TMax = t + points[i] = edge.NewBatchPointMessage( + models.Fields{e.as: ap.Value}, + tags, + t, + ) + if t.After(begin.Time()) { + begin.SetTime(t) } } - return b + return edge.NewBufferedBatchMessage( + begin, + points, + edge.NewEndBatchMessage(), + ) } // floatReduceContext uses composition to implement the reduceContext interface @@ -966,192 +655,96 @@ type floatReduceContext struct { floatPointEmitter } -// floatBulkReduceContext uses composition to implement the reduceContext interface -type floatBulkReduceContext struct { - floatPointBulkAggregator - floatPointEmitter -} - // floatIntegerReduceContext uses composition to implement the reduceContext interface type floatIntegerReduceContext struct { floatPointAggregator integerPointEmitter } -// floatBulkIntegerReduceContext uses composition to implement the reduceContext interface -type floatBulkIntegerReduceContext struct { - floatPointBulkAggregator - integerPointEmitter -} - // floatStringReduceContext uses composition to implement the reduceContext interface type floatStringReduceContext struct { floatPointAggregator stringPointEmitter } -// floatBulkStringReduceContext uses composition to implement the reduceContext interface -type floatBulkStringReduceContext struct { - floatPointBulkAggregator - stringPointEmitter -} - // floatBooleanReduceContext uses composition to implement the reduceContext interface type floatBooleanReduceContext struct { floatPointAggregator booleanPointEmitter } -// floatBulkBooleanReduceContext uses composition to implement the reduceContext interface -type floatBulkBooleanReduceContext struct { - floatPointBulkAggregator - booleanPointEmitter -} - // integerFloatReduceContext uses composition to implement the reduceContext interface type integerFloatReduceContext struct { integerPointAggregator floatPointEmitter } -// integerBulkFloatReduceContext uses composition to implement the reduceContext interface -type integerBulkFloatReduceContext struct { - integerPointBulkAggregator - floatPointEmitter -} - // integerReduceContext uses composition to implement the reduceContext interface type integerReduceContext struct { integerPointAggregator integerPointEmitter } -// integerBulkReduceContext uses composition to implement the reduceContext interface -type integerBulkReduceContext struct { - integerPointBulkAggregator - integerPointEmitter -} - // integerStringReduceContext uses composition to implement the reduceContext interface type integerStringReduceContext struct { integerPointAggregator stringPointEmitter } -// 
integerBulkStringReduceContext uses composition to implement the reduceContext interface -type integerBulkStringReduceContext struct { - integerPointBulkAggregator - stringPointEmitter -} - // integerBooleanReduceContext uses composition to implement the reduceContext interface type integerBooleanReduceContext struct { integerPointAggregator booleanPointEmitter } -// integerBulkBooleanReduceContext uses composition to implement the reduceContext interface -type integerBulkBooleanReduceContext struct { - integerPointBulkAggregator - booleanPointEmitter -} - // stringFloatReduceContext uses composition to implement the reduceContext interface type stringFloatReduceContext struct { stringPointAggregator floatPointEmitter } -// stringBulkFloatReduceContext uses composition to implement the reduceContext interface -type stringBulkFloatReduceContext struct { - stringPointBulkAggregator - floatPointEmitter -} - // stringIntegerReduceContext uses composition to implement the reduceContext interface type stringIntegerReduceContext struct { stringPointAggregator integerPointEmitter } -// stringBulkIntegerReduceContext uses composition to implement the reduceContext interface -type stringBulkIntegerReduceContext struct { - stringPointBulkAggregator - integerPointEmitter -} - // stringReduceContext uses composition to implement the reduceContext interface type stringReduceContext struct { stringPointAggregator stringPointEmitter } -// stringBulkReduceContext uses composition to implement the reduceContext interface -type stringBulkReduceContext struct { - stringPointBulkAggregator - stringPointEmitter -} - // stringBooleanReduceContext uses composition to implement the reduceContext interface type stringBooleanReduceContext struct { stringPointAggregator booleanPointEmitter } -// stringBulkBooleanReduceContext uses composition to implement the reduceContext interface -type stringBulkBooleanReduceContext struct { - stringPointBulkAggregator - booleanPointEmitter -} - // booleanFloatReduceContext uses composition to implement the reduceContext interface type booleanFloatReduceContext struct { booleanPointAggregator floatPointEmitter } -// booleanBulkFloatReduceContext uses composition to implement the reduceContext interface -type booleanBulkFloatReduceContext struct { - booleanPointBulkAggregator - floatPointEmitter -} - // booleanIntegerReduceContext uses composition to implement the reduceContext interface type booleanIntegerReduceContext struct { booleanPointAggregator integerPointEmitter } -// booleanBulkIntegerReduceContext uses composition to implement the reduceContext interface -type booleanBulkIntegerReduceContext struct { - booleanPointBulkAggregator - integerPointEmitter -} - // booleanStringReduceContext uses composition to implement the reduceContext interface type booleanStringReduceContext struct { booleanPointAggregator stringPointEmitter } -// booleanBulkStringReduceContext uses composition to implement the reduceContext interface -type booleanBulkStringReduceContext struct { - booleanPointBulkAggregator - stringPointEmitter -} - // booleanReduceContext uses composition to implement the reduceContext interface type booleanReduceContext struct { booleanPointAggregator booleanPointEmitter } -// booleanBulkReduceContext uses composition to implement the reduceContext interface -type booleanBulkReduceContext struct { - booleanPointBulkAggregator - booleanPointEmitter -} - func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipeline.ReduceCreater) (fn 
createReduceContextFunc, err error) { switch kind { @@ -1175,23 +768,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateFloatBulkReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateFloatBulkReducer() - return &floatBulkReduceContext{ - floatPointBulkAggregator: floatPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - floatPointEmitter: floatPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateFloatIntegerReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1210,23 +786,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateFloatBulkIntegerReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateFloatBulkIntegerReducer() - return &floatBulkIntegerReduceContext{ - floatPointBulkAggregator: floatPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - integerPointEmitter: integerPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateFloatStringReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1245,23 +804,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateFloatBulkStringReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateFloatBulkStringReducer() - return &floatBulkStringReduceContext{ - floatPointBulkAggregator: floatPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - stringPointEmitter: stringPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateFloatBooleanReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1280,23 +822,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateFloatBulkBooleanReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateFloatBulkBooleanReducer() - return &floatBulkBooleanReduceContext{ - floatPointBulkAggregator: floatPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - booleanPointEmitter: booleanPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } default: err = fmt.Errorf("cannot apply %s to float64 field", method) @@ -1322,23 +847,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateIntegerBulkFloatReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateIntegerBulkFloatReducer() - return &integerBulkFloatReduceContext{ - integerPointBulkAggregator: integerPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - floatPointEmitter: floatPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateIntegerReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1357,23 +865,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc 
pipelin }, } } - case rc.CreateIntegerBulkReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateIntegerBulkReducer() - return &integerBulkReduceContext{ - integerPointBulkAggregator: integerPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - integerPointEmitter: integerPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateIntegerStringReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1392,23 +883,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateIntegerBulkStringReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateIntegerBulkStringReducer() - return &integerBulkStringReduceContext{ - integerPointBulkAggregator: integerPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - stringPointEmitter: stringPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateIntegerBooleanReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1427,23 +901,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateIntegerBulkBooleanReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateIntegerBulkBooleanReducer() - return &integerBulkBooleanReduceContext{ - integerPointBulkAggregator: integerPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - booleanPointEmitter: booleanPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } default: err = fmt.Errorf("cannot apply %s to int64 field", method) @@ -1469,23 +926,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateStringBulkFloatReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateStringBulkFloatReducer() - return &stringBulkFloatReduceContext{ - stringPointBulkAggregator: stringPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - floatPointEmitter: floatPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateStringIntegerReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1504,23 +944,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateStringBulkIntegerReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateStringBulkIntegerReducer() - return &stringBulkIntegerReduceContext{ - stringPointBulkAggregator: stringPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - integerPointEmitter: integerPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateStringReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1539,23 +962,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateStringBulkReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e 
:= rc.CreateStringBulkReducer() - return &stringBulkReduceContext{ - stringPointBulkAggregator: stringPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - stringPointEmitter: stringPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateStringBooleanReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1574,23 +980,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateStringBulkBooleanReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateStringBulkBooleanReducer() - return &stringBulkBooleanReduceContext{ - stringPointBulkAggregator: stringPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - booleanPointEmitter: booleanPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } default: err = fmt.Errorf("cannot apply %s to string field", method) @@ -1616,23 +1005,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateBooleanBulkFloatReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateBooleanBulkFloatReducer() - return &booleanBulkFloatReduceContext{ - booleanPointBulkAggregator: booleanPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - floatPointEmitter: floatPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateBooleanIntegerReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1651,23 +1023,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateBooleanBulkIntegerReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateBooleanBulkIntegerReducer() - return &booleanBulkIntegerReduceContext{ - booleanPointBulkAggregator: booleanPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - integerPointEmitter: integerPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateBooleanStringReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1686,23 +1041,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateBooleanBulkStringReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateBooleanBulkStringReducer() - return &booleanBulkStringReduceContext{ - booleanPointBulkAggregator: booleanPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - stringPointEmitter: stringPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } case rc.CreateBooleanReducer != nil: fn = func(c baseReduceContext) reduceContext { @@ -1721,23 +1059,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.CreateBooleanBulkReducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.CreateBooleanBulkReducer() - return &booleanBulkReduceContext{ - booleanPointBulkAggregator: 
booleanPointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - booleanPointEmitter: booleanPointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } default: err = fmt.Errorf("cannot apply %s to bool field", method) diff --git a/influxql.gen.go.tmpl b/influxql.gen.go.tmpl index 9cbb996bd..fa3a653d0 100644 --- a/influxql.gen.go.tmpl +++ b/influxql.gen.go.tmpl @@ -8,151 +8,69 @@ import ( "github.com/influxdata/influxdb/influxql" "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) {{/* Define typed Aggregate/Emit types */}} {{range .}} -type {{.name}}PointAggregator struct { - field string - topBottomInfo *pipeline.TopBottomCallInfo - isSimpleSelector bool - aggregator influxql.{{.Name}}PointAggregator -} - -func {{.name}}PopulateAuxFieldsAndTags(ap *influxql.{{.Name}}Point, fieldsAndTags []string, fields models.Fields, tags models.Tags) { - ap.Aux = make([]interface{}, len(fieldsAndTags)) - for i, name := range fieldsAndTags { - if f, ok := fields[name]; ok { - ap.Aux[i] = f - } else { - ap.Aux[i] = tags[name] - } - } -} - -func (a *{{.name}}PointAggregator) AggregateBatch(b *models.Batch) error { - for _, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.({{.Type}}) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp {{.Type}}", a.field, value) - } - ap := &influxql.{{.Name}}Point{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{ p.Tags, p.Fields } - } - - a.aggregator.Aggregate{{.Name}}(ap) - } - return nil -} - -func (a *{{.name}}PointAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] +func convert{{.Name}}Point( + name string, + p edge.FieldsTagsTimeGetter, + field string, + isSimpleSelector bool, + topBottomInfo *pipeline.TopBottomCallInfo, +) (*influxql.{{.Name}}Point, error) { + value, ok := p.Fields()[field] if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) + return nil, fmt.Errorf("field %s missing from point cannot aggregate", field) } typed, ok := value.({{.Type}}) if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp {{.Type}}", a.field, value) + return nil, fmt.Errorf("field %s has wrong type: got %T exp {{.Type}}", field, value) } ap := &influxql.{{.Name}}Point{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), + Name: name, + Tags: influxql.NewTags(p.Tags()), + Time: p.Time().UnixNano(), Value: typed, } - if a.topBottomInfo != nil { + if topBottomInfo != nil { // We need to populate the Aux fields - {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + {{.name}}PopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags()) } - if a.isSimpleSelector { - ap.Aux = []interface{}{ p.Tags, p.Fields } + if isSimpleSelector { + ap.Aux = []interface{}{ p.Tags(), p.Fields() } } - - a.aggregator.Aggregate{{.Name}}(ap) - return nil -} - + return ap, nil +} -type {{.name}}PointBulkAggregator struct { - field string +type 
{{.name}}PointAggregator struct { + field string topBottomInfo *pipeline.TopBottomCallInfo isSimpleSelector bool - aggregator pipeline.{{.Name}}BulkPointAggregator + aggregator influxql.{{.Name}}PointAggregator } -func (a *{{.name}}PointBulkAggregator) AggregateBatch(b *models.Batch) error { - slice := make([]influxql.{{.Name}}Point, len(b.Points)) - for i, p := range b.Points { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.({{.Type}}) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp {{.Type}}", a.field, value) - } - slice[i] = influxql.{{.Name}}Point{ - Name: b.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - {{.name}}PopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - slice[i].Aux = []interface{}{ p.Tags, p.Fields } +func {{.name}}PopulateAuxFieldsAndTags(ap *influxql.{{.Name}}Point, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] } } - a.aggregator.Aggregate{{.Name}}Bulk(slice) - return nil } -func (a *{{.name}}PointBulkAggregator) AggregatePoint(p *models.Point) error { - value, ok := p.Fields[a.field] - if !ok { - return fmt.Errorf("field %s missing from point cannot aggregate", a.field) - } - typed, ok := value.({{.Type}}) - if !ok { - return fmt.Errorf("field %s has wrong type: got %T exp {{.Type}}", a.field, value) - } - ap := &influxql.{{.Name}}Point{ - Name: p.Name, - Tags: influxql.NewTags(p.Tags), - Time: p.Time.UnixNano(), - Value: typed, - } - if a.topBottomInfo != nil { - // We need to populate the Aux fields - {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) - } - - if a.isSimpleSelector { - ap.Aux = []interface{}{ p.Tags, p.Fields } +func (a *{{.name}}PointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error { + ap, err := convert{{.Name}}Point(name, p, a.field, a.isSimpleSelector, a.topBottomInfo) + if err != nil { + return nil } - a.aggregator.Aggregate{{.Name}}(ap) return nil } @@ -164,10 +82,10 @@ type {{.name}}PointEmitter struct { byName bool } -func (e *{{.name}}PointEmitter) EmitPoint() (models.Point, error) { +func (e *{{.name}}PointEmitter) EmitPoint() (edge.PointMessage, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, ErrEmptyEmit + return nil, nil } ap := slice[0] var t time.Time @@ -192,30 +110,29 @@ func (e *{{.name}}PointEmitter) EmitPoint() (models.Point, error) { delete(fields, e.field) } } else { - tags = e.tags + tags = e.groupInfo.Tags fields = map[string]interface{}{e.as: ap.Value} } - return models.Point{ - Name: e.name, - Time: t, - Group: e.group, - Dimensions: e.dimensions, - Tags: tags, - Fields: fields, - }, nil + return edge.NewPointMessage( + e.name, "", "", + e.groupInfo.Dimensions, + fields, + tags, + t, + ), nil } -func (e *{{.name}}PointEmitter) EmitBatch() models.Batch { +func (e *{{.name}}PointEmitter) EmitBatch() edge.BufferedBatchMessage { slice := e.emitter.Emit() - b := models.Batch{ - Name: e.name, - TMax: e.time, - Group: e.group, - ByName: e.dimensions.ByName, - Tags: e.tags, - Points: make([]models.BatchPoint, len(slice)), - } + begin := 
edge.NewBeginBatchMessage( + e.name, + e.groupInfo.Tags, + e.groupInfo.Dimensions.ByName, + e.time, + len(slice), + ) + points := make([]edge.BatchPointMessage, len(slice)) var t time.Time for i, ap := range slice { if e.pointTimes { @@ -230,26 +147,30 @@ func (e *{{.name}}PointEmitter) EmitBatch() models.Batch { var tags models.Tags if l := len(ap.Tags.KeyValues()); l > 0 { // Merge batch and point specific tags - tags = make(models.Tags, len(e.tags)+l) - for k, v := range e.tags { + tags = make(models.Tags, len(e.groupInfo.Tags)+l) + for k, v := range e.groupInfo.Tags { tags[k] = v } for k, v := range ap.Tags.KeyValues() { tags[k] = v } } else { - tags = e.tags - } - b.Points[i] = models.BatchPoint{ - Time: t, - Tags: tags, - Fields: map[string]interface{}{e.as: ap.Value}, - } - if t.After(b.TMax) { - b.TMax = t - } - } - return b + tags = e.groupInfo.Tags + } + points[i] = edge.NewBatchPointMessage( + models.Fields{e.as: ap.Value}, + tags, + t, + ) + if t.After(begin.Time()) { + begin.SetTime(t) + } + } + return edge.NewBufferedBatchMessage( + begin, + points, + edge.NewEndBatchMessage(), + ) } {{end}} @@ -265,11 +186,6 @@ type {{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext struct {{$e.name}}PointEmitter } -// {{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext uses composition to implement the reduceContext interface -type {{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext struct { - {{$a.name}}PointBulkAggregator - {{$e.name}}PointEmitter -} {{end}}{{end}} @@ -298,23 +214,6 @@ func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipelin }, } } - case rc.Create{{$a.Name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer != nil: - fn = func(c baseReduceContext) reduceContext { - a, e := rc.Create{{$a.Name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer() - return &{{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext{ - {{$a.name}}PointBulkAggregator: {{$a.name}}PointBulkAggregator{ - field: c.field, - topBottomInfo: rc.TopBottomCallInfo, - isSimpleSelector: rc.IsSimpleSelector, - aggregator: a, - }, - {{$e.name}}PointEmitter: {{$e.name}}PointEmitter{ - baseReduceContext: c, - emitter: e, - isSimpleSelector: rc.IsSimpleSelector, - }, - } - } {{end}} default: err = fmt.Errorf("cannot apply %s to {{$a.Type}} field", method) diff --git a/influxql.go b/influxql.go index 412926185..eb766bcdb 100644 --- a/influxql.go +++ b/influxql.go @@ -4,10 +4,9 @@ import ( "fmt" "log" "reflect" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/pkg/errors" @@ -18,13 +17,13 @@ import ( type createReduceContextFunc func(c baseReduceContext) reduceContext -var ErrEmptyEmit = errors.New("error call to emit produced no results") - type InfluxQLNode struct { node n *pipeline.InfluxQLNode createFn createReduceContextFunc isStreamTransformation bool + + currentKind reflect.Kind } func newInfluxQLNode(et *ExecutingTask, n *pipeline.InfluxQLNode, l *log.Logger) (*InfluxQLNode, error) { @@ -33,243 +32,268 @@ func newInfluxQLNode(et *ExecutingTask, n *pipeline.InfluxQLNode, l *log.Logger) n: n, isStreamTransformation: n.ReduceCreater.IsStreamTransformation, } - m.node.runF = m.runInfluxQLs + m.node.runF = m.runInfluxQL return m, nil } -func (n *InfluxQLNode) runInfluxQLs([]byte) error { - switch n.n.Wants() { - case pipeline.StreamEdge: - return 
n.runStreamInfluxQL() - case pipeline.BatchEdge: - return n.runBatchInfluxQL() - default: - return fmt.Errorf("cannot map %v edge", n.n.Wants()) - } -} - type reduceContext interface { - AggregatePoint(p *models.Point) error - AggregateBatch(b *models.Batch) error - EmitPoint() (models.Point, error) - EmitBatch() models.Batch - Time() time.Time + AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error + EmitPoint() (edge.PointMessage, error) + EmitBatch() edge.BufferedBatchMessage } type baseReduceContext struct { as string field string name string - group models.GroupID - dimensions models.Dimensions - tags models.Tags + groupInfo edge.GroupInfo time time.Time pointTimes bool topBottomInfo *pipeline.TopBottomCallInfo } -func (c *baseReduceContext) Time() time.Time { - return c.time +func (n *InfluxQLNode) runInfluxQL([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() +} + +func (n *InfluxQLNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup(first)), + ), nil } -func (n *InfluxQLNode) runStreamInfluxQL() error { - var mu sync.RWMutex - contexts := make(map[models.GroupID]reduceContext) - valueF := func() int64 { - mu.RLock() - l := len(contexts) - mu.RUnlock() - return int64(l) +func (n *InfluxQLNode) newGroup(first edge.PointMeta) edge.ForwardReceiver { + bc := baseReduceContext{ + as: n.n.As, + field: n.n.Field, + name: first.Name(), + groupInfo: first.GroupInfo(), + time: first.Time(), + pointTimes: n.n.PointTimes || n.isStreamTransformation, } - n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - var kind reflect.Kind - for p, ok := n.ins[0].NextPoint(); ok; { - n.timer.Start() - mu.RLock() - context := contexts[p.Group] - mu.RUnlock() - // First point in window - if context == nil { - // Create new context - c := baseReduceContext{ - as: n.n.As, - field: n.n.Field, - name: p.Name, - group: p.Group, - dimensions: p.Dimensions, - tags: p.PointTags(), - time: p.Time, - pointTimes: n.n.PointTimes || n.isStreamTransformation, - } + g := influxqlGroup{ + n: n, + bc: bc, + } + if n.isStreamTransformation { + return &influxqlStreamingTransformGroup{ + influxqlGroup: g, + } + } + return &g +} - f, exists := p.Fields[c.field] - if !exists { - n.incrementErrorCount() - n.logger.Printf("E! 
field %s missing from point, skipping point", c.field) - p, ok = n.ins[0].NextPoint() - n.timer.Stop() - continue - } +type influxqlGroup struct { + n *InfluxQLNode - k := reflect.TypeOf(f).Kind() - kindChanged := k != kind - kind = k + bc baseReduceContext + rc reduceContext - createFn, err := n.getCreateFn(kindChanged, kind) - if err != nil { - return err - } + batchSize int + name string + begin edge.BeginBatchMessage +} - context = createFn(c) - mu.Lock() - contexts[p.Group] = context - mu.Unlock() +func (g *influxqlGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + g.begin = begin + g.batchSize = 0 + g.bc.time = begin.Time() + g.rc = nil + return nil, nil +} +func (g *influxqlGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + if g.rc == nil { + if err := g.realizeReduceContextFromFields(bp.Fields()); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) + return nil, nil } - if n.isStreamTransformation { - err := context.AggregatePoint(&p) + } + g.batchSize++ + if err := g.rc.AggregatePoint(g.begin.Name(), bp); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to aggregate point in batch:", err) + } + return nil, nil +} + +func (g *influxqlGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + if g.batchSize == 0 && !g.n.n.ReduceCreater.IsEmptyOK { + // Do not call Emit on the reducer since it can't handle empty batches. + return nil, nil + } + if g.rc == nil { + // Assume float64 type since we do not have any data. + if err := g.realizeReduceContext(reflect.Float64); err != nil { + return nil, err + } + } + m, err := g.n.emit(g.rc) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to emit batch:", err) + return nil, nil + } + return m, nil +} + +func (g *influxqlGroup) Point(p edge.PointMessage) (edge.Message, error) { + if p.Time().Equal(g.bc.time) { + g.aggregatePoint(p) + } else { + // Time has elapsed, emit current context + var msg edge.Message + if g.rc != nil { + m, err := g.n.emit(g.rc) if err != nil { - n.incrementErrorCount() - n.logger.Println("E! failed to aggregate point:", err) + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to emit stream:", err) } - p, ok = n.ins[0].NextPoint() + msg = m + } - err = n.emit(context) - if err != nil && err != ErrEmptyEmit { - n.incrementErrorCount() - n.logger.Println("E! failed to emit stream:", err) - } - } else { - if p.Time.Equal(context.Time()) { - err := context.AggregatePoint(&p) - if err != nil { - n.incrementErrorCount() - n.logger.Println("E! failed to aggregate point:", err) - } - // advance to next point - p, ok = n.ins[0].NextPoint() - } else { - err := n.emit(context) - if err != nil { - n.incrementErrorCount() - n.logger.Println("E! failed to emit stream:", err) - } - - // Nil out reduced point - mu.Lock() - contexts[p.Group] = nil - mu.Unlock() - // do not advance, - // go through loop again to initialize new iterator. - } + // Reset context + g.bc.name = p.Name() + g.bc.time = p.Time() + g.rc = nil + + // Aggregate the current point + g.aggregatePoint(p) + + return msg, nil + } + return nil, nil +} + +func (g *influxqlGroup) aggregatePoint(p edge.PointMessage) { + if g.rc == nil { + if err := g.realizeReduceContextFromFields(p.Fields()); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) + return } - n.timer.Stop() } + err := g.rc.AggregatePoint(p.Name(), p) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! 
failed to aggregate point:", err) + } +} + +func (g *influxqlGroup) getFieldKind(fields models.Fields) (reflect.Kind, error) { + f, exists := fields[g.bc.field] + if !exists { + return reflect.Invalid, fmt.Errorf("field %q missing from point", g.bc.field) + } + + return reflect.TypeOf(f).Kind(), nil +} +func (g *influxqlGroup) realizeReduceContextFromFields(fields models.Fields) error { + k, err := g.getFieldKind(fields) + if err != nil { + return err + } + return g.realizeReduceContext(k) +} + +func (g *influxqlGroup) realizeReduceContext(kind reflect.Kind) error { + createFn, err := g.n.getCreateFn(kind) + if err != nil { + return err + } + g.rc = createFn(g.bc) return nil } -func (n *InfluxQLNode) runBatchInfluxQL() error { - var kind reflect.Kind - kindChanged := true - for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() { - n.timer.Start() - // Create new base context - c := baseReduceContext{ - as: n.n.As, - field: n.n.Field, - name: b.Name, - group: b.Group, - dimensions: b.PointDimensions(), - tags: b.Tags, - time: b.TMax, - pointTimes: n.n.PointTimes || n.isStreamTransformation, - } - if len(b.Points) == 0 { - if !n.n.ReduceCreater.IsEmptyOK { - // If the reduce does not handle empty batches continue - n.timer.Stop() - continue - } - if kind == reflect.Invalid { - // If we have no points and have never seen a point assume float64 - kind = reflect.Float64 - } - } else { - f, ok := b.Points[0].Fields[c.field] - if !ok { - n.incrementErrorCount() - n.logger.Printf("E! field %s missing from point, skipping batch", c.field) - n.timer.Stop() - continue - } - k := reflect.TypeOf(f).Kind() - kindChanged = k != kind - kind = k - } - createFn, err := n.getCreateFn(kindChanged, kind) - if err != nil { - return err +func (g *influxqlGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *influxqlGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + +type influxqlStreamingTransformGroup struct { + influxqlGroup +} + +func (g *influxqlStreamingTransformGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + g.begin = begin.ShallowCopy() + g.begin.SetSizeHint(0) + g.bc.time = begin.Time() + g.rc = nil + return begin, nil +} + +func (g *influxqlStreamingTransformGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + if g.rc == nil { + if err := g.realizeReduceContextFromFields(bp.Fields()); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) + return nil, nil } + } + if err := g.rc.AggregatePoint(g.begin.Name(), bp); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to aggregate batch point:", err) + } + if ep, err := g.rc.EmitPoint(); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to emit batch point:", err) + } else if ep != nil { + return edge.NewBatchPointMessage( + ep.Fields(), + ep.Tags(), + ep.Time(), + ), nil + } + return nil, nil +} - context := createFn(c) - if n.isStreamTransformation { - // We have a stream transformation, so treat the batch as if it were a stream - // Create a new batch for emitting - eb := b - eb.Points = make([]models.BatchPoint, 0, len(b.Points)) - for _, bp := range b.Points { - p := models.Point{ - Name: b.Name, - Time: bp.Time, - Fields: bp.Fields, - Tags: bp.Tags, - } - if err := context.AggregatePoint(&p); err != nil { - n.incrementErrorCount() - n.logger.Println("E! 
failed to aggregate batch point:", err) - } - if ep, err := context.EmitPoint(); err != nil && err != ErrEmptyEmit { - n.incrementErrorCount() - n.logger.Println("E! failed to emit batch point:", err) - } else if err != ErrEmptyEmit { - eb.Points = append(eb.Points, models.BatchPoint{ - Time: ep.Time, - Fields: ep.Fields, - Tags: ep.Tags, - }) - } - } - // Emit the complete batch - n.timer.Pause() - for _, out := range n.outs { - if err := out.CollectBatch(eb); err != nil { - n.incrementErrorCount() - n.logger.Println("E! failed to emit batch points:", err) - } - } - n.timer.Resume() - } else { - err := context.AggregateBatch(&b) - if err == nil { - if err := n.emit(context); err != nil { - n.incrementErrorCount() - n.logger.Println("E! failed to emit batch:", err) - } - } else { - n.incrementErrorCount() - n.logger.Println("E! failed to aggregate batch:", err) - } +func (g *influxqlStreamingTransformGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *influxqlStreamingTransformGroup) Point(p edge.PointMessage) (edge.Message, error) { + if g.rc == nil { + if err := g.realizeReduceContextFromFields(p.Fields()); err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) + // Skip point + return nil, nil } - n.timer.Stop() } - return nil + err := g.rc.AggregatePoint(p.Name(), p) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to aggregate point:", err) + } + + m, err := g.n.emit(g.rc) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! failed to emit stream:", err) + return nil, nil + } + return m, nil +} + +func (g *influxqlStreamingTransformGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil } -func (n *InfluxQLNode) getCreateFn(changed bool, kind reflect.Kind) (createReduceContextFunc, error) { +func (n *InfluxQLNode) getCreateFn(kind reflect.Kind) (createReduceContextFunc, error) { + changed := n.currentKind != kind if !changed && n.createFn != nil { return n.createFn, nil } + n.currentKind = kind createFn, err := determineReduceContextCreateFn(n.n.Method, kind, n.n.ReduceCreater) if err != nil { return nil, errors.Wrapf(err, "invalid influxql func %s with field %s", n.n.Method, n.n.Field) @@ -278,31 +302,12 @@ func (n *InfluxQLNode) getCreateFn(changed bool, kind reflect.Kind) (createReduc return n.createFn, nil } -func (n *InfluxQLNode) emit(context reduceContext) error { +func (n *InfluxQLNode) emit(context reduceContext) (edge.Message, error) { switch n.Provides() { case pipeline.StreamEdge: - p, err := context.EmitPoint() - if err != nil { - return err - } - n.timer.Pause() - for _, out := range n.outs { - err := out.CollectPoint(p) - if err != nil { - return err - } - } - n.timer.Resume() + return context.EmitPoint() case pipeline.BatchEdge: - b := context.EmitBatch() - n.timer.Pause() - for _, out := range n.outs { - err := out.CollectBatch(b) - if err != nil { - return err - } - } - n.timer.Resume() + return context.EmitBatch(), nil } - return nil + return nil, nil } diff --git a/integrations/batcher_test.go b/integrations/batcher_test.go index 58837ad07..cd15d3e40 100644 --- a/integrations/batcher_test.go +++ b/integrations/batcher_test.go @@ -350,7 +350,6 @@ batch .period(10s) .every(10s) |difference('value') - |log() |httpOut('TestBatch_Difference') ` diff --git a/integrations/data/TestBatch_Combine.0.brpl b/integrations/data/TestBatch_Combine.0.brpl index 3811f1d5b..58ca0b9b8 100644 --- a/integrations/data/TestBatch_Combine.0.brpl +++ 
b/integrations/data/TestBatch_Combine.0.brpl @@ -1,18 +1,23 @@ -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":8},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":3},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":4},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":2},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":7},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":3},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":7},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":9},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":6},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":5},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":4},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} 
-{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":8},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":3},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":4},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":2},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} 
+{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":7},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":3},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":7},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":9},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":6},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":5},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":4},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} diff --git a/integrations/data/TestBatch_Flatten.0.brpl b/integrations/data/TestBatch_Flatten.0.brpl index 3811f1d5b..58ca0b9b8 100644 --- a/integrations/data/TestBatch_Flatten.0.brpl +++ b/integrations/data/TestBatch_Flatten.0.brpl @@ -1,18 +1,23 @@ -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} 
-{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":8},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":3},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":4},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":2},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":7},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":3},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":7},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":9},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":6},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":5},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":4},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} -{"name":"request_latency","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":0},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} 
-{"name":"request_latency","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} -{"name":"request_latency","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":0},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:00Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:05Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:01Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:06Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:10Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:02Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:07Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":8},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":3},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":4},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":2},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":7},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} + 
+{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":3},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:10Z"},{"fields":{"value":7},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:15Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":9},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:11Z"},{"fields":{"value":6},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:16Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:20Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":5},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:12Z"},{"fields":{"value":4},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:17Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"A","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":1},"tags":{"dc":"A","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} + +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"cart"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:20Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"cart"},"time":"2015-10-30T00:00:25Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"auth"},"points":[{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:21Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"auth"},"time":"2015-10-30T00:00:26Z"}]} +{"name":"request_latency","tmax":"2015-10-30T00:00:30Z","tags":{"dc":"B","service":"log"}, "points":[{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:22Z"},{"fields":{"value":1},"tags":{"dc":"B","service":"log"}, "time":"2015-10-30T00:00:27Z"}]} diff --git a/integrations/data/TestBatch_GroupByMeasurement.0.brpl b/integrations/data/TestBatch_GroupByMeasurement.0.brpl index 812dbea18..df92aa77f 100644 --- a/integrations/data/TestBatch_GroupByMeasurement.0.brpl +++ b/integrations/data/TestBatch_GroupByMeasurement.0.brpl @@ -1,18 +1,18 @@ -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":90.38281469458698},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":86.51447101892941},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":91.71877558217454},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":87.10524436107617},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":90.3900735196668},"time":"2015-10-30T17:14:20Z"}]} 
-{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":0.38281469458698},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":6.51447101892941},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":1.71877558217454},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":7.10524436107617},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":0.3900735196668},"time":"2015-10-30T17:14:20Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":83.56930693069836},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":79.12871287128638},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":88.99559823928229},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":85.50000000000182},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":86.02860286029956},"time":"2015-10-30T17:14:20Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":3.56930693069836},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":9.12871287128638},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":8.99559823928229},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":5.50000000000182},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":6.02860286029956},"time":"2015-10-30T17:14:20Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":93.49999999999409},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":91.44444444443974},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":93.44897959187637},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":95.99999999995998},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":97.00970097012197},"time":"2015-10-30T17:14:20Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":3.49999999999409},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":1.44444444443974},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":3.44897959187637},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":5.99999999995998},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":7.00970097012197},"time":"2015-10-30T17:14:20Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":90.8919959776013},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":86.54244306420236},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":91.01699558842134},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":85.66378399063848},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":89.90919811320221},"time":"2015-10-30T17:14:30Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":0.8919959776013},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":6.54244306420236},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":1.01699558842134},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":5.66378399063848},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":9.90919811320221},"time":"2015-10-30T17:14:30Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":81.72501716191164},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":81.03810381037587},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":85.93434343435388},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":85.36734693878043},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":83.01320528210614},"time":"2015-10-30T17:14:30Z"}]} 
-{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":1.72501716191164},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":1.03810381037587},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":5.93434343435388},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":5.36734693878043},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":3.01320528210614},"time":"2015-10-30T17:14:30Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":95.98484848485191},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":92.098039215696},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":92.99999999998363},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":86.54015887023496},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":95.48979591840603},"time":"2015-10-30T17:14:30Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":5.98484848485191},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":2.098039215696},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":2.99999999998363},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":6.54015887023496},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":5.48979591840603},"time":"2015-10-30T17:14:30Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":91.06416290101595},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":85.9694442394385},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":90.62985736134186},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":86.45443196005628},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":88.97243107764031},"time":"2015-10-30T17:14:40Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":1.06416290101595},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":5.9694442394385},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":0.62985736134186},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":6.45443196005628},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":8.97243107764031},"time":"2015-10-30T17:14:40Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":85.08910891088406},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":78.00000000002001},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":84.23607066586464},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":80.85858585861834},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":80.61224489791657},"time":"2015-10-30T17:14:40Z"}]} -{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":5.08910891088406},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":8.00000000002001},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":4.23607066586464},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":0.85858585861834},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":0.61224489791657},"time":"2015-10-30T17:14:40Z"}]} -{"name":"cpu_usage_idle","group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":96.49999999996908},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":93.46464646468584},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":95.00950095007724},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":92.99999999998636},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":90.99999999998545},"time":"2015-10-30T17:14:40Z"}]} 
-{"name":"cpu_usage_user","group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":6.49999999996908},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":3.46464646468584},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":5.00950095007724},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":2.99999999998636},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":0.99999999998545},"time":"2015-10-30T17:14:40Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":90.38281469458698},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":86.51447101892941},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":91.71877558217454},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":87.10524436107617},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":90.3900735196668},"time":"2015-10-30T17:14:20Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":0.38281469458698},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":6.51447101892941},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":1.71877558217454},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":7.10524436107617},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":0.3900735196668},"time":"2015-10-30T17:14:20Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":83.56930693069836},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":79.12871287128638},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":88.99559823928229},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":85.50000000000182},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":86.02860286029956},"time":"2015-10-30T17:14:20Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":3.56930693069836},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":9.12871287128638},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":8.99559823928229},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":5.50000000000182},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":6.02860286029956},"time":"2015-10-30T17:14:20Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":93.49999999999409},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":91.44444444443974},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":93.44897959187637},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":95.99999999995998},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":97.00970097012197},"time":"2015-10-30T17:14:20Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":3.49999999999409},"time":"2015-10-30T17:14:12Z"},{"fields":{"mean":1.44444444443974},"time":"2015-10-30T17:14:14Z"},{"fields":{"mean":3.44897959187637},"time":"2015-10-30T17:14:16Z"},{"fields":{"mean":5.99999999995998},"time":"2015-10-30T17:14:18Z"},{"fields":{"mean":7.00970097012197},"time":"2015-10-30T17:14:20Z"}]} 
+{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":90.8919959776013},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":86.54244306420236},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":91.01699558842134},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":85.66378399063848},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":89.90919811320221},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":0.8919959776013},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":6.54244306420236},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":1.01699558842134},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":5.66378399063848},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":9.90919811320221},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":81.72501716191164},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":81.03810381037587},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":85.93434343435388},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":85.36734693878043},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":83.01320528210614},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":1.72501716191164},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":1.03810381037587},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":5.93434343435388},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":5.36734693878043},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":3.01320528210614},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":95.98484848485191},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":92.098039215696},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":92.99999999998363},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":86.54015887023496},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":95.48979591840603},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":5.98484848485191},"time":"2015-10-30T17:14:22Z"},{"fields":{"mean":2.098039215696},"time":"2015-10-30T17:14:24Z"},{"fields":{"mean":2.99999999998363},"time":"2015-10-30T17:14:26Z"},{"fields":{"mean":6.54015887023496},"time":"2015-10-30T17:14:28Z"},{"fields":{"mean":5.48979591840603},"time":"2015-10-30T17:14:30Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu-total","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":91.06416290101595},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":85.9694442394385},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":90.62985736134186},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":86.45443196005628},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":88.97243107764031},"time":"2015-10-30T17:14:40Z"}]} 
+{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu0","tags":{"cpu":"cpu-total"},"points":[{"fields":{"mean":1.06416290101595},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":5.9694442394385},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":0.62985736134186},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":6.45443196005628},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":8.97243107764031},"time":"2015-10-30T17:14:40Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu1","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":85.08910891088406},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":78.00000000002001},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":84.23607066586464},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":80.85858585861834},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":80.61224489791657},"time":"2015-10-30T17:14:40Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu-total","tags":{"cpu":"cpu0"},"points":[{"fields":{"mean":5.08910891088406},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":8.00000000002001},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":4.23607066586464},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":0.85858585861834},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":0.61224489791657},"time":"2015-10-30T17:14:40Z"}]} +{"name":"cpu_usage_idle","byname":true,"group":"cpu_usage_idle,cpu=cpu0","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":96.49999999996908},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":93.46464646468584},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":95.00950095007724},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":92.99999999998636},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":90.99999999998545},"time":"2015-10-30T17:14:40Z"}]} +{"name":"cpu_usage_user","byname":true,"group":"cpu_usage_user,cpu=cpu1","tags":{"cpu":"cpu1"},"points":[{"fields":{"mean":6.49999999996908},"time":"2015-10-30T17:14:32Z"},{"fields":{"mean":3.46464646468584},"time":"2015-10-30T17:14:34Z"},{"fields":{"mean":5.00950095007724},"time":"2015-10-30T17:14:36Z"},{"fields":{"mean":2.99999999998636},"time":"2015-10-30T17:14:38Z"},{"fields":{"mean":0.99999999998545},"time":"2015-10-30T17:14:40Z"}]} diff --git a/integrations/data/TestStream_Sample.srpl b/integrations/data/TestStream_Sample.srpl new file mode 100644 index 000000000..712fde446 --- /dev/null +++ b/integrations/data/TestStream_Sample.srpl @@ -0,0 +1,33 @@ +dbname +rpname +packets value=1000 0000000000 +dbname +rpname +packets value=1001 0000000001 +dbname +rpname +packets value=1002 0000000002 +dbname +rpname +packets value=1003 0000000003 +dbname +rpname +packets value=1004 0000000004 +dbname +rpname +packets value=1005 0000000005 +dbname +rpname +packets value=1006 0000000006 +dbname +rpname +packets value=1007 0000000007 +dbname +rpname +packets value=1008 0000000008 +dbname +rpname +packets value=1009 0000000009 +dbname +rpname +packets value=1010 0000000010 diff --git a/integrations/data/TestStream_TopSelector.srpl b/integrations/data/TestStream_TopSelector.srpl index 622674b1c..3514e8c0e 100644 --- a/integrations/data/TestStream_TopSelector.srpl +++ b/integrations/data/TestStream_TopSelector.srpl @@ -1,618 +1,612 @@ dbname rpname -scores,game=g0,player=p0 value=434 1447114137 +scores,game=g0,player=p0 value=434 0000000000 dbname rpname -scores,game=g0,player=p1 value=967 1447114137 +scores,game=g0,player=p1 value=967 0000000000 dbname rpname -scores,game=g0,player=p2 value=534 1447114137 +scores,game=g0,player=p2 value=534 
0000000000 dbname rpname -scores,game=g0,player=p3 value=669 1447114137 +scores,game=g0,player=p3 value=669 0000000000 dbname rpname -scores,game=g0,player=p4 value=173 1447114137 +scores,game=g0,player=p4 value=173 0000000000 dbname rpname -scores,game=g0,player=p5 value=88 1447114137 +scores,game=g0,player=p5 value=88 0000000000 dbname rpname -scores,game=g0,player=p6 value=848 1447114137 +scores,game=g0,player=p6 value=848 0000000000 dbname rpname -scores,game=g0,player=p7 value=340 1447114137 +scores,game=g0,player=p7 value=340 0000000000 dbname rpname -scores,game=g0,player=p8 value=775 1447114137 +scores,game=g0,player=p8 value=775 0000000000 dbname rpname -scores,game=g0,player=p9 value=817 1447114137 +scores,game=g0,player=p9 value=817 0000000000 dbname rpname -scores,game=g0,player=p10 value=542 1447114137 +scores,game=g0,player=p10 value=542 0000000000 dbname rpname -scores,game=g0,player=p11 value=598 1447114137 +scores,game=g0,player=p11 value=598 0000000000 dbname rpname -scores,game=g0,player=p12 value=300 1447114137 +scores,game=g0,player=p12 value=300 0000000000 dbname rpname -scores,game=g0,player=p13 value=750 1447114137 +scores,game=g0,player=p13 value=750 0000000000 dbname rpname -scores,game=g0,player=p14 value=875 1447114137 +scores,game=g0,player=p14 value=875 0000000000 dbname rpname -scores,game=g0,player=p15 value=519 1447114137 +scores,game=g0,player=p15 value=519 0000000000 dbname rpname -scores,game=g0,player=p16 value=432 1447114137 +scores,game=g0,player=p16 value=432 0000000000 dbname rpname -scores,game=g0,player=p17 value=157 1447114137 +scores,game=g0,player=p17 value=157 0000000000 dbname rpname -scores,game=g0,player=p18 value=128 1447114137 +scores,game=g0,player=p18 value=128 0000000000 dbname rpname -scores,game=g0,player=p19 value=70 1447114137 +scores,game=g0,player=p19 value=70 0000000000 dbname rpname -scores,game=g1,player=p0 value=50 1447114137 +scores,game=g1,player=p0 value=50 0000000000 dbname rpname -scores,game=g1,player=p1 value=671 1447114137 +scores,game=g1,player=p1 value=671 0000000000 dbname rpname -scores,game=g1,player=p2 value=328 1447114137 +scores,game=g1,player=p2 value=328 0000000000 dbname rpname -scores,game=g1,player=p3 value=836 1447114137 +scores,game=g1,player=p3 value=836 0000000000 dbname rpname -scores,game=g1,player=p4 value=866 1447114137 +scores,game=g1,player=p4 value=866 0000000000 dbname rpname -scores,game=g1,player=p5 value=884 1447114137 +scores,game=g1,player=p5 value=884 0000000000 dbname rpname -scores,game=g1,player=p6 value=989 1447114137 +scores,game=g1,player=p6 value=989 0000000000 dbname rpname -scores,game=g1,player=p7 value=42 1447114137 +scores,game=g1,player=p7 value=42 0000000000 dbname rpname -scores,game=g1,player=p8 value=919 1447114137 +scores,game=g1,player=p8 value=919 0000000000 dbname rpname -scores,game=g1,player=p9 value=561 1447114137 +scores,game=g1,player=p9 value=561 0000000000 dbname rpname -scores,game=g1,player=p10 value=959 1447114137 +scores,game=g1,player=p10 value=959 0000000000 dbname rpname -scores,game=g1,player=p11 value=220 1447114137 +scores,game=g1,player=p11 value=220 0000000000 dbname rpname -scores,game=g1,player=p12 value=91 1447114137 +scores,game=g1,player=p12 value=91 0000000000 dbname rpname -scores,game=g1,player=p13 value=123 1447114137 +scores,game=g1,player=p13 value=123 0000000000 dbname rpname -scores,game=g1,player=p14 value=737 1447114137 +scores,game=g1,player=p14 value=737 0000000000 dbname rpname -scores,game=g1,player=p15 value=909 1447114137 
+scores,game=g1,player=p15 value=909 0000000000 dbname rpname -scores,game=g1,player=p16 value=786 1447114137 +scores,game=g1,player=p16 value=786 0000000000 dbname rpname -scores,game=g1,player=p17 value=471 1447114137 +scores,game=g1,player=p17 value=471 0000000000 dbname rpname -scores,game=g1,player=p18 value=152 1447114137 +scores,game=g1,player=p18 value=152 0000000000 dbname rpname -scores,game=g1,player=p19 value=254 1447114137 +scores,game=g1,player=p19 value=254 0000000000 dbname rpname -scores,game=g0,player=p0 value=347 1447114138 +scores,game=g0,player=p0 value=347 0000000001 dbname rpname -scores,game=g0,player=p1 value=623 1447114138 +scores,game=g0,player=p1 value=623 0000000001 dbname rpname -scores,game=g0,player=p2 value=872 1447114138 +scores,game=g0,player=p2 value=872 0000000001 dbname rpname -scores,game=g0,player=p3 value=798 1447114138 +scores,game=g0,player=p3 value=798 0000000001 dbname rpname -scores,game=g0,player=p4 value=840 1447114138 +scores,game=g0,player=p4 value=840 0000000001 dbname rpname -scores,game=g0,player=p5 value=584 1447114138 +scores,game=g0,player=p5 value=584 0000000001 dbname rpname -scores,game=g0,player=p6 value=843 1447114138 +scores,game=g0,player=p6 value=843 0000000001 dbname rpname -scores,game=g0,player=p7 value=16 1447114138 +scores,game=g0,player=p7 value=16 0000000001 dbname rpname -scores,game=g0,player=p8 value=269 1447114138 +scores,game=g0,player=p8 value=269 0000000001 dbname rpname -scores,game=g0,player=p9 value=538 1447114138 +scores,game=g0,player=p9 value=538 0000000001 dbname rpname -scores,game=g0,player=p10 value=224 1447114138 +scores,game=g0,player=p10 value=224 0000000001 dbname rpname -scores,game=g0,player=p11 value=931 1447114138 +scores,game=g0,player=p11 value=931 0000000001 dbname rpname -scores,game=g0,player=p12 value=196 1447114138 +scores,game=g0,player=p12 value=196 0000000001 dbname rpname -scores,game=g0,player=p13 value=894 1447114138 +scores,game=g0,player=p13 value=894 0000000001 dbname rpname -scores,game=g0,player=p14 value=159 1447114138 +scores,game=g0,player=p14 value=159 0000000001 dbname rpname -scores,game=g0,player=p15 value=69 1447114138 +scores,game=g0,player=p15 value=69 0000000001 dbname rpname -scores,game=g0,player=p16 value=789 1447114138 +scores,game=g0,player=p16 value=789 0000000001 dbname rpname -scores,game=g0,player=p17 value=518 1447114138 +scores,game=g0,player=p17 value=518 0000000001 dbname rpname -scores,game=g0,player=p18 value=73 1447114138 +scores,game=g0,player=p18 value=73 0000000001 dbname rpname -scores,game=g0,player=p19 value=116 1447114138 +scores,game=g0,player=p19 value=116 0000000001 dbname rpname -scores,game=g1,player=p0 value=965 1447114138 +scores,game=g1,player=p0 value=965 0000000001 dbname rpname -scores,game=g1,player=p1 value=553 1447114138 +scores,game=g1,player=p1 value=553 0000000001 dbname rpname -scores,game=g1,player=p2 value=599 1447114138 +scores,game=g1,player=p2 value=599 0000000001 dbname rpname -scores,game=g1,player=p3 value=594 1447114138 +scores,game=g1,player=p3 value=594 0000000001 dbname rpname -scores,game=g1,player=p4 value=213 1447114138 +scores,game=g1,player=p4 value=213 0000000001 dbname rpname -scores,game=g1,player=p5 value=392 1447114138 +scores,game=g1,player=p5 value=392 0000000001 dbname rpname -scores,game=g1,player=p6 value=466 1447114138 +scores,game=g1,player=p6 value=466 0000000001 dbname rpname -scores,game=g1,player=p7 value=63 1447114138 +scores,game=g1,player=p7 value=63 0000000001 dbname rpname 
-scores,game=g1,player=p8 value=953 1447114138 +scores,game=g1,player=p8 value=953 0000000001 dbname rpname -scores,game=g1,player=p9 value=660 1447114138 +scores,game=g1,player=p9 value=660 0000000001 dbname rpname -scores,game=g1,player=p10 value=466 1447114138 +scores,game=g1,player=p10 value=466 0000000001 dbname rpname -scores,game=g1,player=p11 value=261 1447114138 +scores,game=g1,player=p11 value=261 0000000001 dbname rpname -scores,game=g1,player=p12 value=833 1447114138 +scores,game=g1,player=p12 value=833 0000000001 dbname rpname -scores,game=g1,player=p13 value=734 1447114138 +scores,game=g1,player=p13 value=734 0000000001 dbname rpname -scores,game=g1,player=p14 value=73 1447114138 +scores,game=g1,player=p14 value=73 0000000001 dbname rpname -scores,game=g1,player=p15 value=277 1447114138 +scores,game=g1,player=p15 value=277 0000000001 dbname rpname -scores,game=g1,player=p16 value=188 1447114138 +scores,game=g1,player=p16 value=188 0000000001 dbname rpname -scores,game=g1,player=p17 value=463 1447114138 +scores,game=g1,player=p17 value=463 0000000001 dbname rpname -scores,game=g1,player=p18 value=813 1447114138 +scores,game=g1,player=p18 value=813 0000000001 dbname rpname -scores,game=g1,player=p19 value=370 1447114138 +scores,game=g1,player=p19 value=370 0000000001 dbname rpname -scores,game=g0,player=p0 value=209 1447114139 +scores,game=g0,player=p0 value=209 0000000002 dbname rpname -scores,game=g0,player=p1 value=104 1447114139 +scores,game=g0,player=p1 value=104 0000000002 dbname rpname -scores,game=g0,player=p2 value=905 1447114139 +scores,game=g0,player=p2 value=905 0000000002 dbname rpname -scores,game=g0,player=p3 value=881 1447114139 +scores,game=g0,player=p3 value=881 0000000002 dbname rpname -scores,game=g0,player=p4 value=306 1447114139 +scores,game=g0,player=p4 value=306 0000000002 dbname rpname -scores,game=g0,player=p5 value=907 1447114139 +scores,game=g0,player=p5 value=907 0000000002 dbname rpname -scores,game=g0,player=p6 value=817 1447114139 +scores,game=g0,player=p6 value=817 0000000002 dbname rpname -scores,game=g0,player=p7 value=124 1447114139 +scores,game=g0,player=p7 value=124 0000000002 dbname rpname -scores,game=g0,player=p8 value=285 1447114139 +scores,game=g0,player=p8 value=285 0000000002 dbname rpname -scores,game=g0,player=p9 value=467 1447114139 +scores,game=g0,player=p9 value=467 0000000002 dbname rpname -scores,game=g0,player=p10 value=499 1447114139 +scores,game=g0,player=p10 value=499 0000000002 dbname rpname -scores,game=g0,player=p11 value=855 1447114139 +scores,game=g0,player=p11 value=855 0000000002 dbname rpname -scores,game=g0,player=p12 value=616 1447114139 +scores,game=g0,player=p12 value=616 0000000002 dbname rpname -scores,game=g0,player=p13 value=391 1447114139 +scores,game=g0,player=p13 value=391 0000000002 dbname rpname -scores,game=g0,player=p14 value=218 1447114139 +scores,game=g0,player=p14 value=218 0000000002 dbname rpname -scores,game=g0,player=p15 value=66 1447114139 +scores,game=g0,player=p15 value=66 0000000002 dbname rpname -scores,game=g0,player=p16 value=644 1447114139 +scores,game=g0,player=p16 value=644 0000000002 dbname rpname -scores,game=g0,player=p17 value=336 1447114139 +scores,game=g0,player=p17 value=336 0000000002 dbname rpname -scores,game=g0,player=p18 value=938 1447114139 +scores,game=g0,player=p18 value=938 0000000002 dbname rpname -scores,game=g0,player=p19 value=972 1447114139 +scores,game=g0,player=p19 value=972 0000000002 dbname rpname -scores,game=g1,player=p0 value=487 1447114139 
+scores,game=g1,player=p0 value=487 0000000002 dbname rpname -scores,game=g1,player=p1 value=265 1447114139 +scores,game=g1,player=p1 value=265 0000000002 dbname rpname -scores,game=g1,player=p2 value=602 1447114139 +scores,game=g1,player=p2 value=602 0000000002 dbname rpname -scores,game=g1,player=p3 value=511 1447114139 +scores,game=g1,player=p3 value=511 0000000002 dbname rpname -scores,game=g1,player=p4 value=848 1447114139 +scores,game=g1,player=p4 value=848 0000000002 dbname rpname -scores,game=g1,player=p5 value=734 1447114139 +scores,game=g1,player=p5 value=734 0000000002 dbname rpname -scores,game=g1,player=p6 value=857 1447114139 +scores,game=g1,player=p6 value=857 0000000002 dbname rpname -scores,game=g1,player=p7 value=140 1447114139 +scores,game=g1,player=p7 value=140 0000000002 dbname rpname -scores,game=g1,player=p8 value=247 1447114139 +scores,game=g1,player=p8 value=247 0000000002 dbname rpname -scores,game=g1,player=p9 value=981 1447114139 +scores,game=g1,player=p9 value=981 0000000002 dbname rpname -scores,game=g1,player=p10 value=640 1447114139 +scores,game=g1,player=p10 value=640 0000000002 dbname rpname -scores,game=g1,player=p11 value=485 1447114139 +scores,game=g1,player=p11 value=485 0000000002 dbname rpname -scores,game=g1,player=p12 value=681 1447114139 +scores,game=g1,player=p12 value=681 0000000002 dbname rpname -scores,game=g1,player=p13 value=109 1447114139 +scores,game=g1,player=p13 value=109 0000000002 dbname rpname -scores,game=g1,player=p14 value=55 1447114139 +scores,game=g1,player=p14 value=55 0000000002 dbname rpname -scores,game=g1,player=p15 value=184 1447114139 +scores,game=g1,player=p15 value=184 0000000002 dbname rpname -scores,game=g1,player=p16 value=659 1447114139 +scores,game=g1,player=p16 value=659 0000000002 dbname rpname -scores,game=g1,player=p17 value=784 1447114139 +scores,game=g1,player=p17 value=784 0000000002 dbname rpname -scores,game=g1,player=p18 value=452 1447114139 +scores,game=g1,player=p18 value=452 0000000002 dbname rpname -scores,game=g1,player=p19 value=986 1447114139 +scores,game=g1,player=p19 value=986 0000000002 dbname rpname -scores,game=g0,player=p0 value=248 1447114140 +scores,game=g0,player=p0 value=248 0000000003 dbname rpname -scores,game=g0,player=p1 value=539 1447114140 +scores,game=g0,player=p1 value=539 0000000003 dbname rpname -scores,game=g0,player=p2 value=347 1447114140 +scores,game=g0,player=p2 value=347 0000000003 dbname rpname -scores,game=g0,player=p3 value=114 1447114140 +scores,game=g0,player=p3 value=114 0000000003 dbname rpname -scores,game=g0,player=p4 value=328 1447114140 +scores,game=g0,player=p4 value=328 0000000003 dbname rpname -scores,game=g0,player=p5 value=877 1447114140 +scores,game=g0,player=p5 value=877 0000000003 dbname rpname -top_scores_gap,game=g0 gap=145,topFirst=918,topLast=773 1447114070 +scores,game=g0,player=p6 value=708 0000000003 dbname rpname -scores,game=g0,player=p6 value=708 1447114140 +scores,game=g0,player=p7 value=978 0000000003 dbname rpname -scores,game=g0,player=p7 value=978 1447114140 +scores,game=g0,player=p8 value=454 0000000003 dbname rpname -scores,game=g0,player=p8 value=454 1447114140 +scores,game=g0,player=p9 value=878 0000000003 dbname rpname -scores,game=g0,player=p9 value=878 1447114140 +scores,game=g0,player=p10 value=957 0000000003 dbname rpname -scores,game=g0,player=p10 value=957 1447114140 +scores,game=g0,player=p11 value=356 0000000003 dbname rpname -scores,game=g0,player=p11 value=356 1447114140 +scores,game=g0,player=p12 value=539 0000000003 dbname 
rpname -scores,game=g0,player=p12 value=539 1447114140 +scores,game=g0,player=p13 value=623 0000000003 dbname rpname -scores,game=g0,player=p13 value=623 1447114140 +scores,game=g0,player=p14 value=627 0000000003 dbname rpname -scores,game=g0,player=p14 value=627 1447114140 +scores,game=g0,player=p15 value=791 0000000003 dbname rpname -scores,game=g0,player=p15 value=791 1447114140 +scores,game=g0,player=p16 value=247 0000000003 dbname rpname -scores,game=g0,player=p16 value=247 1447114140 +scores,game=g0,player=p17 value=780 0000000003 dbname rpname -scores,game=g0,player=p17 value=780 1447114140 +scores,game=g0,player=p18 value=138 0000000003 dbname rpname -scores,game=g0,player=p18 value=138 1447114140 +scores,game=g0,player=p19 value=491 0000000003 dbname rpname -scores,game=g0,player=p19 value=491 1447114140 +scores,game=g1,player=p0 value=879 0000000003 dbname rpname -scores,game=g1,player=p0 value=879 1447114140 +scores,game=g1,player=p1 value=617 0000000003 dbname rpname -scores,game=g1,player=p1 value=617 1447114140 +scores,game=g1,player=p2 value=265 0000000003 dbname rpname -scores,game=g1,player=p2 value=265 1447114140 +scores,game=g1,player=p3 value=788 0000000003 dbname rpname -scores,game=g1,player=p3 value=788 1447114140 +scores,game=g1,player=p4 value=685 0000000003 dbname rpname -scores,game=g1,player=p4 value=685 1447114140 +scores,game=g1,player=p5 value=383 0000000003 dbname rpname -scores,game=g1,player=p5 value=383 1447114140 +scores,game=g1,player=p6 value=180 0000000003 dbname rpname -scores,game=g1,player=p6 value=180 1447114140 +scores,game=g1,player=p7 value=770 0000000003 dbname rpname -scores,game=g1,player=p7 value=770 1447114140 +scores,game=g1,player=p8 value=344 0000000003 dbname rpname -scores,game=g1,player=p8 value=344 1447114140 +scores,game=g1,player=p9 value=809 0000000003 dbname rpname -scores,game=g1,player=p9 value=809 1447114140 +scores,game=g1,player=p10 value=97 0000000003 dbname rpname -scores,game=g1,player=p10 value=97 1447114140 +scores,game=g1,player=p11 value=534 0000000003 dbname rpname -scores,game=g1,player=p11 value=534 1447114140 +scores,game=g1,player=p12 value=887 0000000003 dbname rpname -scores,game=g1,player=p12 value=887 1447114140 +scores,game=g1,player=p13 value=272 0000000003 dbname rpname -scores,game=g1,player=p13 value=272 1447114140 +scores,game=g1,player=p14 value=277 0000000003 dbname rpname -scores,game=g1,player=p14 value=277 1447114140 +scores,game=g1,player=p15 value=872 0000000003 dbname rpname -scores,game=g1,player=p15 value=872 1447114140 +scores,game=g1,player=p16 value=863 0000000003 dbname rpname -scores,game=g1,player=p16 value=863 1447114140 +scores,game=g1,player=p17 value=311 0000000003 dbname rpname -scores,game=g1,player=p17 value=311 1447114140 +scores,game=g1,player=p18 value=749 0000000003 dbname rpname -scores,game=g1,player=p18 value=749 1447114140 +scores,game=g1,player=p19 value=926 0000000003 dbname rpname -scores,game=g1,player=p19 value=926 1447114140 +scores,game=g0,player=p0 value=971 0000000004 dbname rpname -top_scores_gap,game=g1 gap=142,topFirst=998,topLast=856 1447114070 +scores,game=g0,player=p1 value=244 0000000004 dbname rpname -scores,game=g0,player=p0 value=971 1447114141 +scores,game=g0,player=p2 value=599 0000000004 dbname rpname -scores,game=g0,player=p1 value=244 1447114141 +scores,game=g0,player=p3 value=172 0000000004 dbname rpname -scores,game=g0,player=p2 value=599 1447114141 +scores,game=g0,player=p4 value=985 0000000004 dbname rpname -scores,game=g0,player=p3 value=172 
1447114141 +scores,game=g0,player=p5 value=494 0000000004 dbname rpname -scores,game=g0,player=p4 value=985 1447114141 +scores,game=g0,player=p6 value=897 0000000004 dbname rpname -scores,game=g0,player=p5 value=494 1447114141 +scores,game=g0,player=p7 value=338 0000000004 dbname rpname -scores,game=g0,player=p6 value=897 1447114141 +scores,game=g0,player=p8 value=90 0000000004 dbname rpname -scores,game=g0,player=p7 value=338 1447114141 +scores,game=g0,player=p9 value=765 0000000004 dbname rpname -scores,game=g0,player=p8 value=90 1447114141 +scores,game=g0,player=p10 value=665 0000000004 dbname rpname -scores,game=g0,player=p9 value=765 1447114141 +scores,game=g0,player=p11 value=215 0000000004 dbname rpname -scores,game=g0,player=p10 value=665 1447114141 +scores,game=g0,player=p12 value=929 0000000004 dbname rpname -scores,game=g0,player=p11 value=215 1447114141 +scores,game=g0,player=p13 value=186 0000000004 dbname rpname -scores,game=g0,player=p12 value=929 1447114141 +scores,game=g0,player=p14 value=631 0000000004 dbname rpname -scores,game=g0,player=p13 value=186 1447114141 +scores,game=g0,player=p15 value=489 0000000004 dbname rpname -scores,game=g0,player=p14 value=631 1447114141 +scores,game=g0,player=p16 value=97 0000000004 dbname rpname -scores,game=g0,player=p15 value=489 1447114141 +scores,game=g0,player=p17 value=725 0000000004 dbname rpname -scores,game=g0,player=p16 value=97 1447114141 +scores,game=g0,player=p18 value=432 0000000004 dbname rpname -scores,game=g0,player=p17 value=725 1447114141 +scores,game=g0,player=p19 value=806 0000000004 dbname rpname -scores,game=g0,player=p18 value=432 1447114141 +scores,game=g1,player=p0 value=431 0000000004 dbname rpname -scores,game=g0,player=p19 value=806 1447114141 +scores,game=g1,player=p1 value=473 0000000004 dbname rpname -scores,game=g1,player=p0 value=431 1447114141 +scores,game=g1,player=p2 value=495 0000000004 dbname rpname -scores,game=g1,player=p1 value=473 1447114141 +scores,game=g1,player=p3 value=853 0000000004 dbname rpname -scores,game=g1,player=p2 value=495 1447114141 +scores,game=g1,player=p4 value=558 0000000004 dbname rpname -scores,game=g1,player=p3 value=853 1447114141 +scores,game=g1,player=p5 value=345 0000000004 dbname rpname -scores,game=g1,player=p4 value=558 1447114141 +scores,game=g1,player=p6 value=988 0000000004 dbname rpname -scores,game=g1,player=p5 value=345 1447114141 +scores,game=g1,player=p7 value=726 0000000004 dbname rpname -scores,game=g1,player=p6 value=988 1447114141 +scores,game=g1,player=p8 value=639 0000000004 dbname rpname -scores,game=g1,player=p7 value=726 1447114141 +scores,game=g1,player=p9 value=266 0000000004 dbname rpname -scores,game=g1,player=p8 value=639 1447114141 +scores,game=g1,player=p10 value=613 0000000004 dbname rpname -scores,game=g1,player=p9 value=266 1447114141 +scores,game=g1,player=p11 value=382 0000000004 dbname rpname -scores,game=g1,player=p10 value=613 1447114141 +scores,game=g1,player=p12 value=596 0000000004 dbname rpname -scores,game=g1,player=p11 value=382 1447114141 +scores,game=g1,player=p13 value=138 0000000004 dbname rpname -scores,game=g1,player=p12 value=596 1447114141 +scores,game=g1,player=p14 value=480 0000000004 dbname rpname -scores,game=g1,player=p13 value=138 1447114141 +scores,game=g1,player=p15 value=53 0000000004 dbname rpname -scores,game=g1,player=p14 value=480 1447114141 +scores,game=g1,player=p16 value=748 0000000004 dbname rpname -scores,game=g1,player=p15 value=53 1447114141 +scores,game=g1,player=p17 value=434 0000000004 dbname 
rpname -scores,game=g1,player=p16 value=748 1447114141 +scores,game=g1,player=p18 value=739 0000000004 dbname rpname -scores,game=g1,player=p17 value=434 1447114141 +scores,game=g1,player=p19 value=861 0000000004 dbname rpname -scores,game=g1,player=p18 value=739 1447114141 +scores,game=g0,player=p16 value=748 0000000005 dbname rpname -scores,game=g1,player=p19 value=861 1447114141 +scores,game=g1,player=p16 value=748 0000000005 dbname rpname -scores,game=g0,player=p16 value=748 1447114142 +scores,game=g0,player=p17 value=434 0000000006 dbname rpname -scores,game=g1,player=p16 value=748 1447114142 -dbname -rpname -scores,game=g0,player=p17 value=434 1447114143 -dbname -rpname -scores,game=g1,player=p17 value=434 1447114143 +scores,game=g1,player=p17 value=434 0000000006 diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index 17525bba5..20d33a6e6 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -4201,6 +4201,8 @@ building .as('building', 'floor') .on('building') .streamName('power_floor_percentage') + |log() + .prefix('JOINED') |eval(lambda: "floor.value" / "building.value") .as('value') |httpOut('TestStream_JoinOn_AcrossMeasurement') @@ -5083,22 +5085,24 @@ stream t.Fatal(err) } - for _, tc := range testCases { - t.Log("Method:", tc.Method) - var script bytes.Buffer - if tc.Args == "" { - tc.Args = "'value'" - } - tmpl.Execute(&script, tc) - testStreamerWithOutput( - t, - "TestStream_InfluxQL_Float", - script.String(), - 13*time.Second, - tc.ER, - false, - nil, - ) + for i, tc := range testCases { + t.Run(fmt.Sprintf("%s-%d", tc.Method, i), func(t *testing.T) { + t.Log("Method:", tc.Method) + var script bytes.Buffer + if tc.Args == "" { + tc.Args = "'value'" + } + tmpl.Execute(&script, tc) + testStreamerWithOutput( + t, + "TestStream_InfluxQL_Float", + script.String(), + 13*time.Second, + tc.ER, + false, + nil, + ) + }) } } @@ -9518,7 +9522,7 @@ stream func TestStream_TopSelector(t *testing.T) { var script = ` -var topScores = stream +stream |from() .measurement('scores') // Get the most recent score for each player @@ -9531,14 +9535,7 @@ var topScores = stream // Calculate the top 5 scores per game |groupBy('game') |top(5, 'last', 'player') - -topScores - |httpOut('top_scores') - -topScores - |sample(4s) - |count('top') - |httpOut('top_scores_sampled') + |httpOut('TestStream_TopSelector') ` tw := time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC) @@ -9571,78 +9568,73 @@ topScores }, } - sampleER := models.Result{ + testStreamerWithOutput(t, "TestStream_TopSelector", script, 10*time.Second, er, false, nil) +} + +func TestStream_Sample_Count(t *testing.T) { + var script = ` +stream + |from() + .measurement('packets') + |sample(2) + |window() + .every(4s) + .period(4s) + .align() + |httpOut('TestStream_Sample') +` + + er := models.Result{ Series: models.Rows{ { - Name: "scores", - Tags: map[string]string{"game": "g0"}, - Columns: []string{"time", "count"}, - Values: [][]interface{}{{ - time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), - 5.0, - }}, - }, - { - Name: "scores", - Tags: map[string]string{"game": "g1"}, - Columns: []string{"time", "count"}, - Values: [][]interface{}{{ - time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), - 5.0, - }}, + Name: "packets", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), + 1004.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC), + 1006.0, + }, + }, }, }, } - clock, et, replayErr, tm := testStreamer(t, "TestStream_TopSelector", script, nil) 
- defer tm.Close() - - err := fastForwardTask(clock, et, replayErr, tm, 10*time.Second) - if err != nil { - t.Error(err) - } - - // Get the result - output, err := et.GetOutput("top_scores") - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(output.Endpoint()) - if err != nil { - t.Fatal(err) - } - - // Assert we got the expected result - result := models.Result{} - err = json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - t.Fatal(err) - } - if eq, msg := compareResults(er, result); !eq { - t.Error(msg) - } + testStreamerWithOutput(t, "TestStream_Sample", script, 12*time.Second, er, false, nil) +} - // Get the result - output, err = et.GetOutput("top_scores_sampled") - if err != nil { - t.Fatal(err) - } +func TestStream_Sample_Time(t *testing.T) { + var script = ` +stream + |from() + .measurement('packets') + |sample(3s) + |window() + .every(4s) + .period(4s) + .align() + |httpOut('TestStream_Sample') +` - resp, err = http.Get(output.Endpoint()) - if err != nil { - t.Fatal(err) + er := models.Result{ + Series: models.Rows{ + { + Name: "packets", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC), + 1006.0, + }, + }, + }, + }, } - // Assert we got the expected result - result = models.Result{} - err = json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - t.Fatal(err) - } - if eq, msg := compareResults(sampleER, result); !eq { - t.Error(msg) - } + testStreamerWithOutput(t, "TestStream_Sample", script, 12*time.Second, er, false, nil) } func TestStream_DerivativeCardinality(t *testing.T) { @@ -9877,7 +9869,7 @@ stream }, "max3": map[string]interface{}{ "emitted": int64(0), - "working_cardinality": int64(0), + "working_cardinality": int64(9), "avg_exec_time_ns": int64(0), "errors": int64(0), "collected": int64(81), diff --git a/join.go b/join.go index 552b5c68c..d18cdbaa2 100644 --- a/join.go +++ b/join.go @@ -7,29 +7,29 @@ import ( "time" "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" - "github.com/influxdata/kapacitor/timer" + "github.com/pkg/errors" ) type JoinNode struct { node - j *pipeline.JoinNode - fill influxql.FillOption - fillValue interface{} - groups map[models.GroupID]*group - mu sync.RWMutex - runningGroups sync.WaitGroup + j *pipeline.JoinNode + fill influxql.FillOption + fillValue interface{} + + groupsMu sync.RWMutex + groups map[models.GroupID]*joinGroup + + // Represents the lower bound of times per group per source + lowMarks map[srcGroup]time.Time // Buffer for caching points that need to be matched with specific points. matchGroupsBuffer map[models.GroupID][]srcPoint // Buffer for caching specific points until their match arrivces. 
specificGroupsBuffer map[models.GroupID][]srcPoint - // Represents the lower bound of times per group per parent - lowMarks map[srcGroup]time.Time - - groupsMu sync.RWMutex reported map[int]bool allReported bool @@ -40,6 +40,7 @@ func newJoinNode(et *ExecutingTask, n *pipeline.JoinNode, l *log.Logger) (*JoinN jn := &JoinNode{ j: n, node: node{Node: n, et: et, logger: l}, + groups: make(map[models.GroupID]*joinGroup), matchGroupsBuffer: make(map[models.GroupID][]srcPoint), specificGroupsBuffer: make(map[models.GroupID][]srcPoint), lowMarks: make(map[srcGroup]time.Time), @@ -66,72 +67,61 @@ func newJoinNode(et *ExecutingTask, n *pipeline.JoinNode, l *log.Logger) (*JoinN return jn, nil } -func (j *JoinNode) runJoin([]byte) error { - j.groups = make(map[models.GroupID]*group) +func (n *JoinNode) runJoin([]byte) error { + consumer := edge.NewMultiConsumerWithStats(n.ins, n) valueF := func() int64 { - j.groupsMu.RLock() - l := len(j.groups) - j.groupsMu.RUnlock() + n.groupsMu.RLock() + l := len(n.groups) + n.groupsMu.RUnlock() return int64(l) } - j.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - groupErrs := make(chan error, 1) - done := make(chan struct{}, len(j.ins)) - - for i := range j.ins { - // Start gorouting per parent so we do not deadlock. - // This way independent of the order that parents receive data - // we can handle it. - t := j.et.tm.TimingService.NewTimer(j.statMap.Get(statAverageExecTime).(timer.Setter)) - go func(i int, t timer.Timer) { - defer func() { - done <- struct{}{} - }() - in := j.ins[i] - for p, ok := in.Next(); ok; p, ok = in.Next() { - t.Start() - srcP := srcPoint{src: i, p: p} - if len(j.j.Dimensions) > 0 { - // Match points with their group based on join dimensions. - j.matchPoints(srcP, groupErrs) - } else { - // Just send point on to group, we are not joining on specific dimensions. - func() { - j.mu.Lock() - defer j.mu.Unlock() - group := j.getGroup(p, groupErrs) - // Send current point - group.points <- srcP - }() - } - t.Stop() - } - }(i, t) - } - for range j.ins { - select { - case <-done: - case err := <-groupErrs: + n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) + + return consumer.Consume() +} + +func (n *JoinNode) BufferedBatch(src int, batch edge.BufferedBatchMessage) error { + return n.doMessage(src, batch) +} + +func (n *JoinNode) Point(src int, p edge.PointMessage) error { + return n.doMessage(src, p) +} + +func (n *JoinNode) Barrier(src int, b edge.BarrierMessage) error { + return edge.Forward(n.outs, b) +} + +func (n *JoinNode) Finish() error { + // No more points are coming signal all groups to finish up. + for _, group := range n.groups { + if err := group.Finish(); err != nil { return err } } - // No more points are coming signal all groups to finish up. - j.groupsMu.RLock() - for _, group := range j.groups { - close(group.points) - } - j.groupsMu.RUnlock() + return nil +} - j.runningGroups.Wait() - j.groupsMu.RLock() - for _, group := range j.groups { - err := group.emitAll() - if err != nil { - return err - } +type messageMeta interface { + edge.Message + edge.PointMeta +} +type srcPoint struct { + Src int + Msg messageMeta +} + +func (n *JoinNode) doMessage(src int, m messageMeta) error { + n.timer.Start() + defer n.timer.Stop() + if len(n.j.Dimensions) > 0 { + // Match points with their group based on join dimensions. + n.matchPoints(srcPoint{Src: src, Msg: m}) + } else { + // Just send point on to group, we are not joining on specific dimensions. 
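The JoinNode changes above replace the per-parent goroutines and points channel with the edge package's consumer/receiver dispatch: a consumer drains each parent edge and calls typed methods such as Point, BufferedBatch, Barrier, and Finish. A minimal, self-contained sketch of that dispatch shape (hypothetical names, not the real kapacitor/edge API):

```go
package main

import "fmt"

// Hypothetical stand-ins for the typed messages named in the diff above.
type Message interface{}

type PointMessage struct{ Name string }

type BarrierMessage struct{}

// MultiReceiver mirrors the shape of the methods JoinNode now implements
// (Point, Barrier, Finish); it is not the real kapacitor/edge interface.
type MultiReceiver interface {
	Point(src int, p PointMessage) error
	Barrier(src int, b BarrierMessage) error
	Finish() error
}

// consume drains each parent's messages in order, dispatches them by type,
// and signals Finish once every input is exhausted.
func consume(ins [][]Message, r MultiReceiver) error {
	for src, in := range ins {
		for _, m := range in {
			var err error
			switch msg := m.(type) {
			case PointMessage:
				err = r.Point(src, msg)
			case BarrierMessage:
				err = r.Barrier(src, msg)
			}
			if err != nil {
				return err
			}
		}
	}
	return r.Finish()
}

type debugReceiver struct{}

func (debugReceiver) Point(src int, p PointMessage) error { fmt.Println("point", src, p.Name); return nil }
func (debugReceiver) Barrier(int, BarrierMessage) error   { return nil }
func (debugReceiver) Finish() error                       { fmt.Println("finish"); return nil }

func main() {
	_ = consume([][]Message{{PointMessage{Name: "cpu"}, BarrierMessage{}}}, debugReceiver{})
}
```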
+ group := n.getOrCreateGroup(m.GroupID()) + group.Collect(src, m) } - j.groupsMu.RUnlock() return nil } @@ -139,62 +129,60 @@ func (j *JoinNode) runJoin([]byte) error { // with the less specific points as they arrive. // // Where 'more specific' means, that a point has more dimensions than the join.on dimensions. -func (j *JoinNode) matchPoints(p srcPoint, groupErrs chan<- error) { +func (n *JoinNode) matchPoints(p srcPoint) { // Specific points may be sent to the joinset without a matching point, but not the other way around. // This is because the specific points have the needed specific tag data. // The joinset will later handle the fill inner/outer join operations. - j.mu.Lock() - defer j.mu.Unlock() - if !j.allReported { - j.reported[p.src] = true - j.allReported = len(j.reported) == len(j.ins) + if !n.allReported { + n.reported[p.Src] = true + n.allReported = len(n.reported) == len(n.ins) } - t := p.p.PointTime().Round(j.j.Tolerance) + t := p.Msg.Time().Round(n.j.Tolerance) groupId := models.ToGroupID( - p.p.PointName(), - p.p.PointTags(), + p.Msg.Name(), + p.Msg.GroupInfo().Tags, models.Dimensions{ - ByName: p.p.PointDimensions().ByName, - TagNames: j.j.Dimensions, + ByName: p.Msg.Dimensions().ByName, + TagNames: n.j.Dimensions, }, ) // Update current srcGroup lowMark - srcG := srcGroup{src: p.src, groupId: groupId} - j.lowMarks[srcG] = t + srcG := srcGroup{src: p.Src, groupId: groupId} + n.lowMarks[srcG] = t // Determine lowMark, the oldest time per parent per group. var lowMark time.Time - if j.allReported { - for s := 0; s < len(j.ins); s++ { + if n.allReported { + for s := 0; s < len(n.ins); s++ { sg := srcGroup{src: s, groupId: groupId} - if lm := j.lowMarks[sg]; lowMark.IsZero() || lm.Before(lowMark) { + if lm := n.lowMarks[sg]; lowMark.IsZero() || lm.Before(lowMark) { lowMark = lm } } } // Check for cached specific points that can now be sent alone. - if j.allReported { + if n.allReported { // Send all cached specific point that won't match anymore. var i int - buf := j.specificGroupsBuffer[groupId] + buf := n.specificGroupsBuffer[groupId] l := len(buf) for i = 0; i < l; i++ { - st := buf[i].p.PointTime().Round(j.j.Tolerance) + st := buf[i].Msg.Time().Round(n.j.Tolerance) if st.Before(lowMark) { // Send point by itself since it won't get a match. - j.sendSpecificPoint(buf[i], groupErrs) + n.sendSpecificPoint(buf[i]) } else { break } } // Remove all sent points. - j.specificGroupsBuffer[groupId] = buf[i:] + n.specificGroupsBuffer[groupId] = buf[i:] } - if len(p.p.PointDimensions().TagNames) > len(j.j.Dimensions) { + if len(p.Msg.Dimensions().TagNames) > len(n.j.Dimensions) { // We have a specific point and three options: // 1. Find the cached match point and send both to group. // 2. Cache the specific point for later. @@ -202,156 +190,127 @@ func (j *JoinNode) matchPoints(p srcPoint, groupErrs chan<- error) { // Search for a match. // Also purge any old match points. - matches := j.matchGroupsBuffer[groupId] + matches := n.matchGroupsBuffer[groupId] matched := false var i int l := len(matches) for i = 0; i < l; i++ { match := matches[i] - pt := match.p.PointTime().Round(j.j.Tolerance) + pt := match.Msg.Time().Round(n.j.Tolerance) if pt.Equal(t) { // Option 1, send both points - j.sendMatchPoint(p, match, groupErrs) + n.sendMatchPoint(p, match) matched = true } if !pt.Before(lowMark) { break } } - if j.allReported { + if n.allReported { // Can't trust lowMark until all parents have reported. // Remove any unneeded match points. 
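The matchPoints logic above tracks, per source and group, the newest rounded timestamp seen, and treats the minimum across sources as a low watermark: buffered specific points older than it can no longer be matched and are sent on alone. A compact sketch of that minimum computation, assuming every source has already reported (hypothetical names):

```go
package sketch

import "time"

// srcGroup identifies one parent source within one group.
type srcGroup struct {
	src     int
	groupID string
}

// lowWatermark returns the oldest of the per-source marks for a group.
// Points with timestamps before this value cannot receive a match anymore.
func lowWatermark(marks map[srcGroup]time.Time, groupID string, sources int) time.Time {
	var low time.Time
	for s := 0; s < sources; s++ {
		m := marks[srcGroup{src: s, groupID: groupID}]
		if low.IsZero() || m.Before(low) {
			low = m
		}
	}
	return low
}
```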
- j.matchGroupsBuffer[groupId] = matches[i:] + n.matchGroupsBuffer[groupId] = matches[i:] } // If the point didn't match that leaves us with options 2 and 3. if !matched { - if j.allReported && t.Before(lowMark) { + if n.allReported && t.Before(lowMark) { // Option 3 // Send this specific point by itself since it won't get a match. - j.sendSpecificPoint(p, groupErrs) + n.sendSpecificPoint(p) } else { // Option 2 // Cache this point for when its match arrives. - j.specificGroupsBuffer[groupId] = append(j.specificGroupsBuffer[groupId], p) + n.specificGroupsBuffer[groupId] = append(n.specificGroupsBuffer[groupId], p) } } } else { // Cache match point. - j.matchGroupsBuffer[groupId] = append(j.matchGroupsBuffer[groupId], p) + n.matchGroupsBuffer[groupId] = append(n.matchGroupsBuffer[groupId], p) // Send all specific points that match, to the group. var i int - buf := j.specificGroupsBuffer[groupId] + buf := n.specificGroupsBuffer[groupId] l := len(buf) for i = 0; i < l; i++ { - st := buf[i].p.PointTime().Round(j.j.Tolerance) + st := buf[i].Msg.Time().Round(n.j.Tolerance) if st.Equal(t) { - j.sendMatchPoint(buf[i], p, groupErrs) + n.sendMatchPoint(buf[i], p) } else { break } } // Remove all sent points - j.specificGroupsBuffer[groupId] = buf[i:] + n.specificGroupsBuffer[groupId] = buf[i:] } } // Add the specific tags from the specific point to the matched point // and then send both on to the group. -func (j *JoinNode) sendMatchPoint(specific, matched srcPoint, groupErrs chan<- error) { - np := matched.p.Copy().Setter() - for key, value := range specific.p.PointTags() { - np.SetNewDimTag(key, value) - } - np.UpdateGroup() - group := j.getGroup(specific.p, groupErrs) - // Send current point - group.points <- specific - // Send new matched point - matched.p = np.Interface() - group.points <- matched +func (n *JoinNode) sendMatchPoint(specific, matched srcPoint) { + var newMatched messageMeta + switch msg := matched.Msg.(type) { + case edge.BufferedBatchMessage: + b := msg.ShallowCopy() + b.SetBegin(b.Begin().ShallowCopy()) + b.Begin().SetTags(specific.Msg.GroupInfo().Tags) + newMatched = b + case edge.PointMessage: + p := msg.ShallowCopy() + info := specific.Msg.GroupInfo() + p.SetTagsAndDimensions(info.Tags, info.Dimensions) + newMatched = p + } + group := n.getOrCreateGroup(specific.Msg.GroupID()) + // Collect specific point + group.Collect(specific.Src, specific.Msg) + // Collect new matched point + group.Collect(matched.Src, newMatched) } // Send only the specific point to the group -func (j *JoinNode) sendSpecificPoint(specific srcPoint, groupErrs chan<- error) { - group := j.getGroup(specific.p, groupErrs) - // Send current point - group.points <- specific +func (n *JoinNode) sendSpecificPoint(specific srcPoint) { + group := n.getOrCreateGroup(specific.Msg.GroupID()) + group.Collect(specific.Src, specific.Msg) } // safely get the group for the point or create one if it doesn't exist. -func (j *JoinNode) getGroup(p models.PointInterface, groupErrs chan<- error) *group { - j.groupsMu.RLock() - group := j.groups[p.PointGroup()] - j.groupsMu.RUnlock() +func (n *JoinNode) getOrCreateGroup(groupID models.GroupID) *joinGroup { + group := n.groups[groupID] if group == nil { - group = newGroup(len(j.ins), j) - j.groupsMu.Lock() - j.groups[p.PointGroup()] = group - j.runningGroups.Add(1) - j.groupsMu.Unlock() - go func() { - err := group.run() - if err != nil { - j.incrementErrorCount() - j.logger.Println("E! 
join group error:", err) - select { - case groupErrs <- err: - default: - } - } - }() + group = n.newGroup(len(n.ins)) + n.groupsMu.Lock() + n.groups[groupID] = group + n.groupsMu.Unlock() } return group } -// A groupId and its parent -type srcGroup struct { - src int - groupId models.GroupID -} - -// represents an incoming data point and which parent it came from -type srcPoint struct { - src int - p models.PointInterface +func (n *JoinNode) newGroup(count int) *joinGroup { + return &joinGroup{ + n: n, + sets: make(map[time.Time][]*joinset), + head: make([]time.Time, count), + } } // handles emitting joined sets once enough data has arrived from parents. -type group struct { +type joinGroup struct { + n *JoinNode + sets map[time.Time][]*joinset head []time.Time oldestTime time.Time - j *JoinNode - points chan srcPoint } -func newGroup(i int, j *JoinNode) *group { - return &group{ - sets: make(map[time.Time][]*joinset), - head: make([]time.Time, i), - j: j, - points: make(chan srcPoint), - } +func (g *joinGroup) Finish() error { + return g.emitAll() } -// start consuming incoming points -func (g *group) run() error { - defer g.j.runningGroups.Done() - for sp := range g.points { - err := g.collect(sp.src, sp.p) - if err != nil { - return err - } - } - return nil -} - -// collect a point from a given parent. +// Collect a point from a given parent. // emit the oldest set if we have collected enough data. -func (g *group) collect(i int, p models.PointInterface) error { - t := p.PointTime().Round(g.j.j.Tolerance) +func (g *joinGroup) Collect(src int, p timeMessage) error { + t := p.Time().Round(g.n.j.Tolerance) if t.Before(g.oldestTime) || g.oldestTime.IsZero() { g.oldestTime = t } @@ -359,45 +318,25 @@ func (g *group) collect(i int, p models.PointInterface) error { var set *joinset sets := g.sets[t] if len(sets) == 0 { - set = newJoinset( - g.j, - g.j.j.StreamName, - g.j.fill, - g.j.fillValue, - g.j.j.Names, - g.j.j.Delimiter, - g.j.j.Tolerance, - t, - g.j.logger, - ) + set = g.newJoinset(t) sets = append(sets, set) g.sets[t] = sets } - for j := 0; j < len(sets); j++ { - if !sets[j].Has(i) { - set = sets[j] + for i := 0; i < len(sets); i++ { + if !sets[i].Has(src) { + set = sets[i] break } } if set == nil { - set = newJoinset( - g.j, - g.j.j.StreamName, - g.j.fill, - g.j.fillValue, - g.j.j.Names, - g.j.j.Delimiter, - g.j.j.Tolerance, - t, - g.j.logger, - ) + set = g.newJoinset(t) sets = append(sets, set) g.sets[t] = sets } - set.Set(i, p) + set.Set(src, p) // Update head - g.head[i] = t + g.head[src] = t onlyReadySets := false for _, t := range g.head { @@ -413,8 +352,22 @@ func (g *group) collect(i int, p models.PointInterface) error { return nil } +func (g *joinGroup) newJoinset(t time.Time) *joinset { + return newJoinset( + g.n, + g.n.j.StreamName, + g.n.fill, + g.n.fillValue, + g.n.j.Names, + g.n.j.Delimiter, + g.n.j.Tolerance, + t, + g.n.logger, + ) +} + // emit a set and update the oldestTime. -func (g *group) emit(onlyReadySets bool) error { +func (g *joinGroup) emit(onlyReadySets bool) error { sets := g.sets[g.oldestTime] i := 0 for ; i < len(sets); i++ { @@ -443,7 +396,7 @@ func (g *group) emit(onlyReadySets bool) error { } // emit sets until we have none left. 
-func (g *group) emitAll() error { +func (g *joinGroup) emitAll() error { var lastErr error for len(g.sets) > 0 { err := g.emit(false) @@ -455,35 +408,41 @@ func (g *group) emitAll() error { } // emit a single joined set -func (g *group) emitJoinedSet(set *joinset) error { +func (g *joinGroup) emitJoinedSet(set *joinset) error { if set.name == "" { - set.name = set.First().PointName() + set.name = set.First().(edge.NameGetter).Name() } - switch g.j.Wants() { + switch g.n.Wants() { case pipeline.StreamEdge: - p, ok := set.JoinIntoPoint() - if ok { - for _, out := range g.j.outs { - err := out.CollectPoint(p) - if err != nil { - return err - } + p, err := set.JoinIntoPoint() + if err != nil { + return errors.Wrap(err, "failed to join into point") + } + if p != nil { + if err := edge.Forward(g.n.outs, p); err != nil { + return err } } case pipeline.BatchEdge: - b, ok := set.JoinIntoBatch() - if ok { - for _, out := range g.j.outs { - err := out.CollectBatch(b) - if err != nil { - return err - } + b, err := set.JoinIntoBatch() + if err != nil { + return errors.Wrap(err, "failed to join into batch") + } + if b != nil { + if err := edge.Forward(g.n.outs, b); err != nil { + return err } } } return nil } +// A groupId and its parent +type srcGroup struct { + src int + groupId models.GroupID +} + // represents a set of points or batches from the same joined time type joinset struct { j *JoinNode @@ -495,7 +454,7 @@ type joinset struct { time time.Time tolerance time.Duration - values []models.PointInterface + values []edge.Message expected int size int @@ -526,7 +485,7 @@ func newJoinset( prefixes: prefixes, delimiter: delimiter, expected: expected, - values: make([]models.PointInterface, expected), + values: make([]edge.Message, expected), first: expected, time: time, tolerance: tolerance, @@ -543,7 +502,7 @@ func (js *joinset) Has(i int) bool { } // add a point to the set from a given parent index. 
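The join sets above are keyed by point time rounded to the configured tolerance, so points from different parents that land in the same tolerance window join into one set. A small sketch of that bucketing (hypothetical helpers, standard library only):

```go
package sketch

import "time"

// bucket returns the join-set key for a point time under the given tolerance.
// A non-positive tolerance leaves the timestamp unchanged, matching time.Round semantics.
func bucket(t time.Time, tolerance time.Duration) time.Time {
	if tolerance <= 0 {
		return t
	}
	return t.Round(tolerance)
}

// groupBySet collects point times from several sources into per-bucket slices,
// mirroring how a join set gathers values from its parents for one rounded time.
func groupBySet(times []time.Time, tolerance time.Duration) map[time.Time][]time.Time {
	sets := make(map[time.Time][]time.Time)
	for _, t := range times {
		k := bucket(t, tolerance)
		sets[k] = append(sets[k], t)
	}
	return sets
}
```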
-func (js *joinset) Set(i int, v models.PointInterface) { +func (js *joinset) Set(i int, v edge.Message) { if i < js.first { js.first = i } @@ -552,55 +511,67 @@ func (js *joinset) Set(i int, v models.PointInterface) { } // a valid point in the set -func (js *joinset) First() models.PointInterface { +func (js *joinset) First() edge.Message { return js.values[js.first] } // join all points into a single point -func (js *joinset) JoinIntoPoint() (models.Point, bool) { - fields := make(models.Fields, js.size*len(js.First().PointFields())) - for i, p := range js.values { - if p == nil { +func (js *joinset) JoinIntoPoint() (edge.PointMessage, error) { + first, ok := js.First().(edge.PointMessage) + if !ok { + return nil, fmt.Errorf("unexpected type of first value %T", js.First()) + } + firstFields := first.Fields() + fields := make(models.Fields, js.size*len(firstFields)) + for i, v := range js.values { + if v == nil { switch js.fill { case influxql.NullFill: - for k := range js.First().PointFields() { + for k := range firstFields { fields[js.prefixes[i]+js.delimiter+k] = nil } case influxql.NumberFill: - for k := range js.First().PointFields() { + for k := range firstFields { fields[js.prefixes[i]+js.delimiter+k] = js.fillValue } default: // inner join no valid point possible - return models.Point{}, false + return nil, nil } } else { - for k, v := range p.PointFields() { + p, ok := v.(edge.FieldGetter) + if !ok { + return nil, fmt.Errorf("unexpected type %T", v) + } + for k, v := range p.Fields() { fields[js.prefixes[i]+js.delimiter+k] = v } } } - p := models.Point{ - Name: js.name, - Group: js.First().PointGroup(), - Tags: js.First().PointTags(), - Dimensions: js.First().PointDimensions(), - Time: js.time, - Fields: fields, - } - - return p, true + np := edge.NewPointMessage( + js.name, "", "", + first.Dimensions(), + fields, + first.GroupInfo().Tags, + js.time, + ) + return np, nil } // join all batches the set into a single batch -func (js *joinset) JoinIntoBatch() (models.Batch, bool) { - newBatch := models.Batch{ - Name: js.name, - Group: js.First().PointGroup(), - Tags: js.First().PointTags(), - ByName: js.First().PointDimensions().ByName, - TMax: js.time, - } +func (js *joinset) JoinIntoBatch() (edge.BufferedBatchMessage, error) { + first, ok := js.First().(edge.BufferedBatchMessage) + if !ok { + return nil, fmt.Errorf("unexpected type of first value %T", js.First()) + } + newBegin := edge.NewBeginBatchMessage( + js.name, + first.Tags(), + first.Dimensions().ByName, + js.time, + 0, + ) + newPoints := make([]edge.BatchPointMessage, 0, len(first.Points())) empty := make([]bool, js.expected) emptyCount := 0 indexes := make([]int, js.expected) @@ -608,7 +579,7 @@ func (js *joinset) JoinIntoBatch() (models.Batch, bool) { BATCH_POINT: for emptyCount < js.expected { - set := make([]*models.BatchPoint, js.expected) + set := make([]edge.BatchPointMessage, js.expected) setTime := time.Time{} count := 0 for i, batch := range js.values { @@ -620,19 +591,17 @@ BATCH_POINT: empty[i] = true continue } - b, ok := batch.(models.Batch) + b, ok := batch.(edge.BufferedBatchMessage) if !ok { - js.j.incrementErrorCount() - js.logger.Printf("E! 
invalid join data got %T expected models.Batch", batch) - return models.Batch{}, false + return nil, fmt.Errorf("unexpected type of batch value %T", batch) } - if indexes[i] == len(b.Points) { + if indexes[i] == len(b.Points()) { emptyCount++ empty[i] = true continue } - bp := b.Points[indexes[i]] - t := bp.Time.Round(js.tolerance) + bp := b.Points()[indexes[i]] + t := bp.Time().Round(js.tolerance) if setTime.IsZero() { setTime = t } @@ -645,16 +614,16 @@ BATCH_POINT: } set[j] = nil } - set[i] = &bp + set[i] = bp indexes[i]++ count = 1 } else if t.Equal(setTime) { if fieldNames == nil { - for k := range bp.Fields { + for k := range bp.Fields() { fieldNames = append(fieldNames, k) } } - set[i] = &bp + set[i] = bp indexes[i]++ count++ } @@ -682,19 +651,24 @@ BATCH_POINT: continue BATCH_POINT } } else { - for k, v := range bp.Fields { + for k, v := range bp.Fields() { fields[js.prefixes[i]+js.delimiter+k] = v } } } - bp := models.BatchPoint{ - Tags: newBatch.Tags, - Time: setTime, - Fields: fields, - } - newBatch.Points = append(newBatch.Points, bp) - } - return newBatch, true + bp := edge.NewBatchPointMessage( + fields, + newBegin.Tags(), + setTime, + ) + newPoints = append(newPoints, bp) + } + newBegin.SetSizeHint(len(newPoints)) + return edge.NewBufferedBatchMessage( + newBegin, + newPoints, + edge.NewEndBatchMessage(), + ), nil } type durationVar struct { diff --git a/k8s_autoscale.go b/k8s_autoscale.go index 12868706e..968f75a64 100644 --- a/k8s_autoscale.go +++ b/k8s_autoscale.go @@ -3,9 +3,9 @@ package kapacitor import ( "fmt" "log" - "sync" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -27,7 +27,7 @@ type K8sAutoscaleNode struct { client client.Client - replicasExprs map[models.GroupID]stateful.Expression + replicasExpr stateful.Expression replicasScopePool stateful.ScopePool resourceStates map[string]resourceState @@ -36,8 +36,6 @@ type K8sAutoscaleNode struct { decreaseCount *expvar.Int cooldownDropsCount *expvar.Int - replicasExprsMu sync.RWMutex - min int max int } @@ -62,70 +60,87 @@ func newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log kn.node.runF = kn.runAutoscale // Initialize the replicas lambda expression scope pool if n.Replicas != nil { - kn.replicasExprs = make(map[models.GroupID]stateful.Expression) + expr, err := stateful.NewExpression(n.Replicas.Expression) + if err != nil { + return nil, err + } + kn.replicasExpr = expr kn.replicasScopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Replicas.Expression)) } return kn, nil } -func (k *K8sAutoscaleNode) runAutoscale([]byte) error { - valueF := func() int64 { - k.replicasExprsMu.RLock() - l := len(k.replicasExprs) - k.replicasExprsMu.RUnlock() - return int64(l) +func (n *K8sAutoscaleNode) runAutoscale([]byte) error { + n.increaseCount = &expvar.Int{} + n.decreaseCount = &expvar.Int{} + n.cooldownDropsCount = &expvar.Int{} + + n.statMap.Set(statsK8sIncreaseEventsCount, n.increaseCount) + n.statMap.Set(statsK8sDecreaseEventsCount, n.decreaseCount) + n.statMap.Set(statsK8sCooldownDropsCount, n.cooldownDropsCount) + + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() +} + +func (n *K8sAutoscaleNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + 
edge.NewTimedForwardReceiver(n.timer, n.newGroup()), + ), nil +} + +func (n *K8sAutoscaleNode) newGroup() *k8sAutoscaleGroup { + return &k8sAutoscaleGroup{ + n: n, + expr: n.replicasExpr.CopyReset(), } - k.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - k.increaseCount = &expvar.Int{} - k.decreaseCount = &expvar.Int{} - k.cooldownDropsCount = &expvar.Int{} - - k.statMap.Set(statsK8sIncreaseEventsCount, k.increaseCount) - k.statMap.Set(statsK8sDecreaseEventsCount, k.decreaseCount) - k.statMap.Set(statsK8sCooldownDropsCount, k.cooldownDropsCount) - - switch k.Wants() { - case pipeline.StreamEdge: - for p, ok := k.ins[0].NextPoint(); ok; p, ok = k.ins[0].NextPoint() { - k.timer.Start() - if np, err := k.handlePoint(p.Name, p.Group, p.Dimensions, p.Time, p.Fields, p.Tags); err != nil { - k.incrementErrorCount() - k.logger.Println("E!", err) - } else if np.Name != "" { - k.timer.Pause() - for _, child := range k.outs { - err := child.CollectPoint(np) - if err != nil { - return err - } - } - k.timer.Resume() - } - k.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := k.ins[0].NextBatch(); ok; b, ok = k.ins[0].NextBatch() { - k.timer.Start() - for _, p := range b.Points { - if np, err := k.handlePoint(b.Name, b.Group, b.PointDimensions(), p.Time, p.Fields, p.Tags); err != nil { - k.incrementErrorCount() - k.logger.Println("E!", err) - } else if np.Name != "" { - k.timer.Pause() - for _, child := range k.outs { - err := child.CollectPoint(np) - if err != nil { - return err - } - } - k.timer.Resume() - } - } - k.timer.Stop() - } +} + +type k8sAutoscaleGroup struct { + n *K8sAutoscaleNode + + expr stateful.Expression + + begin edge.BeginBatchMessage +} + +func (g *k8sAutoscaleGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + g.begin = begin + return nil, nil +} + +func (g *k8sAutoscaleGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + np, err := g.n.handlePoint(g.begin.Name(), g.begin.Dimensions(), bp, g.expr) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) } - return nil + return np, nil +} + +func (g *k8sAutoscaleGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return nil, nil +} + +func (g *k8sAutoscaleGroup) Point(p edge.PointMessage) (edge.Message, error) { + np, err := g.n.handlePoint(p.Name(), p.Dimensions(), p, g.expr) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E!", err) + } + return np, nil +} + +func (g *k8sAutoscaleGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *k8sAutoscaleGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } type resourceState struct { @@ -142,28 +157,26 @@ type event struct { New int } -func (k *K8sAutoscaleNode) handlePoint(streamName string, group models.GroupID, dims models.Dimensions, t time.Time, fields models.Fields, tags models.Tags) (models.Point, error) { - namespace, kind, name, err := k.getResourceFromPoint(tags) +func (n *K8sAutoscaleNode) handlePoint(streamName string, dims models.Dimensions, p edge.FieldsTagsTimeGetter, expr stateful.Expression) (edge.PointMessage, error) { + namespace, kind, name, err := n.getResourceFromPoint(p.Tags()) if err != nil { - return models.Point{}, err + return nil, err } - state, ok := k.resourceStates[name] + state, ok := n.resourceStates[name] if !ok { // If we haven't seen this resource before, get its state - scale, err := k.getResource(namespace, kind, name) + scale, err := n.getResource(namespace, 
kind, name) if err != nil { - return models.Point{}, errors.Wrapf(err, "could not determine initial scale for %s/%s/%s", namespace, kind, name) + return nil, errors.Wrapf(err, "could not determine initial scale for %s/%s/%s", namespace, kind, name) } state.current = int(scale.Spec.Replicas) - k.resourceStates[name] = state + n.resourceStates[name] = state } // Eval the replicas expression - k.replicasExprsMu.Lock() - newReplicas, err := k.evalExpr(state.current, group, k.k.Replicas, k.replicasExprs, k.replicasScopePool, t, fields, tags) - k.replicasExprsMu.Unlock() + newReplicas, err := n.evalExpr(state.current, expr, p) if err != nil { - return models.Point{}, errors.Wrap(err, "failed to evaluate the replicas expression") + return nil, errors.Wrap(err, "failed to evaluate the replicas expression") } // Create the event @@ -175,17 +188,17 @@ func (k *K8sAutoscaleNode) handlePoint(streamName string, group models.GroupID, New: newReplicas, } // Check bounds - if k.max > 0 && e.New > k.max { - e.New = k.max + if n.max > 0 && e.New > n.max { + e.New = n.max } - if e.New < k.min { - e.New = k.min + if e.New < n.min { + e.New = n.min } // Validate something changed if e.New == e.Old { // Nothing to do - return models.Point{}, nil + return nil, nil } // Update local copy of state @@ -193,78 +206,77 @@ func (k *K8sAutoscaleNode) handlePoint(streamName string, group models.GroupID, state.current = e.New // Check last change cooldown times + t := p.Time() var counter *expvar.Int switch { case change > 0: - if t.Before(state.lastIncrease.Add(k.k.IncreaseCooldown)) { + if t.Before(state.lastIncrease.Add(n.k.IncreaseCooldown)) { // Still hot, nothing to do - k.cooldownDropsCount.Add(1) - return models.Point{}, nil + n.cooldownDropsCount.Add(1) + return nil, nil } state.lastIncrease = t - counter = k.increaseCount + counter = n.increaseCount case change < 0: - if t.Before(state.lastDecrease.Add(k.k.DecreaseCooldown)) { + if t.Before(state.lastDecrease.Add(n.k.DecreaseCooldown)) { // Still hot, nothing to do - k.cooldownDropsCount.Add(1) - return models.Point{}, nil + n.cooldownDropsCount.Add(1) + return nil, nil } state.lastDecrease = t - counter = k.decreaseCount + counter = n.decreaseCount } // We have a valid event to apply - if err := k.applyEvent(e); err != nil { - return models.Point{}, errors.Wrap(err, "failed to apply scaling event") + if err := n.applyEvent(e); err != nil { + return nil, errors.Wrap(err, "failed to apply scaling event") } // Only save the updated state if we were successful - k.resourceStates[name] = state + n.resourceStates[name] = state // Count event counter.Add(1) // Create new tags for the point. // Leave room for the namespace,kind, and resource tags. 
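The scaling checks above clamp the requested replica count to the configured bounds and drop events that arrive while the previous change in the same direction is still cooling down. The two gates in isolation (a hypothetical helper, not the Kapacitor code):

```go
package sketch

import "time"

// clamp bounds the requested replica count; max <= 0 means no upper bound.
func clamp(replicas, min, max int) int {
	if max > 0 && replicas > max {
		replicas = max
	}
	if replicas < min {
		replicas = min
	}
	return replicas
}

// coolingDown reports whether a change at time t is still inside the cooldown
// window that started at the last change in the same direction.
func coolingDown(t, lastChange time.Time, cooldown time.Duration) bool {
	return t.Before(lastChange.Add(cooldown))
}
```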
- newTags := make(models.Tags, len(tags)+3) + newTags := make(models.Tags, len(dims.TagNames)+3) // Copy group by tags for _, d := range dims.TagNames { - newTags[d] = tags[d] + newTags[d] = p.Tags()[d] } // Set namespace,kind,resource tags - if k.k.NamespaceTag != "" { - newTags[k.k.NamespaceTag] = namespace + if n.k.NamespaceTag != "" { + newTags[n.k.NamespaceTag] = namespace } - if k.k.KindTag != "" { - newTags[k.k.KindTag] = kind + if n.k.KindTag != "" { + newTags[n.k.KindTag] = kind } - if k.k.ResourceTag != "" { - newTags[k.k.ResourceTag] = name + if n.k.ResourceTag != "" { + newTags[n.k.ResourceTag] = name } // Create point representing the event - p := models.Point{ - Name: streamName, - Time: t, - Group: group, - Dimensions: dims, - Tags: newTags, - Fields: models.Fields{ + return edge.NewPointMessage( + streamName, "", "", + dims, + models.Fields{ "old": int64(e.Old), "new": int64(e.New), }, - } - return p, nil + newTags, + t, + ), nil } -func (k *K8sAutoscaleNode) getResourceFromPoint(tags models.Tags) (namespace, kind, name string, err error) { +func (n *K8sAutoscaleNode) getResourceFromPoint(tags models.Tags) (namespace, kind, name string, err error) { // Get the name of the resource switch { - case k.k.ResourceName != "": - name = k.k.ResourceName - case k.k.ResourceNameTag != "": - t, ok := tags[k.k.ResourceNameTag] + case n.k.ResourceName != "": + name = n.k.ResourceName + case n.k.ResourceNameTag != "": + t, ok := tags[n.k.ResourceNameTag] if ok { name = t } @@ -274,29 +286,29 @@ func (k *K8sAutoscaleNode) getResourceFromPoint(tags models.Tags) (namespace, ki if name == "" { return "", "", "", errors.New("could not determine the name of the resource") } - namespace = k.k.Namespace + namespace = n.k.Namespace if namespace == "" { namespace = client.NamespaceDefault } - kind = k.k.Kind + kind = n.k.Kind return } -func (k *K8sAutoscaleNode) getResource(namespace, kind, name string) (*client.Scale, error) { - scales := k.client.Scales(namespace) +func (n *K8sAutoscaleNode) getResource(namespace, kind, name string) (*client.Scale, error) { + scales := n.client.Scales(namespace) scale, err := scales.Get(kind, name) return scale, errors.Wrapf(err, "failed to get the scale for resource %s/%s/%s", namespace, kind, name) } -func (k *K8sAutoscaleNode) applyEvent(e event) error { - k.logger.Printf("D! setting scale replicas to %d was %d for %s/%s/%s", e.New, e.Old, e.Namespace, e.Kind, e.Name) - scales := k.client.Scales(e.Namespace) - scale, err := k.getResource(e.Namespace, e.Kind, e.Name) +func (n *K8sAutoscaleNode) applyEvent(e event) error { + n.logger.Printf("D! setting scale replicas to %d was %d for %s/%s/%s", e.New, e.Old, e.Namespace, e.Kind, e.Name) + scales := n.client.Scales(e.Namespace) + scale, err := n.getResource(e.Namespace, e.Kind, e.Name) if err != nil { return err } if scale.Spec.Replicas != int32(e.Old) { - k.logger.Printf("W! the kubernetes scale spec and Kapacitor's spec do not match for resource %s/%s/%s, did it change externally?", e.Namespace, e.Kind, e.Name) + n.logger.Printf("W! 
the kubernetes scale spec and Kapacitor's spec do not match for resource %s/%s/%s, did it change externally?", e.Namespace, e.Kind, e.Name) } scale.Spec.Replicas = int32(e.New) @@ -306,49 +318,28 @@ func (k *K8sAutoscaleNode) applyEvent(e event) error { return nil } -func (k *K8sAutoscaleNode) evalExpr( +func (n *K8sAutoscaleNode) evalExpr( current int, - group models.GroupID, - lambda *ast.LambdaNode, - expressionsMap map[models.GroupID]stateful.Expression, - pool stateful.ScopePool, - t time.Time, - fields models.Fields, - tags models.Tags, + expr stateful.Expression, + p edge.FieldsTagsTimeGetter, ) (int, error) { - expr, ok := expressionsMap[group] - if !ok { - var err error - expr, err = stateful.NewExpression(lambda.Expression) - if err != nil { - return 0, err - } - expressionsMap[group] = expr - } - i, err := k.evalInt(int64(current), expr, pool, t, fields, tags) - return int(i), err -} - -// evalInt - Evaluate a given expression as an int64 against a set of fields and tags. -// The CurrentField is also set on the scope if not empty. -func (k *K8sAutoscaleNode) evalInt(current int64, se stateful.Expression, scopePool stateful.ScopePool, now time.Time, fields models.Fields, tags models.Tags) (int64, error) { - vars := scopePool.Get() - defer scopePool.Put(vars) + vars := n.replicasScopePool.Get() + defer n.replicasScopePool.Put(vars) // Set the current replicas value on the scope if requested. - if k.k.CurrentField != "" { - vars.Set(k.k.CurrentField, current) + if n.k.CurrentField != "" { + vars.Set(n.k.CurrentField, current) } // Fill the scope with the rest of the values - err := fillScope(vars, scopePool.ReferenceVariables(), now, fields, tags) + err := fillScope(vars, n.replicasScopePool.ReferenceVariables(), p) if err != nil { return 0, err } - i, err := se.EvalInt(vars) + i, err := expr.EvalInt(vars) if err != nil { return 0, err } - return i, nil + return int(i), err } diff --git a/kapacitor_loopback.go b/kapacitor_loopback.go index 6bdc3bbfb..9569c6848 100644 --- a/kapacitor_loopback.go +++ b/kapacitor_loopback.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -18,6 +19,8 @@ type KapacitorLoopbackNode struct { k *pipeline.KapacitorLoopbackNode pointsWritten *expvar.Int + + begin edge.BeginBatchMessage } func newKapacitorLoopbackNode(et *ExecutingTask, n *pipeline.KapacitorLoopbackNode, l *log.Logger) (*KapacitorLoopbackNode, error) { @@ -35,73 +38,94 @@ func newKapacitorLoopbackNode(et *ExecutingTask, n *pipeline.KapacitorLoopbackNo return kn, nil } -func (k *KapacitorLoopbackNode) runOut([]byte) error { - k.pointsWritten = &expvar.Int{} - - k.statMap.Set(statsInfluxDBPointsWritten, k.pointsWritten) - - switch k.Wants() { - case pipeline.StreamEdge: - for p, ok := k.ins[0].NextPoint(); ok; p, ok = k.ins[0].NextPoint() { - k.timer.Start() - if k.k.Database != "" { - p.Database = k.k.Database - } - if k.k.RetentionPolicy != "" { - p.RetentionPolicy = k.k.RetentionPolicy - } - if k.k.Measurement != "" { - p.Name = k.k.Measurement - } - if len(k.k.Tags) > 0 { - p.Tags = p.Tags.Copy() - for k, v := range k.k.Tags { - p.Tags[k] = v - } - } - err := k.et.tm.WriteKapacitorPoint(p) - if err != nil { - k.incrementErrorCount() - k.logger.Println("E! 
failed to write point over loopback") - } else { - k.pointsWritten.Add(1) - } - k.timer.Stop() +func (n *KapacitorLoopbackNode) runOut([]byte) error { + n.pointsWritten = &expvar.Int{} + n.statMap.Set(statsInfluxDBPointsWritten, n.pointsWritten) + + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + n, + ) + return consumer.Consume() +} + +func (n *KapacitorLoopbackNode) Point(p edge.PointMessage) error { + n.timer.Start() + defer n.timer.Stop() + + p = p.ShallowCopy() + + if n.k.Database != "" { + p.SetDatabase(n.k.Database) + } + if n.k.RetentionPolicy != "" { + p.SetRetentionPolicy(n.k.RetentionPolicy) + } + if n.k.Measurement != "" { + p.SetName(n.k.Measurement) + } + if len(n.k.Tags) > 0 { + tags := p.Tags().Copy() + for k, v := range n.k.Tags { + tags[k] = v } - case pipeline.BatchEdge: - for b, ok := k.ins[0].NextBatch(); ok; b, ok = k.ins[0].NextBatch() { - k.timer.Start() - if k.k.Measurement != "" { - b.Name = k.k.Measurement - } - written := int64(0) - for _, bp := range b.Points { - tags := bp.Tags - if len(k.k.Tags) > 0 { - tags = bp.Tags.Copy() - for k, v := range k.k.Tags { - tags[k] = v - } - } - p := models.Point{ - Database: k.k.Database, - RetentionPolicy: k.k.RetentionPolicy, - Name: b.Name, - Tags: tags, - Fields: bp.Fields, - Time: bp.Time, - } - err := k.et.tm.WriteKapacitorPoint(p) - if err != nil { - k.incrementErrorCount() - k.logger.Println("E! failed to write point over loopback") - } else { - written++ - } - } - k.pointsWritten.Add(written) - k.timer.Stop() + p.SetTags(tags) + } + + n.timer.Pause() + err := n.et.tm.WriteKapacitorPoint(p) + n.timer.Resume() + + if err != nil { + n.incrementErrorCount() + n.logger.Println("E! failed to write point over loopback") + } else { + n.pointsWritten.Add(1) + } + return nil +} + +func (n *KapacitorLoopbackNode) BeginBatch(begin edge.BeginBatchMessage) error { + n.begin = begin + return nil +} + +func (n *KapacitorLoopbackNode) BatchPoint(bp edge.BatchPointMessage) error { + tags := bp.Tags() + if len(n.k.Tags) > 0 { + tags = bp.Tags().Copy() + for k, v := range n.k.Tags { + tags[k] = v } } + p := edge.NewPointMessage( + n.begin.Name(), + n.k.Database, + n.k.RetentionPolicy, + models.Dimensions{}, + bp.Fields(), + tags, + bp.Time(), + ) + + n.timer.Pause() + err := n.et.tm.WriteKapacitorPoint(p) + n.timer.Resume() + + if err != nil { + n.incrementErrorCount() + n.logger.Println("E! failed to write point over loopback") + } else { + n.pointsWritten.Add(1) + } + return nil +} +func (n *KapacitorLoopbackNode) EndBatch(edge.EndBatchMessage) error { + return nil +} +func (n *KapacitorLoopbackNode) Barrier(edge.BarrierMessage) error { + return nil +} +func (n *KapacitorLoopbackNode) DeleteGroup(edge.DeleteGroupMessage) error { return nil } diff --git a/log.go b/log.go index a2c07f2de..b2423d2ea 100644 --- a/log.go +++ b/log.go @@ -7,14 +7,19 @@ import ( "log" "strings" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/wlog" ) type LogNode struct { node - level wlog.Level - prefix string + + key string + buf bytes.Buffer + enc *json.Encoder + + batchBuffer *edge.BatchBuffer } // Create a new LogNode which logs all data it receives @@ -24,51 +29,64 @@ func newLogNode(et *ExecutingTask, n *pipeline.LogNode, l *log.Logger) (*LogNode return nil, fmt.Errorf("invalid log level %s", n.Level) } nn := &LogNode{ - node: node{Node: n, et: et, logger: l}, - level: level, - prefix: n.Prefix, + node: node{Node: n, et: et, logger: l}, + key: fmt.Sprintf("%c! 
%s", wlog.ReverseLevels[level], n.Prefix), + batchBuffer: new(edge.BatchBuffer), } + nn.enc = json.NewEncoder(&nn.buf) nn.node.runF = nn.runLog return nn, nil } -func (s *LogNode) runLog([]byte) error { - key := fmt.Sprintf("%c! %s", wlog.ReverseLevels[s.level], s.prefix) - var buf bytes.Buffer - env := json.NewEncoder(&buf) - switch s.Wants() { - case pipeline.StreamEdge: - for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() { - buf.Reset() - if err := env.Encode(p); err != nil { - s.incrementErrorCount() - s.logger.Println("E!", err) - continue - } - s.logger.Println(key, buf.String()) - for _, child := range s.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() { - buf.Reset() - if err := env.Encode(b); err != nil { - s.incrementErrorCount() - s.logger.Println("E!", err) - continue - } - s.logger.Println(key, buf.String()) - for _, child := range s.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } +func (n *LogNode) runLog([]byte) error { + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() + +} + +func (n *LogNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + return nil, n.batchBuffer.BeginBatch(begin) +} + +func (n *LogNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return nil, n.batchBuffer.BatchPoint(bp) +} + +func (n *LogNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return n.BufferedBatch(n.batchBuffer.BufferedBatchMessage(end)) +} + +func (n *LogNode) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) { + n.buf.Reset() + if err := n.enc.Encode(batch); err != nil { + n.incrementErrorCount() + n.logger.Println("E!", err) + return batch, nil + } + n.logger.Println(n.key, n.buf.String()) + return batch, nil +} + +func (n *LogNode) Point(p edge.PointMessage) (edge.Message, error) { + n.buf.Reset() + if err := n.enc.Encode(p); err != nil { + n.incrementErrorCount() + n.logger.Println("E!", err) + return p, nil } - return nil + n.logger.Println(n.key, n.buf.String()) + return p, nil +} + +func (n *LogNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *LogNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } diff --git a/models/batch.go b/models/batch.go deleted file mode 100644 index 5d7be3319..000000000 --- a/models/batch.go +++ /dev/null @@ -1,221 +0,0 @@ -package models - -import ( - "encoding/json" - "errors" - "fmt" - "sort" - "time" - - "github.com/influxdata/kapacitor/influxdb" -) - -// A point in batch, similar to Point but most information is -// found on the containing Batch. -// -// Tags on a BatchPoint are a superset of the tags on the Batch -// All points in a batch should have the same tag and field keys. 
-type BatchPoint struct { - Time time.Time `json:"time"` - Fields Fields `json:"fields"` - Tags Tags `json:"tags"` -} - -func BatchPointFromPoint(p Point) BatchPoint { - return BatchPoint{ - Time: p.Time, - Fields: p.Fields, - Tags: p.Tags, - } -} - -type Batch struct { - Name string `json:"name,omitempty"` - TMax time.Time `json:"tmax,omitempty"` - Group GroupID `json:"group,omitempty"` - ByName bool `json:"byname,omitempty"` - Tags Tags `json:"tags,omitempty"` - Points []BatchPoint `json:"points,omitempty"` -} - -func (b Batch) PointName() string { - return b.Name -} -func (b Batch) PointGroup() GroupID { - return b.Group -} -func (b Batch) PointTime() time.Time { - return b.TMax -} - -func (b Batch) PointFields() Fields { - if len(b.Points) > 0 { - return b.Points[0].Fields - } - return nil -} - -func (b Batch) PointTags() Tags { - return b.Tags -} - -func (b Batch) PointDimensions() Dimensions { - return Dimensions{ - ByName: b.ByName, - TagNames: SortedKeys(b.Tags), - } -} - -func (b Batch) Copy() PointInterface { - cb := b - cb.Tags = b.Tags.Copy() - cb.Points = make([]BatchPoint, len(b.Points)) - for i, p := range b.Points { - cb.Points[i] = p - cb.Points[i].Fields = p.Fields.Copy() - cb.Points[i].Tags = p.Tags.Copy() - } - return cb -} - -// ShallowCopyPoints creates a new slice for the points but only shallow copies the points themselves. -// Then if a single point needs to be modified it must first be copied. -func (b Batch) ShallowCopyPoints() []BatchPoint { - points := make([]BatchPoint, len(b.Points)) - copy(points, b.Points) - return points -} - -func (b Batch) Setter() PointSetter { - return &b -} - -func (b *Batch) Interface() PointInterface { - return *b -} - -func (b *Batch) SetNewDimTag(key string, value string) { - b.Tags[key] = value - for _, p := range b.Points { - p.Tags[key] = value - } -} - -func (b *Batch) UpdateGroup() { - b.Group = ToGroupID(b.Name, b.Tags, b.PointDimensions()) -} - -func BatchToResult(b Batch) Result { - row := BatchToRow(b) - r := Result{ - Series: Rows{row}, - } - return r -} - -func BatchToRow(b Batch) (row *Row) { - row = &Row{ - Name: b.Name, - Tags: b.Tags, - } - if len(b.Points) == 0 { - return - } - row.Columns = []string{"time"} - p := b.Points[0] - for f := range p.Fields { - row.Columns = append(row.Columns, f) - } - // Append tags that are not on the batch - for t := range p.Tags { - if _, ok := b.Tags[t]; !ok { - row.Columns = append(row.Columns, t) - } - } - // Sort all columns but leave time as first - sort.Strings(row.Columns[1:]) - row.Values = make([][]interface{}, len(b.Points)) - for i, p := range b.Points { - row.Values[i] = make([]interface{}, len(row.Columns)) - row.Values[i][0] = p.Time - for j, c := range row.Columns[1:] { - if v, ok := p.Fields[c]; ok { - row.Values[i][j+1] = v - } else if v, ok := p.Tags[c]; ok { - row.Values[i][j+1] = v - } - } - } - return -} - -func ResultToBatches(res influxdb.Result, groupByName bool) ([]Batch, error) { - if res.Err != "" { - return nil, errors.New(res.Err) - } - batches := make([]Batch, 0, len(res.Series)) - dims := Dimensions{ - ByName: groupByName, - } - for _, series := range res.Series { - var name string - if groupByName { - name = series.Name - } - dims.TagNames = SortedKeys(series.Tags) - groupID := ToGroupID( - name, - series.Tags, - dims, - ) - b := Batch{ - Name: series.Name, - Group: groupID, - Tags: series.Tags, - } - b.Points = make([]BatchPoint, 0, len(series.Values)) - for _, v := range series.Values { - fields := make(Fields) - var t time.Time - for i, c := range 
series.Columns { - if c == "time" { - tStr, ok := v[i].(string) - if !ok { - return nil, fmt.Errorf("unexpected time value: %v", v[i]) - } - var err error - t, err = time.Parse(time.RFC3339Nano, tStr) - if err != nil { - t, err = time.Parse(time.RFC3339, tStr) - if err != nil { - return nil, fmt.Errorf("unexpected time format: %v", err) - } - } - } else { - value := v[i] - if n, ok := value.(json.Number); ok { - f, err := n.Float64() - if err == nil { - value = f - } - } - if value == nil { - continue - } - fields[c] = value - } - } - if len(fields) > 0 { - if t.After(b.TMax) { - b.TMax = t - } - b.Points = append( - b.Points, - BatchPoint{Time: t, Fields: fields, Tags: b.Tags}, - ) - } - } - batches = append(batches, b) - } - return batches, nil -} diff --git a/models/point.go b/models/point.go index 301c53e56..e65d81cf4 100644 --- a/models/point.go +++ b/models/point.go @@ -3,122 +3,52 @@ package models import ( "bytes" "sort" - "strconv" - "time" - - "github.com/influxdata/influxdb/models" ) type GroupID string -type Fields map[string]interface{} -type Tags map[string]string const ( NilGroup GroupID = "" ) -// Common interface for both Point and Batch objects -type PointInterface interface { - PointName() string - PointTime() time.Time - PointGroup() GroupID - PointTags() Tags - PointDimensions() Dimensions - PointFields() Fields - - // Return a copy of self - Copy() PointInterface - Setter() PointSetter -} - -type PointSetter interface { - PointInterface - SetNewDimTag(key string, value string) - UpdateGroup() - Interface() PointInterface -} - type Dimensions struct { ByName bool TagNames []string } -// Represents a single data point -type Point struct { - Name string - Database string - RetentionPolicy string - - Group GroupID - Dimensions Dimensions - - Tags Tags - - Fields Fields - - Time time.Time -} - -func (p Point) PointName() string { - return p.Name -} - -func (p Point) PointGroup() GroupID { - return p.Group -} - -func (p Point) PointTime() time.Time { - return p.Time -} - -func (p Point) PointFields() Fields { - return p.Fields -} - -func (p Point) PointTags() Tags { - tags := make(Tags, len(p.Dimensions.TagNames)) - for _, dim := range p.Dimensions.TagNames { - tags[dim] = p.Tags[dim] +func (d Dimensions) Equal(o Dimensions) bool { + if d.ByName != o.ByName || len(d.TagNames) != len(o.TagNames) { + return false } - return tags -} - -func (p Point) PointDimensions() Dimensions { - return p.Dimensions + for i := range d.TagNames { + if d.TagNames[i] != o.TagNames[i] { + return false + } + } + return true } - -func (p Point) Copy() PointInterface { - cp := p - cp.Fields = p.Fields.Copy() - cp.Tags = p.Tags.Copy() - cp.Dimensions = p.Dimensions.Copy() - return &cp +func (d Dimensions) Copy() Dimensions { + tags := make([]string, len(d.TagNames)) + copy(tags, d.TagNames) + return Dimensions{ByName: d.ByName, TagNames: tags} } -func (p Point) Setter() PointSetter { - return &p +func (d Dimensions) ToSet() map[string]bool { + set := make(map[string]bool, len(d.TagNames)) + for _, dim := range d.TagNames { + set[dim] = true + } + return set } -func (p *Point) Interface() PointInterface { - return *p -} +type Fields map[string]interface{} -func (p *Point) SetNewDimTag(key string, value string) { - p.Tags[key] = value - // Only add dim if it does not exist. - for _, dim := range p.Dimensions.TagNames { - if dim == key { - // Key exists we are done. 
- return - } +func (f Fields) Copy() Fields { + cf := make(Fields, len(f)) + for k, v := range f { + cf[k] = v } - // Key doesn't exist add it. - p.Dimensions.TagNames = append(p.Dimensions.TagNames, key) -} - -func (p *Point) UpdateGroup() { - sort.Strings(p.Dimensions.TagNames) - p.Group = ToGroupID(p.Name, p.Tags, p.Dimensions) + return cf } func SortedFields(fields Fields) []string { @@ -130,6 +60,16 @@ func SortedFields(fields Fields) []string { return a } +type Tags map[string]string + +func (t Tags) Copy() Tags { + ct := make(Tags, len(t)) + for k, v := range t { + ct[k] = v + } + return ct +} + func SortedKeys(tags map[string]string) []string { a := make([]string, 0, len(tags)) for k := range tags { @@ -163,86 +103,3 @@ func ToGroupID(name string, tags map[string]string, dims Dimensions) GroupID { } return GroupID(buf.Bytes()) } - -// Returns byte array of a line protocol representation of the point -func (p Point) Bytes(precision string) []byte { - key := models.MakeKey([]byte(p.Name), models.NewTags(p.Tags)) - fields := models.Fields(p.Fields).MarshalBinary() - kl := len(key) - fl := len(fields) - var bytes []byte - - if p.Time.IsZero() { - bytes = make([]byte, fl+kl+1) - copy(bytes, key) - bytes[kl] = ' ' - copy(bytes[kl+1:], fields) - } else { - timeStr := strconv.FormatInt(p.Time.UnixNano()/models.GetPrecisionMultiplier(precision), 10) - tl := len(timeStr) - bytes = make([]byte, fl+kl+tl+2) - copy(bytes, key) - bytes[kl] = ' ' - copy(bytes[kl+1:], fields) - bytes[kl+fl+1] = ' ' - copy(bytes[kl+fl+2:], []byte(timeStr)) - } - - return bytes -} - -func PointToRow(p Point) (row *Row) { - row = &Row{ - Name: p.Name, - Tags: p.Tags, - Columns: []string{"time"}, - Values: make([][]interface{}, 1), - } - - for _, f := range SortedFields(p.Fields) { - row.Columns = append(row.Columns, f) - } - row.Values[0] = make([]interface{}, len(p.Fields)+1) - row.Values[0][0] = p.Time - for i, c := range row.Columns[1:] { - row.Values[0][i+1] = p.Fields[c] - } - return -} - -func (f Fields) Copy() Fields { - cf := make(Fields, len(f)) - for k, v := range f { - cf[k] = v - } - return cf -} - -func (t Tags) Copy() Tags { - ct := make(Tags, len(t)) - for k, v := range t { - ct[k] = v - } - return ct -} - -func (d Dimensions) Copy() Dimensions { - tags := make([]string, len(d.TagNames)) - copy(tags, d.TagNames) - return Dimensions{ByName: d.ByName, TagNames: tags} -} - -func (d Dimensions) ToSet() map[string]bool { - set := make(map[string]bool, len(d.TagNames)) - for _, dim := range d.TagNames { - set[dim] = true - } - return set -} - -// Simple container for point data. -type RawPoint struct { - Time time.Time - Fields Fields - Tags Tags -} diff --git a/models/result.go b/models/result.go index 5972304cd..b6ec24948 100644 --- a/models/result.go +++ b/models/result.go @@ -2,6 +2,7 @@ package models import ( "encoding/json" + "errors" "time" ) @@ -15,6 +16,21 @@ func (r Result) String() string { return string(b) } +func (r *Result) UnmarshalJSON(data []byte) error { + var o struct { + Series Rows `json:"series"` + Err string `json:"error"` + } + if err := json.Unmarshal(data, &o); err != nil { + return err + } + r.Series = o.Series + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + // Rows represents a collection of rows. Rows implements sort.Interface. 
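The Result.UnmarshalJSON added above decodes into an auxiliary struct and promotes a non-empty error string from the wire into a Go error value. The same pattern in a self-contained form (a hypothetical type that mirrors, but does not reproduce, the Kapacitor struct):

```go
package sketch

import (
	"encoding/json"
	"errors"
)

// result mirrors a response that carries either data or an error string on the wire.
type result struct {
	Series []string
	Err    error
}

// UnmarshalJSON decodes into a throwaway struct first, then converts the
// wire-level error string into a proper error value on the target.
func (r *result) UnmarshalJSON(data []byte) error {
	var o struct {
		Series []string `json:"series"`
		Err    string   `json:"error"`
	}
	if err := json.Unmarshal(data, &o); err != nil {
		return err
	}
	r.Series = o.Series
	if o.Err != "" {
		r.Err = errors.New(o.Err)
	}
	return nil
}
```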
type Rows []*Row diff --git a/node.go b/node.go index ec14ec9e4..db09ab15a 100644 --- a/node.go +++ b/node.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "time" + "github.com/influxdata/kapacitor/edge" kexpvar "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" @@ -28,7 +29,7 @@ const ( type Node interface { pipeline.Node - addParentEdge(*Edge) + addParentEdge(edge.StatsEdge) init() @@ -78,8 +79,8 @@ type node struct { err error finishedMu sync.Mutex finished bool - ins []*Edge - outs []*Edge + ins []edge.StatsEdge + outs []edge.StatsEdge logger *log.Logger timer timer.Timer statsKey string @@ -88,7 +89,7 @@ type node struct { nodeErrors *kexpvar.Int } -func (n *node) addParentEdge(e *Edge) { +func (n *node) addParentEdge(e edge.StatsEdge) { n.ins = append(n.ins, e) } @@ -164,7 +165,7 @@ func (n *node) Wait() error { return n.err } -func (n *node) addChild(c Node) (*Edge, error) { +func (n *node) addChild(c Node) (edge.StatsEdge, error) { if n.Provides() != c.Wants() { return nil, fmt.Errorf("cannot add child mismatched edges: %s:%s -> %s:%s", n.Name(), n.Provides(), c.Name(), c.Wants()) } @@ -241,7 +242,7 @@ func (n *node) edot(buf *bytes.Buffer, labels bool) { fmt.Sprintf("%s -> %s [label=\"processed=%d\"];\n", n.Name(), c.Name(), - n.outs[i].collectedCount(), + n.outs[i].Collected(), ), )) } @@ -273,7 +274,7 @@ func (n *node) edot(buf *bytes.Buffer, labels bool) { fmt.Sprintf("%s -> %s [processed=\"%d\"];\n", n.Name(), c.Name(), - n.outs[i].collectedCount(), + n.outs[i].Collected(), ), )) } @@ -283,7 +284,7 @@ func (n *node) edot(buf *bytes.Buffer, labels bool) { // node collected count is the sum of emitted counts of parent edges func (n *node) collectedCount() (count int64) { for _, in := range n.ins { - count += in.emittedCount() + count += in.Emitted() } return } @@ -291,7 +292,7 @@ func (n *node) collectedCount() (count int64) { // node emitted count is the sum of collected counts of children edges func (n *node) emittedCount() (count int64) { for _, out := range n.outs { - count += out.collectedCount() + count += out.Collected() } return } @@ -331,14 +332,14 @@ func (n *node) nodeStatsByGroup() (stats map[models.GroupID]nodeStats) { // Get the counts for just one output. stats = make(map[models.GroupID]nodeStats) if len(n.outs) > 0 { - n.outs[0].readGroupStats(func(group models.GroupID, c, e int64, tags models.Tags, dims models.Dimensions) { - stats[group] = nodeStats{ + n.outs[0].ReadGroupStats(func(g *edge.GroupStats) { + stats[g.GroupInfo.ID] = nodeStats{ Fields: models.Fields{ // A node's emitted count is the collected count of its output. 
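The nodeStatsByGroup change above reads per-group counters through a callback that receives a single stats struct instead of positional arguments. A tiny stand-alone sketch of that callback style, with hypothetical types:

```go
package sketch

// GroupStats is a hypothetical stand-in for the per-group counters passed to the callback.
type GroupStats struct {
	GroupID   string
	Collected int64
	Emitted   int64
}

// ReadGroupStats walks a stats map and hands each entry to the callback,
// keeping locking and iteration details inside the owning edge.
func ReadGroupStats(stats map[string]GroupStats, f func(GroupStats)) {
	for _, g := range stats {
		f(g)
	}
}

// emittedByGroup sums per-group output: a node's emitted count is the
// collected count of its output edge, as the comment above notes.
func emittedByGroup(stats map[string]GroupStats) map[string]int64 {
	out := make(map[string]int64)
	ReadGroupStats(stats, func(g GroupStats) {
		out[g.GroupID] += g.Collected
	})
	return out
}
```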
- "emitted": c, + "emitted": g.Collected, }, - Tags: tags, - Dimensions: dims, + Tags: g.GroupInfo.Tags, + Dimensions: g.GroupInfo.Dimensions, } }) } diff --git a/noop.go b/noop.go index ac12f9d78..53c4cb3ee 100644 --- a/noop.go +++ b/noop.go @@ -3,6 +3,7 @@ package kapacitor import ( "log" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) @@ -19,25 +20,10 @@ func newNoOpNode(et *ExecutingTask, n *pipeline.NoOpNode, l *log.Logger) (*NoOpN return nn, nil } -func (s *NoOpNode) runNoOp([]byte) error { - switch s.Wants() { - case pipeline.StreamEdge: - for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() { - for _, child := range s.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() { - for _, child := range s.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } +func (n *NoOpNode) runNoOp([]byte) error { + for m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() { + if err := edge.Forward(n.outs, m); err != nil { + return err } } return nil diff --git a/pipeline/influxql.gen.go b/pipeline/influxql.gen.go index a17d68f8b..e84d5475f 100644 --- a/pipeline/influxql.gen.go +++ b/pipeline/influxql.gen.go @@ -10,76 +10,40 @@ import "github.com/influxdata/influxdb/influxql" //tick:ignore type ReduceCreater struct { - CreateFloatReducer func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) - CreateFloatBulkReducer func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) + CreateFloatReducer func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) - CreateFloatIntegerReducer func() (influxql.FloatPointAggregator, influxql.IntegerPointEmitter) - CreateFloatBulkIntegerReducer func() (FloatBulkPointAggregator, influxql.IntegerPointEmitter) + CreateFloatIntegerReducer func() (influxql.FloatPointAggregator, influxql.IntegerPointEmitter) - CreateFloatStringReducer func() (influxql.FloatPointAggregator, influxql.StringPointEmitter) - CreateFloatBulkStringReducer func() (FloatBulkPointAggregator, influxql.StringPointEmitter) + CreateFloatStringReducer func() (influxql.FloatPointAggregator, influxql.StringPointEmitter) - CreateFloatBooleanReducer func() (influxql.FloatPointAggregator, influxql.BooleanPointEmitter) - CreateFloatBulkBooleanReducer func() (FloatBulkPointAggregator, influxql.BooleanPointEmitter) + CreateFloatBooleanReducer func() (influxql.FloatPointAggregator, influxql.BooleanPointEmitter) - CreateIntegerFloatReducer func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) - CreateIntegerBulkFloatReducer func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) + CreateIntegerFloatReducer func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) - CreateIntegerReducer func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) - CreateIntegerBulkReducer func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) + CreateIntegerReducer func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) - CreateIntegerStringReducer func() (influxql.IntegerPointAggregator, influxql.StringPointEmitter) - CreateIntegerBulkStringReducer func() (IntegerBulkPointAggregator, influxql.StringPointEmitter) + CreateIntegerStringReducer func() (influxql.IntegerPointAggregator, influxql.StringPointEmitter) - CreateIntegerBooleanReducer func() (influxql.IntegerPointAggregator, influxql.BooleanPointEmitter) - CreateIntegerBulkBooleanReducer 
func() (IntegerBulkPointAggregator, influxql.BooleanPointEmitter) + CreateIntegerBooleanReducer func() (influxql.IntegerPointAggregator, influxql.BooleanPointEmitter) - CreateStringFloatReducer func() (influxql.StringPointAggregator, influxql.FloatPointEmitter) - CreateStringBulkFloatReducer func() (StringBulkPointAggregator, influxql.FloatPointEmitter) + CreateStringFloatReducer func() (influxql.StringPointAggregator, influxql.FloatPointEmitter) - CreateStringIntegerReducer func() (influxql.StringPointAggregator, influxql.IntegerPointEmitter) - CreateStringBulkIntegerReducer func() (StringBulkPointAggregator, influxql.IntegerPointEmitter) + CreateStringIntegerReducer func() (influxql.StringPointAggregator, influxql.IntegerPointEmitter) - CreateStringReducer func() (influxql.StringPointAggregator, influxql.StringPointEmitter) - CreateStringBulkReducer func() (StringBulkPointAggregator, influxql.StringPointEmitter) + CreateStringReducer func() (influxql.StringPointAggregator, influxql.StringPointEmitter) - CreateStringBooleanReducer func() (influxql.StringPointAggregator, influxql.BooleanPointEmitter) - CreateStringBulkBooleanReducer func() (StringBulkPointAggregator, influxql.BooleanPointEmitter) + CreateStringBooleanReducer func() (influxql.StringPointAggregator, influxql.BooleanPointEmitter) - CreateBooleanFloatReducer func() (influxql.BooleanPointAggregator, influxql.FloatPointEmitter) - CreateBooleanBulkFloatReducer func() (BooleanBulkPointAggregator, influxql.FloatPointEmitter) + CreateBooleanFloatReducer func() (influxql.BooleanPointAggregator, influxql.FloatPointEmitter) - CreateBooleanIntegerReducer func() (influxql.BooleanPointAggregator, influxql.IntegerPointEmitter) - CreateBooleanBulkIntegerReducer func() (BooleanBulkPointAggregator, influxql.IntegerPointEmitter) + CreateBooleanIntegerReducer func() (influxql.BooleanPointAggregator, influxql.IntegerPointEmitter) - CreateBooleanStringReducer func() (influxql.BooleanPointAggregator, influxql.StringPointEmitter) - CreateBooleanBulkStringReducer func() (BooleanBulkPointAggregator, influxql.StringPointEmitter) + CreateBooleanStringReducer func() (influxql.BooleanPointAggregator, influxql.StringPointEmitter) - CreateBooleanReducer func() (influxql.BooleanPointAggregator, influxql.BooleanPointEmitter) - CreateBooleanBulkReducer func() (BooleanBulkPointAggregator, influxql.BooleanPointEmitter) + CreateBooleanReducer func() (influxql.BooleanPointAggregator, influxql.BooleanPointEmitter) TopBottomCallInfo *TopBottomCallInfo IsSimpleSelector bool IsStreamTransformation bool IsEmptyOK bool } - -type FloatBulkPointAggregator interface { - influxql.FloatPointAggregator - influxql.FloatBulkPointAggregator -} - -type IntegerBulkPointAggregator interface { - influxql.IntegerPointAggregator - influxql.IntegerBulkPointAggregator -} - -type StringBulkPointAggregator interface { - influxql.StringPointAggregator - influxql.StringBulkPointAggregator -} - -type BooleanBulkPointAggregator interface { - influxql.BooleanPointAggregator - influxql.BooleanBulkPointAggregator -} diff --git a/pipeline/influxql.gen.go.tmpl b/pipeline/influxql.gen.go.tmpl index 90167210c..5880b872f 100644 --- a/pipeline/influxql.gen.go.tmpl +++ b/pipeline/influxql.gen.go.tmpl @@ -8,7 +8,6 @@ type ReduceCreater struct { {{range $a := $types}} {{range $e := $types}} Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer func() (influxql.{{$a.Name}}PointAggregator, influxql.{{$e.Name}}PointEmitter) - Create{{$a.Name}}Bulk{{if ne $a.Name 
$e.Name}}{{$e.Name}}{{end}}Reducer func() ({{$a.Name}}BulkPointAggregator, influxql.{{$e.Name}}PointEmitter) {{end}}{{end}}{{end}} TopBottomCallInfo *TopBottomCallInfo @@ -17,9 +16,3 @@ type ReduceCreater struct { IsEmptyOK bool } -{{range .}} -type {{.Name}}BulkPointAggregator interface { - influxql.{{.Name}}PointAggregator - influxql.{{.Name}}BulkPointAggregator -} -{{end}} diff --git a/pipeline/influxql.go b/pipeline/influxql.go index f551642fd..99ef70226 100644 --- a/pipeline/influxql.go +++ b/pipeline/influxql.go @@ -139,11 +139,11 @@ func (n *chainnode) Mean(field string) *InfluxQLNode { // if you want the median point use `.percentile(field, 50.0)`. func (n *chainnode) Median(field string) *InfluxQLNode { i := newInfluxQLNode("median", field, n.Provides(), StreamEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.FloatMedianReduceSlice) return fn, fn }, - CreateIntegerBulkFloatReducer: func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) { + CreateIntegerFloatReducer: func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewIntegerSliceFuncFloatReducer(influxql.IntegerMedianReduceSlice) return fn, fn }, @@ -155,11 +155,11 @@ func (n *chainnode) Median(field string) *InfluxQLNode { // Compute the mode of the data. func (n *chainnode) Mode(field string) *InfluxQLNode { i := newInfluxQLNode("mode", field, n.Provides(), StreamEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.FloatModeReduceSlice) return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { fn := influxql.NewIntegerSliceFuncReducer(influxql.IntegerModeReduceSlice) return fn, fn }, @@ -171,11 +171,11 @@ func (n *chainnode) Mode(field string) *InfluxQLNode { // Compute the difference between `min` and `max` points. func (n *chainnode) Spread(field string) *InfluxQLNode { i := newInfluxQLNode("spread", field, n.Provides(), StreamEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.FloatSpreadReduceSlice) return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { fn := influxql.NewIntegerSliceFuncReducer(influxql.IntegerSpreadReduceSlice) return fn, fn }, @@ -292,11 +292,11 @@ func (n *chainnode) Max(field string) *InfluxQLNode { // Select a point at the given percentile. This is a selector function, no interpolation between points is performed. 
func (n *chainnode) Percentile(field string, percentile float64) *InfluxQLNode { i := newInfluxQLNode("percentile", field, n.Provides(), StreamEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatPercentileReduceSliceFunc(percentile)) return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerPercentileReduceSliceFunc(percentile)) return fn, fn }, @@ -318,7 +318,7 @@ func (n *chainnode) Top(num int64, field string, fieldsAndTags ...string) *Influ tags[i] = i } i := newInfluxQLNode("top", field, n.Provides(), BatchEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatTopReduceSliceFunc( int(num), tags, @@ -326,7 +326,7 @@ func (n *chainnode) Top(num int64, field string, fieldsAndTags ...string) *Influ )) return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerTopReduceSliceFunc( int(num), tags, @@ -349,7 +349,7 @@ func (n *chainnode) Bottom(num int64, field string, fieldsAndTags ...string) *In tags[i] = i } i := newInfluxQLNode("bottom", field, n.Provides(), BatchEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatBottomReduceSliceFunc( int(num), tags, @@ -357,7 +357,7 @@ func (n *chainnode) Bottom(num int64, field string, fieldsAndTags ...string) *In )) return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerBottomReduceSliceFunc( int(num), tags, @@ -380,11 +380,11 @@ func (n *chainnode) Bottom(num int64, field string, fieldsAndTags ...string) *In // Compute the standard deviation. 
func (n *chainnode) Stddev(field string) *InfluxQLNode { i := newInfluxQLNode("stddev", field, n.Provides(), StreamEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewFloatSliceFuncReducer(influxql.FloatStddevReduceSlice) return fn, fn }, - CreateIntegerBulkFloatReducer: func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) { + CreateIntegerFloatReducer: func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) { fn := influxql.NewIntegerSliceFuncFloatReducer(influxql.IntegerStddevReduceSlice) return fn, fn }, diff --git a/replay.go b/replay.go index 78a0f3511..c5e1f9c31 100644 --- a/replay.go +++ b/replay.go @@ -9,11 +9,12 @@ import ( dbmodels "github.com/influxdata/influxdb/models" "github.com/influxdata/kapacitor/clock" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" ) // Replay stream data from a channel source. -func ReplayStreamFromChan(clck clock.Clock, points <-chan models.Point, collector StreamCollector, recTime bool) <-chan error { +func ReplayStreamFromChan(clck clock.Clock, points <-chan edge.PointMessage, collector StreamCollector, recTime bool) <-chan error { errC := make(chan error, 1) go func() { errC <- replayStreamFromChan(clck, points, collector, recTime) @@ -25,7 +26,7 @@ func ReplayStreamFromChan(clck clock.Clock, points <-chan models.Point, collecto func ReplayStreamFromIO(clck clock.Clock, data io.ReadCloser, collector StreamCollector, recTime bool, precision string) <-chan error { allErrs := make(chan error, 2) errC := make(chan error, 1) - points := make(chan models.Point) + points := make(chan edge.PointMessage) go func() { allErrs <- replayStreamFromChan(clck, points, collector, recTime) }() @@ -45,19 +46,20 @@ func ReplayStreamFromIO(clck clock.Clock, data io.ReadCloser, collector StreamCo return errC } -func replayStreamFromChan(clck clock.Clock, points <-chan models.Point, collector StreamCollector, recTime bool) error { +func replayStreamFromChan(clck clock.Clock, points <-chan edge.PointMessage, collector StreamCollector, recTime bool) error { defer collector.Close() start := time.Time{} var diff time.Duration zero := clck.Zero() for p := range points { if start.IsZero() { - start = p.Time + start = p.Time() diff = zero.Sub(start) } - waitTime := p.Time.Add(diff).UTC() + waitTime := p.Time().Add(diff).UTC() if !recTime { - p.Time = waitTime + p = p.ShallowCopy() + p.SetTime(waitTime) } clck.Until(waitTime) err := collector.CollectPoint(p) @@ -68,7 +70,7 @@ func replayStreamFromChan(clck clock.Clock, points <-chan models.Point, collecto return nil } -func readPointsFromIO(data io.ReadCloser, points chan<- models.Point, precision string) error { +func readPointsFromIO(data io.ReadCloser, points chan<- edge.PointMessage, precision string) error { defer data.Close() defer close(points) @@ -93,22 +95,22 @@ func readPointsFromIO(data io.ReadCloser, points chan<- models.Point, precision return err } mp := mps[0] - p := models.Point{ - Database: db, - RetentionPolicy: rp, - Name: mp.Name(), - Group: models.NilGroup, - Tags: models.Tags(mp.Tags().Map()), - Fields: models.Fields(mp.Fields()), - Time: mp.Time().UTC(), - } + p := edge.NewPointMessage( + mp.Name(), + db, + rp, + models.Dimensions{}, + models.Fields(mp.Fields()), + models.Tags(mp.Tags().Map()), + mp.Time().UTC(), + ) points <- p } return nil } // Replay batch data from a channel 
source. -func ReplayBatchFromChan(clck clock.Clock, batches []<-chan models.Batch, collectors []BatchCollector, recTime bool) <-chan error { +func ReplayBatchFromChan(clck clock.Clock, batches []<-chan edge.BufferedBatchMessage, collectors []BatchCollector, recTime bool) <-chan error { errC := make(chan error, 1) if e, g := len(batches), len(collectors); e != g { errC <- fmt.Errorf("unexpected number of batch collectors. exp %d got %d", e, g) @@ -117,7 +119,7 @@ func ReplayBatchFromChan(clck clock.Clock, batches []<-chan models.Batch, collec allErrs := make(chan error, len(batches)) for i := range batches { - go func(collector BatchCollector, batches <-chan models.Batch, clck clock.Clock, recTime bool) { + go func(collector BatchCollector, batches <-chan edge.BufferedBatchMessage, clck clock.Clock, recTime bool) { allErrs <- replayBatchFromChan(clck, batches, collector, recTime) }(collectors[i], batches[i], clck, recTime) } @@ -146,11 +148,11 @@ func ReplayBatchFromIO(clck clock.Clock, data []io.ReadCloser, collectors []Batc allErrs := make(chan error, len(data)*2) for i := range data { - batches := make(chan models.Batch) - go func(collector BatchCollector, batches <-chan models.Batch, clck clock.Clock, recTime bool) { + batches := make(chan edge.BufferedBatchMessage) + go func(collector BatchCollector, batches <-chan edge.BufferedBatchMessage, clck clock.Clock, recTime bool) { allErrs <- replayBatchFromChan(clck, batches, collector, recTime) }(collectors[i], batches, clck, recTime) - go func(data io.ReadCloser, batches chan<- models.Batch) { + go func(data io.ReadCloser, batches chan<- edge.BufferedBatchMessage) { allErrs <- readBatchFromIO(data, batches) }(data[i], batches) } @@ -169,7 +171,7 @@ func ReplayBatchFromIO(clck clock.Clock, data []io.ReadCloser, collectors []Batc } // Replay the batch data from a single source -func replayBatchFromChan(clck clock.Clock, batches <-chan models.Batch, collector BatchCollector, recTime bool) error { +func replayBatchFromChan(clck clock.Clock, batches <-chan edge.BufferedBatchMessage, collector BatchCollector, recTime bool) error { defer collector.Close() // Find relative times @@ -178,39 +180,40 @@ func replayBatchFromChan(clck clock.Clock, batches <-chan models.Batch, collecto zero := clck.Zero() for b := range batches { - if len(b.Points) == 0 { + if len(b.Points()) == 0 { // Emit empty batch - if b.TMax.IsZero() { + if b.Begin().Time().IsZero() { // Set tmax to last batch if not set. 
- b.TMax = tmax + b.Begin().SetTime(tmax) } else { - b.TMax = b.TMax.UTC() - tmax = b.TMax + tmax = b.Begin().Time().UTC() + b.Begin().SetTime(tmax) } if err := collector.CollectBatch(b); err != nil { return err } continue } + points := b.Points() if start.IsZero() { - start = b.Points[0].Time + start = points[0].Time() diff = zero.Sub(start) } var lastTime time.Time if !recTime { - for i := range b.Points { - b.Points[i].Time = b.Points[i].Time.Add(diff).UTC() + for i := range points { + points[i].SetTime(points[i].Time().Add(diff).UTC()) } - lastTime = b.Points[len(b.Points)-1].Time + lastTime = points[len(points)-1].Time() } else { - lastTime = b.Points[len(b.Points)-1].Time.Add(diff).UTC() + lastTime = points[len(points)-1].Time().Add(diff).UTC() } clck.Until(lastTime) - if lpt := b.Points[len(b.Points)-1].Time; b.TMax.Before(lpt) { - b.TMax = lpt + if lpt := points[len(points)-1].Time(); b.Begin().Time().Before(lpt) { + b.Begin().SetTime(lpt) } - b.TMax = b.TMax.UTC() - tmax = b.TMax + tmax = b.Begin().Time().UTC() + b.Begin().SetTime(tmax) if err := collector.CollectBatch(b); err != nil { return err } @@ -219,45 +222,26 @@ func replayBatchFromChan(clck clock.Clock, batches <-chan models.Batch, collecto } // Replay the batch data from a single source -func readBatchFromIO(data io.ReadCloser, batches chan<- models.Batch) error { +func readBatchFromIO(data io.ReadCloser, batches chan<- edge.BufferedBatchMessage) error { defer close(batches) defer data.Close() - dec := json.NewDecoder(data) + dec := edge.NewBufferedBatchMessageDecoder(data) for dec.More() { - var b models.Batch - err := dec.Decode(&b) + b, err := dec.Decode() if err != nil { return err } - if len(b.Points) == 0 { + if len(b.Points()) == 0 { // do nothing continue } - if b.Group == "" { - b.Group = models.ToGroupID( - b.Name, - b.Tags, - models.Dimensions{ - ByName: b.ByName, - TagNames: models.SortedKeys(b.Tags), - }, - ) - } - // Add tags to all points - if len(b.Tags) > 0 { - for i := range b.Points { - if len(b.Points[i].Tags) == 0 { - b.Points[i].Tags = b.Tags - } - } - } batches <- b } return nil } -func WritePointForRecording(w io.Writer, p models.Point, precision string) error { - if _, err := fmt.Fprintf(w, "%s\n%s\n", p.Database, p.RetentionPolicy); err != nil { +func WritePointForRecording(w io.Writer, p edge.PointMessage, precision string) error { + if _, err := fmt.Fprintf(w, "%s\n%s\n", p.Database(), p.RetentionPolicy()); err != nil { return err } if _, err := w.Write(p.Bytes(precision)); err != nil { @@ -269,7 +253,7 @@ func WritePointForRecording(w io.Writer, p models.Point, precision string) error return nil } -func WriteBatchForRecording(w io.Writer, b models.Batch) error { +func WriteBatchForRecording(w io.Writer, b edge.BufferedBatchMessage) error { enc := json.NewEncoder(w) err := enc.Encode(b) if err != nil { diff --git a/sample.go b/sample.go index 010a2e728..254fe00ea 100644 --- a/sample.go +++ b/sample.go @@ -3,10 +3,9 @@ package kapacitor import ( "errors" "log" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" ) @@ -15,8 +14,6 @@ type SampleNode struct { node s *pipeline.SampleNode - countsMu sync.RWMutex - counts map[models.GroupID]int64 duration time.Duration } @@ -36,61 +33,72 @@ func newSampleNode(et *ExecutingTask, n *pipeline.SampleNode, l *log.Logger) (*S return sn, nil } -func (s *SampleNode) runSample([]byte) error { - valueF := func() int64 { - 
s.countsMu.RLock() - l := len(s.counts) - s.countsMu.RUnlock() - return int64(l) +func (n *SampleNode) runSample([]byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() +} + +func (n *SampleNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup()), + ), nil +} +func (n *SampleNode) newGroup() *sampleGroup { + return &sampleGroup{ + n: n, + } +} + +type sampleGroup struct { + n *SampleNode + + count int64 +} + +func (g *sampleGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + g.count = 0 + return begin, nil +} + +func (g *sampleGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + keep := g.n.shouldKeep(g.count, bp.Time()) + g.count++ + if keep { + return bp, nil } - s.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) + return nil, nil +} - switch s.Wants() { - case pipeline.StreamEdge: - for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() { - s.timer.Start() - if s.shouldKeep(p.Group, p.Time) { - s.timer.Pause() - for _, child := range s.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - s.timer.Resume() - } - s.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() { - s.timer.Start() - if s.shouldKeep(b.Group, b.TMax) { - s.timer.Pause() - for _, child := range s.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - s.timer.Resume() - } - s.timer.Stop() - } +func (g *sampleGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *sampleGroup) Point(p edge.PointMessage) (edge.Message, error) { + keep := g.n.shouldKeep(g.count, p.Time()) + g.count++ + if keep { + return p, nil } - return nil + return nil, nil +} + +func (g *sampleGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *sampleGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } -func (s *SampleNode) shouldKeep(group models.GroupID, t time.Time) bool { - if s.duration != 0 { - keepTime := t.Truncate(s.duration) +func (n *SampleNode) shouldKeep(count int64, t time.Time) bool { + if n.duration != 0 { + keepTime := t.Truncate(n.duration) return t.Equal(keepTime) } else { - s.countsMu.Lock() - count := s.counts[group] - keep := count%s.s.N == 0 - count++ - s.counts[group] = count - s.countsMu.Unlock() - return keep + return count%n.s.N == 0 } } diff --git a/server/server_test.go b/server/server_test.go index 1ede674f0..7427a2089 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -4171,7 +4171,7 @@ func TestServer_RecordReplayQuery_Missing(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.RemoveAll(tmpDir) + //defer os.RemoveAll(tmpDir) tick := `stream |from() diff --git a/services/replay/service.go b/services/replay/service.go index 395953b83..54c9d9076 100644 --- a/services/replay/service.go +++ b/services/replay/service.go @@ -22,6 +22,7 @@ import ( "github.com/influxdata/kapacitor" kclient "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/clock" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/services/httpd" @@ -79,7 +80,7 @@ 
type Service struct { Delete(*kapacitor.TaskMaster) } TaskMaster interface { - NewFork(name string, dbrps []kapacitor.DBRP, measurements []string) (*kapacitor.Edge, error) + NewFork(name string, dbrps []kapacitor.DBRP, measurements []string) (edge.StatsEdge, error) DelFork(name string) New(name string) *kapacitor.TaskMaster Stream(name string) (kapacitor.StreamCollector, error) @@ -1151,7 +1152,7 @@ func (r *Service) doLiveQueryReplay(id string, task *kapacitor.Task, clk clock.C runErrC := make(chan error, 1) switch task.Type { case kapacitor.StreamTask: - source := make(chan models.Point) + source := make(chan edge.PointMessage) go func() { runErrC <- r.runQueryStream(source, query, cluster) }() @@ -1161,12 +1162,12 @@ func (r *Service) doLiveQueryReplay(id string, task *kapacitor.Task, clk clock.C } replayErrC = kapacitor.ReplayStreamFromChan(clk, source, stream, recTime) case kapacitor.BatchTask: - source := make(chan models.Batch) + source := make(chan edge.BufferedBatchMessage) go func() { runErrC <- r.runQueryBatch(source, query, cluster) }() collectors := tm.BatchCollectors(task.ID) - replayErrC = kapacitor.ReplayBatchFromChan(clk, []<-chan models.Batch{source}, collectors, recTime) + replayErrC = kapacitor.ReplayBatchFromChan(clk, []<-chan edge.BufferedBatchMessage{source}, collectors, recTime) } for i := 0; i < 2; i++ { var err error @@ -1262,11 +1263,17 @@ func (s *Service) doRecordStream(id string, dataSource DataSource, stop time.Tim done := make(chan struct{}) go func() { closed := false - for p, ok := e.NextPoint(); ok; p, ok = e.NextPoint() { + for m, ok := e.Emit(); ok; m, ok = e.Emit() { if closed { continue } - if p.Time.After(stop) { + p, isPoint := m.(edge.PointMessage) + if !isPoint { + // Skip messages that are not points + continue + } + + if p.Time().After(stop) { closed = true close(done) //continue to read any data already on the edge, but just drop it. 
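
The doRecordStream loop above shows the shape of the new edge API: an edge now emits generic edge.Message values, and a consumer that only cares about points filters them with a type assertion before using the point accessors. Below is a minimal sketch of that pattern, using only the Emit, PointMessage, and Time() calls visible in this diff; the drainPoints helper and its handle callback are illustrative names, not part of the change.

    import (
        "time"

        "github.com/influxdata/kapacitor/edge"
    )

    // drainPoints reads messages from an edge until it closes, passing along only
    // point messages whose timestamp is at or before stop; other message kinds
    // (batch messages, barriers, etc.) are skipped.
    func drainPoints(e edge.Edge, stop time.Time, handle func(edge.PointMessage) error) error {
        for m, ok := e.Emit(); ok; m, ok = e.Emit() {
            p, isPoint := m.(edge.PointMessage)
            if !isPoint {
                continue // not a point message; drop it
            }
            if p.Time().After(stop) {
                continue // past the recording window; keep draining but discard
            }
            if err := handle(p); err != nil {
                return err
            }
        }
        return nil
    }
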
@@ -1325,7 +1332,7 @@ func (s *Service) doRecordBatch(dataSource DataSource, t *kapacitor.Task, start, return nil } -func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([]<-chan models.Batch, <-chan error, error) { +func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([]<-chan edge.BufferedBatchMessage, <-chan error, error) { // We do not open the task master so it does not need to be closed et, err := kapacitor.NewExecutingTask(s.TaskMaster.New(""), t) if err != nil { @@ -1341,11 +1348,11 @@ func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([] return nil, nil, errors.New("InfluxDB not configured, cannot record batch query") } - sources := make([]<-chan models.Batch, len(batches)) + sources := make([]<-chan edge.BufferedBatchMessage, len(batches)) errors := make(chan error, len(batches)) for batchIndex, batchQueries := range batches { - source := make(chan models.Batch) + source := make(chan edge.BufferedBatchMessage) sources[batchIndex] = source go func(cluster string, queries []*kapacitor.Query, groupByName bool) { defer close(source) @@ -1369,15 +1376,15 @@ func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([] return } for _, res := range resp.Results { - batches, err := models.ResultToBatches(res, groupByName) + batches, err := edge.ResultToBufferedBatches(res, groupByName) if err != nil { errors <- err return } for _, b := range batches { // Set stop time based off query bounds - if b.TMax.IsZero() || !q.IsGroupedByTime() { - b.TMax = q.StopTime() + if b.Begin().Time().IsZero() || !q.IsGroupedByTime() { + b.Begin().SetTime(q.StopTime()) } source <- b } @@ -1400,7 +1407,7 @@ func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([] return sources, errC, nil } -func (r *Service) saveBatchRecording(dataSource DataSource, sources []<-chan models.Batch) error { +func (r *Service) saveBatchRecording(dataSource DataSource, sources []<-chan edge.BufferedBatchMessage) error { archiver, err := dataSource.BatchArchiver() if err != nil { return err @@ -1422,7 +1429,7 @@ func (r *Service) doRecordQuery(dataSource DataSource, q string, typ RecordingTy errC := make(chan error, 2) switch typ { case StreamRecording: - points := make(chan models.Point) + points := make(chan edge.PointMessage) go func() { errC <- r.runQueryStream(points, q, cluster) }() @@ -1430,7 +1437,7 @@ func (r *Service) doRecordQuery(dataSource DataSource, q string, typ RecordingTy errC <- r.saveStreamQuery(dataSource, points, precision) }() case BatchRecording: - batches := make(chan models.Batch) + batches := make(chan edge.BufferedBatchMessage) go func() { errC <- r.runQueryBatch(batches, q, cluster) }() @@ -1447,7 +1454,7 @@ func (r *Service) doRecordQuery(dataSource DataSource, q string, typ RecordingTy return nil } -func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) error { +func (r *Service) runQueryStream(source chan<- edge.PointMessage, q, cluster string) error { defer close(source) dbrp, resp, err := r.execQuery(q, cluster) if err != nil { @@ -1455,7 +1462,7 @@ func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) } // Write results to sources for _, res := range resp.Results { - batches, err := models.ResultToBatches(res, false) + batches, err := edge.ResultToBufferedBatches(res, false) if err != nil { return err } @@ -1464,10 +1471,10 @@ func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) // Find earliest time 
of first points current := time.Time{} for _, batch := range batches { - if len(batch.Points) > 0 && + if len(batch.Points()) > 0 && (current.IsZero() || - batch.Points[0].Time.Before(current)) { - current = batch.Points[0].Time + batch.Points()[0].Time().Before(current)) { + current = batch.Points()[0].Time() } } @@ -1481,7 +1488,7 @@ func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) next := time.Time{} for b := range batches { - l := len(batches[b].Points) + l := len(batches[b].Points()) if l == 0 { if !finished[b] { finishedCount++ @@ -1491,26 +1498,27 @@ func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) } i := 0 for ; i < l; i++ { - bp := batches[b].Points[i] - if bp.Time.After(current) { - if next.IsZero() || bp.Time.Before(next) { - next = bp.Time + bp := batches[b].Points()[i] + if bp.Time().After(current) { + if next.IsZero() || bp.Time().Before(next) { + next = bp.Time() } break } // Write point - p := models.Point{ - Name: batches[b].Name, - Database: dbrp.Database, - RetentionPolicy: dbrp.RetentionPolicy, - Tags: bp.Tags, - Fields: bp.Fields, - Time: bp.Time, - } + p := edge.NewPointMessage( + batches[b].Name(), + dbrp.Database, + dbrp.RetentionPolicy, + models.Dimensions{}, + bp.Fields(), + bp.Tags(), + bp.Time(), + ) source <- p } // Remove written points - batches[b].Points = batches[b].Points[i:] + batches[b].SetPoints(batches[b].Points()[i:]) } current = next } @@ -1518,7 +1526,7 @@ func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) return nil } -func (r *Service) runQueryBatch(source chan<- models.Batch, q string, cluster string) error { +func (r *Service) runQueryBatch(source chan<- edge.BufferedBatchMessage, q string, cluster string) error { defer close(source) _, resp, err := r.execQuery(q, cluster) if err != nil { @@ -1526,7 +1534,7 @@ func (r *Service) runQueryBatch(source chan<- models.Batch, q string, cluster st } // Write results to sources for _, res := range resp.Results { - batches, err := models.ResultToBatches(res, false) + batches, err := edge.ResultToBufferedBatches(res, false) if err != nil { return err } @@ -1537,7 +1545,7 @@ func (r *Service) runQueryBatch(source chan<- models.Batch, q string, cluster st return nil } -func (r *Service) saveBatchQuery(dataSource DataSource, batches <-chan models.Batch) error { +func (r *Service) saveBatchQuery(dataSource DataSource, batches <-chan edge.BufferedBatchMessage) error { archiver, err := dataSource.BatchArchiver() if err != nil { return err @@ -1557,7 +1565,7 @@ func (r *Service) saveBatchQuery(dataSource DataSource, batches <-chan models.Ba return archiver.Close() } -func (s *Service) saveStreamQuery(dataSource DataSource, points <-chan models.Point, precision string) error { +func (s *Service) saveStreamQuery(dataSource DataSource, points <-chan edge.PointMessage, precision string) error { sw, err := dataSource.StreamWriter() if err != nil { return err diff --git a/services/scraper/service.go b/services/scraper/service.go index 46d214e57..4f18d4c1d 100644 --- a/services/scraper/service.go +++ b/services/scraper/service.go @@ -7,6 +7,7 @@ import ( "sync" "sync/atomic" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" @@ -22,7 +23,7 @@ var ( // Service represents the scraper manager type Service struct { PointsWriter interface { - WriteKapacitorPoint(models.Point) error + WriteKapacitorPoint(edge.PointMessage) error 
} mu sync.Mutex wg sync.WaitGroup @@ -182,14 +183,15 @@ func (s *Service) Append(sample *model.Sample) error { "value": value, } - return s.PointsWriter.WriteKapacitorPoint(models.Point{ - Database: db, - RetentionPolicy: rp, - Name: tags[model.MetricNameLabel], - Tags: tags, - Fields: fields, - Time: sample.Timestamp.Time(), - }) + return s.PointsWriter.WriteKapacitorPoint(edge.NewPointMessage( + tags[model.MetricNameLabel], + db, + rp, + models.Dimensions{}, + fields, + tags, + sample.Timestamp.Time(), + )) } // NeedsThrottling conforms to SampleAppender and never returns true currently. diff --git a/services/stats/service.go b/services/stats/service.go index 037f7677a..7e20634a0 100644 --- a/services/stats/service.go +++ b/services/stats/service.go @@ -28,6 +28,7 @@ import ( "time" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/server/vars" "github.com/influxdata/kapacitor/timer" @@ -118,15 +119,15 @@ func (s *Service) reportStats() { return } for _, stat := range data { - p := models.Point{ - Database: s.db, - RetentionPolicy: s.rp, - Name: stat.Name, - Group: models.NilGroup, - Tags: models.Tags(stat.Tags), - Time: now, - Fields: models.Fields(stat.Values), - } + p := edge.NewPointMessage( + stat.Name, + s.db, + s.rp, + models.Dimensions{}, + models.Fields(stat.Values), + models.Tags(stat.Tags), + now, + ) s.stream.CollectPoint(p) } } diff --git a/shift.go b/shift.go index 4c95de67d..b003c08c6 100644 --- a/shift.go +++ b/shift.go @@ -5,6 +5,7 @@ import ( "log" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) @@ -29,36 +30,46 @@ func newShiftNode(et *ExecutingTask, n *pipeline.ShiftNode, l *log.Logger) (*Shi return sn, nil } -func (s *ShiftNode) runShift([]byte) error { - switch s.Wants() { - case pipeline.StreamEdge: - for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() { - s.timer.Start() - p.Time = p.Time.Add(s.shift) - s.timer.Stop() - for _, child := range s.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() { - s.timer.Start() - b.TMax = b.TMax.Add(s.shift) - b.Points = b.ShallowCopyPoints() - for i, p := range b.Points { - b.Points[i].Time = p.Time.Add(s.shift) - } - s.timer.Stop() - for _, child := range s.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } - } - return nil +func (n *ShiftNode) runShift([]byte) error { + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() +} + +func (n *ShiftNode) doShift(t edge.TimeSetter) { + t.SetTime(t.Time().Add(n.shift)) +} + +func (n *ShiftNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + begin = begin.ShallowCopy() + n.doShift(begin) + return begin, nil +} + +func (n *ShiftNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + bp = bp.ShallowCopy() + n.doShift(bp) + return bp, nil +} + +func (n *ShiftNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (n *ShiftNode) Point(p edge.PointMessage) (edge.Message, error) { + p = p.ShallowCopy() + n.doShift(p) + return p, nil +} + +func (n *ShiftNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *ShiftNode) DeleteGroup(d 
edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } diff --git a/state_tracking.go b/state_tracking.go index 43b57a1ad..cff6bb228 100644 --- a/state_tracking.go +++ b/state_tracking.go @@ -3,146 +3,113 @@ package kapacitor import ( "fmt" "log" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/tick/ast" "github.com/influxdata/kapacitor/tick/stateful" ) type stateTracker interface { - track(p models.BatchPoint, inState bool) interface{} + track(t time.Time, inState bool) interface{} reset() } type stateTrackingGroup struct { + n *StateTrackingNode stateful.Expression - stateful.ScopePool tracker stateTracker } type StateTrackingNode struct { node - lambda *ast.LambdaNode - as string + as string - newTracker func() stateTracker + expr stateful.Expression + scopePool stateful.ScopePool - groupsMu sync.RWMutex - groups map[models.GroupID]*stateTrackingGroup + newTracker func() stateTracker } -func (stn *StateTrackingNode) group(g models.GroupID) (*stateTrackingGroup, error) { - stn.groupsMu.RLock() - stg := stn.groups[g] - stn.groupsMu.RUnlock() +func (n *StateTrackingNode) runStateTracking(_ []byte) error { + consumer := edge.NewGroupedConsumer( + n.ins[0], + n, + ) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() +} - if stg == nil { - // Grab the write lock - stn.groupsMu.Lock() - defer stn.groupsMu.Unlock() +func (n *StateTrackingNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n.newGroup()), + ), nil +} - // Check again now that we have the write lock - stg = stn.groups[g] - if stg == nil { - // Create a new tracking group - stg = &stateTrackingGroup{} +func (n *StateTrackingNode) newGroup() *stateTrackingGroup { + // Create a new tracking group + g := &stateTrackingGroup{ + n: n, + } - var err error - stg.Expression, err = stateful.NewExpression(stn.lambda.Expression) - if err != nil { - return nil, fmt.Errorf("Failed to compile expression: %v", err) - } + g.Expression = n.expr.CopyReset() - stg.ScopePool = stateful.NewScopePool(ast.FindReferenceVariables(stn.lambda.Expression)) + g.tracker = n.newTracker() + return g +} - stg.tracker = stn.newTracker() +func (g *stateTrackingGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + g.tracker.reset() + return begin, nil +} - stn.groups[g] = stg - } +func (g *stateTrackingGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + bp = bp.ShallowCopy() + err := g.track(bp) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! error while evaluating expression:", err) + return nil, nil } - return stg, nil + return bp, nil } -func (stn *StateTrackingNode) runStateTracking(_ []byte) error { - // Setup working_cardinality gauage. - valueF := func() int64 { - stn.groupsMu.RLock() - l := len(stn.groups) - stn.groupsMu.RUnlock() - return int64(l) +func (g *stateTrackingGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *stateTrackingGroup) Point(p edge.PointMessage) (edge.Message, error) { + p = p.ShallowCopy() + err := g.track(p) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! 
error while evaluating expression:", err) + return nil, nil } - stn.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - switch stn.Provides() { - case pipeline.StreamEdge: - for p, ok := stn.ins[0].NextPoint(); ok; p, ok = stn.ins[0].NextPoint() { - stn.timer.Start() - stg, err := stn.group(p.Group) - if err != nil { - return err - } - - pass, err := EvalPredicate(stg.Expression, stg.ScopePool, p.Time, p.Fields, p.Tags) - if err != nil { - stn.incrementErrorCount() - stn.logger.Println("E! error while evaluating expression:", err) - stn.timer.Stop() - continue - } - - p.Fields = p.Fields.Copy() - p.Fields[stn.as] = stg.tracker.track(models.BatchPointFromPoint(p), pass) - - stn.timer.Stop() - for _, child := range stn.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - } - case pipeline.BatchEdge: - for b, ok := stn.ins[0].NextBatch(); ok; b, ok = stn.ins[0].NextBatch() { - stn.timer.Start() - - stg, err := stn.group(b.Group) - if err != nil { - return err - } - stg.tracker.reset() - - b.Points = b.ShallowCopyPoints() - for i := 0; i < len(b.Points); { - p := &b.Points[i] - pass, err := EvalPredicate(stg.Expression, stg.ScopePool, p.Time, p.Fields, p.Tags) - if err != nil { - stn.incrementErrorCount() - stn.logger.Println("E! error while evaluating epression:", err) - b.Points = append(b.Points[:i], b.Points[i+1:]...) - continue - } - i++ - - p.Fields = p.Fields.Copy() - p.Fields[stn.as] = stg.tracker.track(*p, pass) - } - - stn.timer.Stop() - for _, child := range stn.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } + return p, nil +} + +func (g *stateTrackingGroup) track(p edge.FieldsTagsTimeSetter) error { + pass, err := EvalPredicate(g.Expression, g.n.scopePool, p) + if err != nil { + return err } + + fields := p.Fields().Copy() + fields[g.n.as] = g.tracker.track(p.Time(), pass) + p.SetFields(fields) return nil } +func (g *stateTrackingGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *stateTrackingGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + type stateDurationTracker struct { sd *pipeline.StateDurationNode @@ -153,32 +120,36 @@ func (sdt *stateDurationTracker) reset() { sdt.startTime = time.Time{} } -func (sdt *stateDurationTracker) track(p models.BatchPoint, inState bool) interface{} { +func (sdt *stateDurationTracker) track(t time.Time, inState bool) interface{} { if !inState { sdt.startTime = time.Time{} return float64(-1) } if sdt.startTime.IsZero() { - sdt.startTime = p.Time + sdt.startTime = t } - return float64(p.Time.Sub(sdt.startTime)) / float64(sdt.sd.Unit) + return float64(t.Sub(sdt.startTime)) / float64(sdt.sd.Unit) } func newStateDurationNode(et *ExecutingTask, sd *pipeline.StateDurationNode, l *log.Logger) (*StateTrackingNode, error) { if sd.Lambda == nil { return nil, fmt.Errorf("nil expression passed to StateDurationNode") } - stn := &StateTrackingNode{ - node: node{Node: sd, et: et, logger: l}, - lambda: sd.Lambda, - as: sd.As, - - groups: make(map[models.GroupID]*stateTrackingGroup), + // Validate lambda expression + expr, err := stateful.NewExpression(sd.Lambda.Expression) + if err != nil { + return nil, err + } + n := &StateTrackingNode{ + node: node{Node: sd, et: et, logger: l}, + as: sd.As, newTracker: func() stateTracker { return &stateDurationTracker{sd: sd} }, + expr: expr, + scopePool: stateful.NewScopePool(ast.FindReferenceVariables(sd.Lambda.Expression)), } - stn.node.runF = stn.runStateTracking - 
return stn, nil + n.node.runF = n.runStateTracking + return n, nil } type stateCountTracker struct { @@ -189,7 +160,7 @@ func (sct *stateCountTracker) reset() { sct.count = 0 } -func (sct *stateCountTracker) track(p models.BatchPoint, inState bool) interface{} { +func (sct *stateCountTracker) track(t time.Time, inState bool) interface{} { if !inState { sct.count = 0 return int64(-1) @@ -203,14 +174,18 @@ func newStateCountNode(et *ExecutingTask, sc *pipeline.StateCountNode, l *log.Lo if sc.Lambda == nil { return nil, fmt.Errorf("nil expression passed to StateCountNode") } - stn := &StateTrackingNode{ - node: node{Node: sc, et: et, logger: l}, - lambda: sc.Lambda, - as: sc.As, - - groups: make(map[models.GroupID]*stateTrackingGroup), + // Validate lambda expression + expr, err := stateful.NewExpression(sc.Lambda.Expression) + if err != nil { + return nil, err + } + n := &StateTrackingNode{ + node: node{Node: sc, et: et, logger: l}, + as: sc.As, newTracker: func() stateTracker { return &stateCountTracker{} }, + expr: expr, + scopePool: stateful.NewScopePool(ast.FindReferenceVariables(sc.Lambda.Expression)), } - stn.node.runF = stn.runStateTracking - return stn, nil + n.node.runF = n.runStateTracking + return n, nil } diff --git a/stats.go b/stats.go index 5536163ae..ade34e2d0 100644 --- a/stats.go +++ b/stats.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) @@ -37,31 +37,31 @@ func newStatsNode(et *ExecutingTask, n *pipeline.StatsNode, l *log.Logger) (*Sta return sn, nil } -func (s *StatsNode) runStats([]byte) error { - if s.s.AlignFlag { +func (n *StatsNode) runStats([]byte) error { + if n.s.AlignFlag { // Wait till we are roughly aligned with the interval. now := time.Now() - next := now.Truncate(s.s.Interval).Add(s.s.Interval) + next := now.Truncate(n.s.Interval).Add(n.s.Interval) after := time.NewTicker(next.Sub(now)) select { case <-after.C: after.Stop() - case <-s.closing: + case <-n.closing: after.Stop() return nil } - if err := s.emit(now); err != nil { + if err := n.emit(now); err != nil { return err } } - ticker := time.NewTicker(s.s.Interval) + ticker := time.NewTicker(n.s.Interval) defer ticker.Stop() for { select { - case <-s.closing: + case <-n.closing: return nil case now := <-ticker.C: - if err := s.emit(now); err != nil { + if err := n.emit(now); err != nil { return err } } @@ -69,40 +69,41 @@ func (s *StatsNode) runStats([]byte) error { } // Emit a set of stats data points. 
-func (s *StatsNode) emit(now time.Time) error { - s.timer.Start() - point := models.Point{ - Name: "stats", - Tags: map[string]string{"node": s.en.Name()}, - Time: now.UTC(), - } - if s.s.AlignFlag { - point.Time = point.Time.Round(s.s.Interval) +func (n *StatsNode) emit(now time.Time) error { + n.timer.Start() + defer n.timer.Stop() + + name := "stats" + t := now.UTC() + if n.s.AlignFlag { + t = t.Round(n.s.Interval) } - stats := s.en.nodeStatsByGroup() - for group, stat := range stats { - point.Fields = stat.Fields - point.Group = group - point.Dimensions = stat.Dimensions - point.Tags = stat.Tags - s.timer.Pause() - for _, out := range s.outs { - err := out.CollectPoint(point) + stats := n.en.nodeStatsByGroup() + for _, stat := range stats { + point := edge.NewPointMessage( + name, "", "", + stat.Dimensions, + stat.Fields, + stat.Tags, + t, + ) + n.timer.Pause() + for _, out := range n.outs { + err := out.Collect(point) if err != nil { return err } } - s.timer.Resume() + n.timer.Resume() } - s.timer.Stop() return nil } -func (s *StatsNode) stopStats() { - s.mu.Lock() - defer s.mu.Unlock() - if !s.closed { - s.closed = true - close(s.closing) +func (n *StatsNode) stopStats() { + n.mu.Lock() + defer n.mu.Unlock() + if !n.closed { + n.closed = true + close(n.closing) } } diff --git a/stream.go b/stream.go index e81702742..58196a46b 100644 --- a/stream.go +++ b/stream.go @@ -1,9 +1,11 @@ package kapacitor import ( + "errors" "fmt" "log" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/tick/ast" @@ -25,10 +27,10 @@ func newStreamNode(et *ExecutingTask, n *pipeline.StreamNode, l *log.Logger) (*S return sn, nil } -func (s *StreamNode) runSourceStream([]byte) error { - for pt, ok := s.ins[0].NextPoint(); ok; pt, ok = s.ins[0].NextPoint() { - for _, child := range s.outs { - err := child.CollectPoint(pt) +func (n *StreamNode) runSourceStream([]byte) error { + for m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() { + for _, child := range n.outs { + err := child.Collect(m) if err != nil { return err } @@ -42,7 +44,7 @@ type FromNode struct { s *pipeline.FromNode expression stateful.Expression scopePool stateful.ScopePool - dimensions []string + tagNames []string allDimensions bool db string rp string @@ -59,7 +61,7 @@ func newFromNode(et *ExecutingTask, n *pipeline.FromNode, l *log.Logger) (*FromN name: n.Measurement, } sn.node.runF = sn.runStream - sn.allDimensions, sn.dimensions = determineDimensions(n.Dimensions) + sn.allDimensions, sn.tagNames = determineTagNames(n.Dimensions, nil) if n.Lambda != nil { expr, err := stateful.NewExpression(n.Lambda.Expression) @@ -74,49 +76,65 @@ func newFromNode(et *ExecutingTask, n *pipeline.FromNode, l *log.Logger) (*FromN return sn, nil } -func (s *FromNode) runStream([]byte) error { - dims := models.Dimensions{ - ByName: s.s.GroupByMeasurementFlag, - } - for pt, ok := s.ins[0].NextPoint(); ok; pt, ok = s.ins[0].NextPoint() { - s.timer.Start() - if s.matches(pt) { - if s.s.Truncate != 0 { - pt.Time = pt.Time.Truncate(s.s.Truncate) - } - if s.s.Round != 0 { - pt.Time = pt.Time.Round(s.s.Round) - } - dims.TagNames = s.dimensions - pt = setGroupOnPoint(pt, s.allDimensions, dims, nil) - s.timer.Pause() - for _, child := range s.outs { - err := child.CollectPoint(pt) - if err != nil { - return err - } - } - s.timer.Resume() +func (n *FromNode) runStream([]byte) error { + consumer := edge.NewConsumerWithReceiver( + n.ins[0], + 
edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, n), + ), + ) + return consumer.Consume() +} +func (n *FromNode) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) { + return nil, errors.New("from does not support batch data") +} +func (n *FromNode) BatchPoint(edge.BatchPointMessage) (edge.Message, error) { + return nil, errors.New("from does not support batch data") +} +func (n *FromNode) EndBatch(edge.EndBatchMessage) (edge.Message, error) { + return nil, errors.New("from does not support batch data") +} + +func (n *FromNode) Point(p edge.PointMessage) (edge.Message, error) { + if n.matches(p) { + p = p.ShallowCopy() + if n.s.Truncate != 0 { + p.SetTime(p.Time().Truncate(n.s.Truncate)) } - s.timer.Stop() + if n.s.Round != 0 { + p.SetTime(p.Time().Round(n.s.Round)) + } + p.SetDimensions(models.Dimensions{ + ByName: n.s.GroupByMeasurementFlag, + TagNames: computeTagNames(p.Tags(), n.allDimensions, n.tagNames, nil), + }) + return p, nil } - return nil + return nil, nil +} + +func (n *FromNode) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (n *FromNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } -func (s *FromNode) matches(p models.Point) bool { - if s.db != "" && p.Database != s.db { +func (n *FromNode) matches(p edge.PointMessage) bool { + if n.db != "" && p.Database() != n.db { return false } - if s.rp != "" && p.RetentionPolicy != s.rp { + if n.rp != "" && p.RetentionPolicy() != n.rp { return false } - if s.name != "" && p.Name != s.name { + if n.name != "" && p.Name() != n.name { return false } - if s.expression != nil { - if pass, err := EvalPredicate(s.expression, s.scopePool, p.Time, p.Fields, p.Tags); err != nil { - s.incrementErrorCount() - s.logger.Println("E! error while evaluating WHERE expression:", err) + if n.expression != nil { + if pass, err := EvalPredicate(n.expression, n.scopePool, p); err != nil { + n.incrementErrorCount() + n.logger.Println("E! error while evaluating WHERE expression:", err) return false } else { return pass diff --git a/task.go b/task.go index 868f2cf4d..49a746ebe 100644 --- a/task.go +++ b/task.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) @@ -188,7 +189,7 @@ func (et *ExecutingTask) link() error { } // Start the task. 
-func (et *ExecutingTask) start(ins []*Edge, snapshot *TaskSnapshot) error { +func (et *ExecutingTask) start(ins []edge.StatsEdge, snapshot *TaskSnapshot) error { for _, in := range ins { et.source.addParentEdge(in) diff --git a/task_master.go b/task_master.go index ad0974e13..7604344c2 100644 --- a/task_master.go +++ b/task_master.go @@ -10,6 +10,7 @@ import ( imodels "github.com/influxdata/influxdb/models" "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/models" @@ -163,7 +164,7 @@ type TaskMaster struct { // We are mapping from (db, rp, measurement) to map of task ids to their edges // The outer map (from dbrp&measurement) is for fast access on forkPoint // While the inner map is for handling fork deletions better (see taskToForkKeys) - forks map[forkKey]map[string]*Edge + forks map[forkKey]map[string]edge.Edge // Stats for number of points each fork has received forkStats map[forkKey]*expvar.Int @@ -199,7 +200,7 @@ type forkKey struct { func NewTaskMaster(id string, info vars.Infoer, l LogService) *TaskMaster { return &TaskMaster{ id: id, - forks: make(map[forkKey]map[string]*Edge), + forks: make(map[forkKey]map[string]edge.Edge), forkStats: make(map[forkKey]*expvar.Int), taskToForkKeys: make(map[string][]forkKey), batches: make(map[string][]BatchCollector), @@ -433,24 +434,24 @@ func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) { return nil, err } - var ins []*Edge + var ins []edge.StatsEdge switch et.Task.Type { case StreamTask: e, err := tm.newFork(et.Task.ID, et.Task.DBRPs, et.Task.Measurements()) if err != nil { return nil, err } - ins = []*Edge{e} + ins = []edge.StatsEdge{e} case BatchTask: count, err := et.BatchCount() if err != nil { return nil, err } - ins = make([]*Edge, count) + ins = make([]edge.StatsEdge, count) for i := 0; i < count; i++ { in := newEdge(t.ID, "batch", fmt.Sprintf("batch%d", i), pipeline.BatchEdge, defaultEdgeBufferSize, tm.LogService) ins[i] = in - tm.batches[t.ID] = append(tm.batches[t.ID], in) + tm.batches[t.ID] = append(tm.batches[t.ID], &batchCollector{edge: in}) } } @@ -572,21 +573,55 @@ func (tm *TaskMaster) stream(name string) (StreamCollector, error) { return nil, ErrTaskMasterClosed } in := newEdge(fmt.Sprintf("task_master:%s", tm.id), name, "stream", pipeline.StreamEdge, defaultEdgeBufferSize, tm.LogService) + se := &streamEdge{edge: in} tm.wg.Add(1) go func() { defer tm.wg.Done() - tm.runForking(in) + tm.runForking(se) }() - return in, nil + return se, nil } -func (tm *TaskMaster) runForking(in *Edge) { - for p, ok := in.NextPoint(); ok; p, ok = in.NextPoint() { +type StreamCollector interface { + CollectPoint(edge.PointMessage) error + Close() error +} + +type StreamEdge interface { + CollectPoint(edge.PointMessage) error + EmitPoint() (edge.PointMessage, bool) + Close() error +} + +type streamEdge struct { + edge edge.Edge +} + +func (s *streamEdge) CollectPoint(p edge.PointMessage) error { + return s.edge.Collect(p) +} +func (s *streamEdge) EmitPoint() (edge.PointMessage, bool) { + m, ok := s.edge.Emit() + if !ok { + return nil, false + } + p, ok := m.(edge.PointMessage) + if !ok { + panic("impossible to receive non PointMessage message") + } + return p, true +} +func (s *streamEdge) Close() error { + return s.edge.Close() +} + +func (tm *TaskMaster) runForking(in StreamEdge) { + for p, ok := in.EmitPoint(); ok; p, ok = in.EmitPoint() { 
tm.forkPoint(p) } } -func (tm *TaskMaster) forkPoint(p models.Point) { +func (tm *TaskMaster) forkPoint(p edge.PointMessage) { tm.mu.RLock() locked := true defer func() { @@ -597,26 +632,26 @@ func (tm *TaskMaster) forkPoint(p models.Point) { // Create the fork keys - which is (db, rp, measurement) key := forkKey{ - Database: p.Database, - RetentionPolicy: p.RetentionPolicy, - Measurement: p.Name, + Database: p.Database(), + RetentionPolicy: p.RetentionPolicy(), + Measurement: p.Name(), } // If we have empty measurement in this db,rp we need to send it all // the points emptyMeasurementKey := forkKey{ - Database: p.Database, - RetentionPolicy: p.RetentionPolicy, + Database: p.Database(), + RetentionPolicy: p.RetentionPolicy(), Measurement: "", } // Merge the results to the forks map for _, edge := range tm.forks[key] { - _ = edge.CollectPoint(p) + _ = edge.Collect(p) } for _, edge := range tm.forks[emptyMeasurementKey] { - _ = edge.CollectPoint(p) + _ = edge.Collect(p) } c, ok := tm.forkStats[key] @@ -658,15 +693,15 @@ func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyL retentionPolicy = tm.DefaultRetentionPolicy } for _, mp := range points { - p := models.Point{ - Database: database, - RetentionPolicy: retentionPolicy, - Name: mp.Name(), - Group: models.NilGroup, - Tags: models.Tags(mp.Tags().Map()), - Fields: models.Fields(mp.Fields()), - Time: mp.Time(), - } + p := edge.NewPointMessage( + mp.Name(), + database, + retentionPolicy, + models.Dimensions{}, + models.Fields(mp.Fields()), + models.Tags(mp.Tags().Map()), + mp.Time(), + ) err := tm.writePointsIn.CollectPoint(p) if err != nil { return err @@ -675,19 +710,18 @@ func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyL return nil } -func (tm *TaskMaster) WriteKapacitorPoint(p models.Point) error { +func (tm *TaskMaster) WriteKapacitorPoint(p edge.PointMessage) error { tm.writesMu.RLock() defer tm.writesMu.RUnlock() if tm.writesClosed { return ErrTaskMasterClosed } - - p.Group = models.NilGroup - p.Dimensions = models.Dimensions{} + p = p.ShallowCopy() + p.SetDimensions(models.Dimensions{}) return tm.writePointsIn.CollectPoint(p) } -func (tm *TaskMaster) NewFork(taskName string, dbrps []DBRP, measurements []string) (*Edge, error) { +func (tm *TaskMaster) NewFork(taskName string, dbrps []DBRP, measurements []string) (edge.StatsEdge, error) { tm.mu.Lock() defer tm.mu.Unlock() return tm.newFork(taskName, dbrps, measurements) @@ -712,7 +746,7 @@ func forkKeys(dbrps []DBRP, measurements []string) []forkKey { } // internal newFork, must have acquired lock before calling. 
-func (tm *TaskMaster) newFork(taskName string, dbrps []DBRP, measurements []string) (*Edge, error) { +func (tm *TaskMaster) newFork(taskName string, dbrps []DBRP, measurements []string) (edge.StatsEdge, error) { if tm.closed { return nil, ErrTaskMasterClosed } @@ -725,7 +759,7 @@ func (tm *TaskMaster) newFork(taskName string, dbrps []DBRP, measurements []stri // Add the task to the tasksMap if it doesn't exists tasksMap, ok := tm.forks[key] if !ok { - tasksMap = make(map[string]*Edge, 0) + tasksMap = make(map[string]edge.Edge, 0) } // Add the edge to task map @@ -822,3 +856,19 @@ func (tml *TaskMasterLookup) Delete(tm *TaskMaster) { defer tml.Unlock() delete(tml.taskMasters, tm.id) } + +type BatchCollector interface { + CollectBatch(edge.BufferedBatchMessage) error + Close() error +} + +type batchCollector struct { + edge edge.Edge +} + +func (c *batchCollector) CollectBatch(batch edge.BufferedBatchMessage) error { + return c.edge.Collect(batch) +} +func (c *batchCollector) Close() error { + return c.edge.Close() +} diff --git a/udf.go b/udf.go index 23c26cbff..41abf4e1e 100644 --- a/udf.go +++ b/udf.go @@ -11,7 +11,7 @@ import ( "github.com/cenkalti/backoff" "github.com/influxdata/kapacitor/command" - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/udf" "github.com/influxdata/kapacitor/udf/agent" @@ -54,65 +54,47 @@ func newUDFNode(et *ExecutingTask, n *pipeline.UDFNode, l *log.Logger) (*UDFNode var errNodeAborted = errors.New("node aborted") -func (u *UDFNode) stopUDF() { - u.mu.Lock() - defer u.mu.Unlock() - if !u.stopped { - u.stopped = true - if u.udf != nil { - u.udf.Abort(errNodeAborted) +func (n *UDFNode) stopUDF() { + n.mu.Lock() + defer n.mu.Unlock() + if !n.stopped { + n.stopped = true + if n.udf != nil { + n.udf.Abort(errNodeAborted) } } } -func (u *UDFNode) runUDF(snapshot []byte) (err error) { +func (n *UDFNode) runUDF(snapshot []byte) (err error) { defer func() { - u.mu.Lock() - defer u.mu.Unlock() + n.mu.Lock() + defer n.mu.Unlock() //Ignore stopped errors if the udf was stopped externally - if u.stopped && (err == udf.ErrServerStopped || err == errNodeAborted) { + if n.stopped && (err == udf.ErrServerStopped || err == errNodeAborted) { err = nil } - u.stopped = true + n.stopped = true }() - err = u.udf.Open() - if err != nil { - return + + if err := n.udf.Open(); err != nil { + return err } - err = u.udf.Init(u.u.Options) - if err != nil { - return + if err := n.udf.Init(n.u.Options); err != nil { + return err } if snapshot != nil { - err = u.udf.Restore(snapshot) - if err != nil { - return + if err := n.udf.Restore(snapshot); err != nil { + return err } } + forwardErr := make(chan error, 1) go func() { - switch u.Provides() { - case pipeline.StreamEdge: - pointOut := u.udf.PointOut() - for p := range pointOut { - for _, out := range u.outs { - err := out.CollectPoint(p) - if err != nil { - forwardErr <- err - return - } - } - } - case pipeline.BatchEdge: - batchOut := u.udf.BatchOut() - for b := range batchOut { - for _, out := range u.outs { - err := out.CollectBatch(b) - if err != nil { - forwardErr <- err - return - } - } + out := n.udf.Out() + for m := range out { + if err := edge.Forward(n.outs, m); err != nil { + forwardErr <- err + return } } forwardErr <- nil @@ -120,55 +102,41 @@ func (u *UDFNode) runUDF(snapshot []byte) (err error) { // The abort callback needs to know when we are done writing // so we wrap in a wait group. 
- u.wg.Add(1) + n.wg.Add(1) go func() { - defer u.wg.Done() - switch u.Wants() { - case pipeline.StreamEdge: - pointIn := u.udf.PointIn() - for p, ok := u.ins[0].NextPoint(); ok; p, ok = u.ins[0].NextPoint() { - u.timer.Start() - select { - case pointIn <- p: - case <-u.aborted: - return - } - u.timer.Stop() - } - case pipeline.BatchEdge: - batchIn := u.udf.BatchIn() - for b, ok := u.ins[0].NextBatch(); ok; b, ok = u.ins[0].NextBatch() { - u.timer.Start() - select { - case batchIn <- b: - case <-u.aborted: - return - } - u.timer.Stop() + defer n.wg.Done() + in := n.udf.In() + for m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() { + n.timer.Start() + select { + case in <- m: + case <-n.aborted: + return } + n.timer.Stop() } }() + // wait till we are done writing - u.wg.Wait() + n.wg.Wait() // Close the udf - err = u.udf.Close() - if err != nil { - return + if err := n.udf.Close(); err != nil { + return err } + // Wait/Return any error from the forwarding goroutine - err = <-forwardErr - return + return <-forwardErr } -func (u *UDFNode) abortedCallback() { - close(u.aborted) +func (n *UDFNode) abortedCallback() { + close(n.aborted) // wait till we are done writing - u.wg.Wait() + n.wg.Wait() } -func (u *UDFNode) snapshot() ([]byte, error) { - return u.udf.Snapshot() +func (n *UDFNode) snapshot() ([]byte, error) { + return n.udf.Snapshot() } // UDFProcess wraps an external process and sends and receives data @@ -296,10 +264,8 @@ func (p *UDFProcess) Abort(err error) { p.server.Abort(err) } func (p *UDFProcess) Init(options []*agent.Option) error { return p.server.Init(options) } func (p *UDFProcess) Snapshot() ([]byte, error) { return p.server.Snapshot() } func (p *UDFProcess) Restore(snapshot []byte) error { return p.server.Restore(snapshot) } -func (p *UDFProcess) PointIn() chan<- models.Point { return p.server.PointIn() } -func (p *UDFProcess) BatchIn() chan<- models.Batch { return p.server.BatchIn() } -func (p *UDFProcess) PointOut() <-chan models.Point { return p.server.PointOut() } -func (p *UDFProcess) BatchOut() <-chan models.Batch { return p.server.BatchOut() } +func (p *UDFProcess) In() chan<- edge.Message { return p.server.In() } +func (p *UDFProcess) Out() <-chan edge.Message { return p.server.Out() } func (p *UDFProcess) Info() (udf.Info, error) { return p.server.Info() } type UDFSocket struct { @@ -368,10 +334,8 @@ func (s *UDFSocket) Abort(err error) { s.server.Abort(err) } func (s *UDFSocket) Init(options []*agent.Option) error { return s.server.Init(options) } func (s *UDFSocket) Snapshot() ([]byte, error) { return s.server.Snapshot() } func (s *UDFSocket) Restore(snapshot []byte) error { return s.server.Restore(snapshot) } -func (s *UDFSocket) PointIn() chan<- models.Point { return s.server.PointIn() } -func (s *UDFSocket) BatchIn() chan<- models.Batch { return s.server.BatchIn() } -func (s *UDFSocket) PointOut() <-chan models.Point { return s.server.PointOut() } -func (s *UDFSocket) BatchOut() <-chan models.Batch { return s.server.BatchOut() } +func (s *UDFSocket) In() chan<- edge.Message { return s.server.In() } +func (s *UDFSocket) Out() <-chan edge.Message { return s.server.Out() } func (s *UDFSocket) Info() (udf.Info, error) { return s.server.Info() } type socket struct { diff --git a/udf/server.go b/udf/server.go index 7ce2ee204..0c0ea76bb 100644 --- a/udf/server.go +++ b/udf/server.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/udf/agent" ) @@ -47,11 
+48,9 @@ type Server struct { // If abort fails after sometime this will be called killCallback func() - pointIn chan models.Point - batchIn chan models.Batch + inMsg chan edge.Message - pointOut chan models.Point - batchOut chan models.Batch + outMsg chan edge.Message stopped bool stopping chan struct{} @@ -83,7 +82,9 @@ type Server struct { snapshotResponse chan *agent.Response restoreResponse chan *agent.Response - batch *models.Batch + // Buffer up batch messages + begin *agent.BeginBatch + points []edge.BatchPointMessage } func NewServer( @@ -103,10 +104,8 @@ func NewServer( keepaliveTimeout: timeout, abortCallback: abortCallback, killCallback: killCallback, - pointIn: make(chan models.Point), - batchIn: make(chan models.Batch), - pointOut: make(chan models.Point), - batchOut: make(chan models.Batch), + inMsg: make(chan edge.Message), + outMsg: make(chan edge.Message), infoResponse: make(chan *agent.Response, 1), initResponse: make(chan *agent.Response, 1), snapshotResponse: make(chan *agent.Response, 1), @@ -116,17 +115,11 @@ func NewServer( return s } -func (s *Server) PointIn() chan<- models.Point { - return s.pointIn +func (s *Server) In() chan<- edge.Message { + return s.inMsg } -func (s *Server) BatchIn() chan<- models.Batch { - return s.batchIn -} -func (s *Server) PointOut() <-chan models.Point { - return s.pointOut -} -func (s *Server) BatchOut() <-chan models.Batch { - return s.batchOut +func (s *Server) Out() <-chan edge.Message { + return s.outMsg } func (s *Server) setError(err error) { @@ -223,8 +216,7 @@ func (s *Server) stop() error { close(s.requests) - close(s.pointIn) - close(s.batchIn) + close(s.inMsg) s.ioGroup.Wait() @@ -436,25 +428,40 @@ func (s *Server) watchKeepalive() { // Write Requests func (s *Server) writeData() error { defer s.out.Close() + var begin edge.BeginBatchMessage for { select { - case pt, ok := <-s.pointIn: - if ok { - err := s.writePoint(pt) + case m, ok := <-s.inMsg: + if !ok { + s.inMsg = nil + } + switch msg := m.(type) { + case edge.PointMessage: + err := s.writePoint(msg) if err != nil { return err } - } else { - s.pointIn = nil - } - case bt, ok := <-s.batchIn: - if ok { - err := s.writeBatch(bt) + case edge.BeginBatchMessage: + begin = msg + err := s.writeBeginBatch(msg) + if err != nil { + return err + } + case edge.BatchPointMessage: + err := s.writeBatchPoint(begin.GroupID(), msg) + if err != nil { + return err + } + case edge.EndBatchMessage: + err := s.writeEndBatch(begin.Name(), begin.Time(), begin.GroupInfo(), msg) + if err != nil { + return err + } + case edge.BufferedBatchMessage: + err := s.writeBufferedBatch(msg) if err != nil { return err } - } else { - s.batchIn = nil } case req, ok := <-s.requests: if ok { @@ -468,30 +475,30 @@ func (s *Server) writeData() error { case <-s.aborting: return s.err } - if s.pointIn == nil && s.batchIn == nil && s.requests == nil { + if s.inMsg == nil && s.requests == nil { break } } return nil } -func (s *Server) writePoint(pt models.Point) error { - strs, floats, ints := s.fieldsToTypedMaps(pt.Fields) +func (s *Server) writePoint(p edge.PointMessage) error { + strs, floats, ints := s.fieldsToTypedMaps(p.Fields()) udfPoint := &agent.Point{ - Time: pt.Time.UnixNano(), - Name: pt.Name, - Database: pt.Database, - RetentionPolicy: pt.RetentionPolicy, - Group: string(pt.Group), - Dimensions: pt.Dimensions.TagNames, - ByName: pt.Dimensions.ByName, - Tags: pt.Tags, + Time: p.Time().UnixNano(), + Name: p.Name(), + Database: p.Database(), + RetentionPolicy: p.RetentionPolicy(), + Group: 
string(p.GroupID()), + Dimensions: p.Dimensions().TagNames, + ByName: p.Dimensions().ByName, + Tags: p.Tags(), FieldsDouble: floats, FieldsInt: ints, FieldsString: strs, } req := &agent.Request{ - Message: &agent.Request_Point{udfPoint}, + Message: &agent.Request_Point{Point: udfPoint}, } return s.writeRequest(req) } @@ -543,50 +550,63 @@ func (s *Server) typeMapsToFields( return fields } -func (s *Server) writeBatch(b models.Batch) error { +func (s *Server) writeBeginBatch(begin edge.BeginBatchMessage) error { req := &agent.Request{ - Message: &agent.Request_Begin{&agent.BeginBatch{ - Name: b.Name, - Group: string(b.Group), - Tags: b.Tags, - Size: int64(len(b.Points)), - ByName: b.ByName, - }}, - } - err := s.writeRequest(req) - if err != nil { - return err + Message: &agent.Request_Begin{ + Begin: &agent.BeginBatch{ + Name: begin.Name(), + Group: string(begin.GroupID()), + Tags: begin.Tags(), + Size: int64(begin.SizeHint()), + ByName: begin.Dimensions().ByName, + }}, } - rp := &agent.Request_Point{} - req.Message = rp - for _, pt := range b.Points { - strs, floats, ints := s.fieldsToTypedMaps(pt.Fields) - udfPoint := &agent.Point{ - Time: pt.Time.UnixNano(), - Group: string(b.Group), - Tags: pt.Tags, - FieldsDouble: floats, - FieldsInt: ints, - FieldsString: strs, - } - rp.Point = udfPoint - err := s.writeRequest(req) - if err != nil { - return err - } + return s.writeRequest(req) +} + +func (s *Server) writeBatchPoint(group models.GroupID, bp edge.BatchPointMessage) error { + strs, floats, ints := s.fieldsToTypedMaps(bp.Fields()) + req := &agent.Request{ + Message: &agent.Request_Point{ + Point: &agent.Point{ + Time: bp.Time().UnixNano(), + Group: string(group), + Tags: bp.Tags(), + FieldsDouble: floats, + FieldsInt: ints, + FieldsString: strs, + }, + }, } + return s.writeRequest(req) +} - req.Message = &agent.Request_End{ - &agent.EndBatch{ - Name: b.Name, - Group: string(b.Group), - Tmax: b.TMax.UnixNano(), - Tags: b.Tags, +func (s *Server) writeEndBatch(name string, tmax time.Time, groupInfo edge.GroupInfo, end edge.EndBatchMessage) error { + req := &agent.Request{ + Message: &agent.Request_End{ + End: &agent.EndBatch{ + Name: name, + Group: string(groupInfo.ID), + Tmax: tmax.UnixNano(), + Tags: groupInfo.Tags, + }, }, } return s.writeRequest(req) } +func (s *Server) writeBufferedBatch(batch edge.BufferedBatchMessage) error { + if err := s.writeBeginBatch(batch.Begin()); err != nil { + return err + } + for _, bp := range batch.Points() { + if err := s.writeBatchPoint(batch.GroupID(), bp); err != nil { + return err + } + } + return s.writeEndBatch(batch.Name(), batch.Time(), batch.GroupInfo(), batch.End()) +} + func (s *Server) writeRequest(req *agent.Request) error { err := agent.WriteMessage(req, s.out) if err != nil { @@ -598,8 +618,7 @@ func (s *Server) writeRequest(req *agent.Request) error { // Read Responses from STDOUT of the process. 
func (s *Server) readData() error { defer func() { - close(s.pointOut) - close(s.batchOut) + close(s.outMsg) }() for { response, err := s.readResponse() @@ -652,54 +671,60 @@ func (s *Server) handleResponse(response *agent.Response) error { s.logger.Println("E!", msg.Error.Error) return errors.New(msg.Error.Error) case *agent.Response_Begin: - s.batch = &models.Batch{ - ByName: msg.Begin.ByName, - Points: make([]models.BatchPoint, 0, msg.Begin.Size), - } + s.begin = msg.Begin + s.points = make([]edge.BatchPointMessage, 0, msg.Begin.Size) case *agent.Response_Point: - if s.batch != nil { - pt := models.BatchPoint{ - Time: time.Unix(0, msg.Point.Time).UTC(), - Tags: msg.Point.Tags, - Fields: s.typeMapsToFields( + if s.points != nil { + bp := edge.NewBatchPointMessage( + s.typeMapsToFields( msg.Point.FieldsString, msg.Point.FieldsDouble, msg.Point.FieldsInt, ), - } - s.batch.Points = append(s.batch.Points, pt) + msg.Point.Tags, + time.Unix(0, msg.Point.Time).UTC(), + ) + s.points = append(s.points, bp) } else { - pt := models.Point{ - Time: time.Unix(0, msg.Point.Time).UTC(), - Name: msg.Point.Name, - Database: msg.Point.Database, - RetentionPolicy: msg.Point.RetentionPolicy, - Group: models.GroupID(msg.Point.Group), - Dimensions: models.Dimensions{ByName: msg.Point.ByName, TagNames: msg.Point.Dimensions}, - Tags: msg.Point.Tags, - Fields: s.typeMapsToFields( + p := edge.NewPointMessage( + msg.Point.Name, + msg.Point.Database, + msg.Point.RetentionPolicy, + models.Dimensions{ByName: msg.Point.ByName, TagNames: msg.Point.Dimensions}, + s.typeMapsToFields( msg.Point.FieldsString, msg.Point.FieldsDouble, msg.Point.FieldsInt, ), - } + msg.Point.Tags, + time.Unix(0, msg.Point.Time).UTC(), + ) select { - case s.pointOut <- pt: + case s.outMsg <- p: case <-s.aborting: return s.err } } case *agent.Response_End: - s.batch.Name = msg.End.Name - s.batch.TMax = time.Unix(0, msg.End.Tmax).UTC() - s.batch.Group = models.GroupID(msg.End.Group) - s.batch.Tags = msg.End.Tags + begin := edge.NewBeginBatchMessage( + msg.End.Name, + msg.End.Tags, + s.begin.ByName, + time.Unix(0, msg.End.Tmax).UTC(), + len(s.points), + ) + bufferedBatch := edge.NewBufferedBatchMessage( + begin, + s.points, + edge.NewEndBatchMessage(), + ) select { - case s.batchOut <- *s.batch: + case s.outMsg <- bufferedBatch: case <-s.aborting: return s.err } - s.batch = nil + s.begin = nil + s.points = nil default: panic(fmt.Sprintf("unexpected response message %T", msg)) } diff --git a/udf/server_test.go b/udf/server_test.go index 72371dabd..7c4ca6b84 100644 --- a/udf/server_test.go +++ b/udf/server_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/udf" "github.com/influxdata/kapacitor/udf/agent" @@ -413,18 +414,19 @@ func TestUDF_StartInitPointStop(t *testing.T) { } // Write point to server - pt := models.Point{ - Name: "test", - Database: "db", - RetentionPolicy: "rp", - Tags: models.Tags{"t1": "v1", "t2": "v2"}, - Fields: models.Fields{"f1": 1.0, "f2": 2.0}, - Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - } - s.PointIn() <- pt - rpt := <-s.PointOut() - if !reflect.DeepEqual(rpt, pt) { - t.Errorf("unexpected returned point got: %v exp %v", rpt, pt) + p := edge.NewPointMessage( + "test", + "db", + "rp", + models.Dimensions{}, + models.Fields{"f1": 1.0, "f2": 2.0}, + models.Tags{"t1": "v1", "t2": "v2"}, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + ) + s.In() <- p + rp := <-s.Out() + if !reflect.DeepEqual(rp, p) { + 
t.Errorf("unexpected returned point got: %v exp %v", rp, p) } s.Stop() @@ -501,18 +503,25 @@ func TestUDF_StartInitBatchStop(t *testing.T) { } // Write point to server - b := models.Batch{ - Name: "test", - Tags: models.Tags{"t1": "v1"}, - TMax: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Points: []models.BatchPoint{{ - Fields: models.Fields{"f1": 1.0, "f2": 2.0, "f3": int64(1), "f4": "str"}, - Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Tags: models.Tags{"t1": "v1", "t2": "v2"}, - }}, - } - s.BatchIn() <- b - rb := <-s.BatchOut() + b := edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + "test", + models.Tags{"t1": "v1"}, + false, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + 1, + ), + []edge.BatchPointMessage{ + edge.NewBatchPointMessage( + models.Fields{"f1": 1.0, "f2": 2.0, "f3": int64(1), "f4": "str"}, + models.Tags{"t1": "v1", "t2": "v2"}, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + ), + }, + edge.NewEndBatchMessage(), + ) + s.In() <- b + rb := <-s.Out() if !reflect.DeepEqual(b, rb) { t.Errorf("unexpected returned batch got: %v exp %v", rb, b) } diff --git a/udf/udf.go b/udf/udf.go index 9f7df5939..bd1cf8915 100644 --- a/udf/udf.go +++ b/udf/udf.go @@ -1,7 +1,7 @@ package udf import ( - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/udf/agent" ) @@ -16,8 +16,6 @@ type Interface interface { Snapshot() ([]byte, error) Restore(snapshot []byte) error - PointIn() chan<- models.Point - BatchIn() chan<- models.Batch - PointOut() <-chan models.Point - BatchOut() <-chan models.Batch + In() chan<- edge.Message + Out() <-chan edge.Message } diff --git a/udf_test.go b/udf_test.go index f9235a180..378badeca 100644 --- a/udf_test.go +++ b/udf_test.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/kapacitor" "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/udf" "github.com/influxdata/kapacitor/udf/agent" @@ -104,18 +105,19 @@ func testUDF_WritePoint(u udf.Interface, uio *udf_test.IO, t *testing.T) { } // Write point to server - pt := models.Point{ - Name: "test", - Database: "db", - RetentionPolicy: "rp", - Tags: models.Tags{"t1": "v1", "t2": "v2"}, - Fields: models.Fields{"f1": 1.0, "f2": 2.0}, - Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - } - u.PointIn() <- pt - rpt := <-u.PointOut() - if !reflect.DeepEqual(rpt, pt) { - t.Errorf("unexpected returned point got: %v exp %v", rpt, pt) + p := edge.NewPointMessage( + "test", + "db", + "rp", + models.Dimensions{}, + models.Fields{"f1": 1.0, "f2": 2.0}, + models.Tags{"t1": "v1", "t2": "v2"}, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + ) + u.In() <- p + rp := <-u.Out() + if !reflect.DeepEqual(rp, p) { + t.Errorf("unexpected returned point got: %v exp %v", rp, p) } u.Close() @@ -203,18 +205,25 @@ func testUDF_WriteBatch(u udf.Interface, uio *udf_test.IO, t *testing.T) { } // Write point to server - b := models.Batch{ - Name: "test", - Tags: models.Tags{"t1": "v1"}, - TMax: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Points: []models.BatchPoint{{ - Fields: models.Fields{"f1": 1.0, "f2": 2.0, "f3": int64(1), "f4": "str"}, - Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Tags: models.Tags{"t1": "v1", "t2": "v2"}, - }}, - } - u.BatchIn() <- b - rb := <-u.BatchOut() + b := edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + "test", + models.Tags{"t1": "v1"}, + false, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + 1, + ), + 
[]edge.BatchPointMessage{ + edge.NewBatchPointMessage( + models.Fields{"f1": 1.0, "f2": 2.0, "f3": int64(1), "f4": "str"}, + models.Tags{"t1": "v1", "t2": "v2"}, + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + ), + }, + edge.NewEndBatchMessage(), + ) + u.In() <- b + rb := <-u.Out() if !reflect.DeepEqual(b, rb) { t.Errorf("unexpected returned batch got: %v exp %v", rb, b) } diff --git a/union.go b/union.go index cf50613c2..23650c75d 100644 --- a/union.go +++ b/union.go @@ -4,7 +4,7 @@ import ( "log" "time" - "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/pipeline" ) @@ -12,90 +12,90 @@ type UnionNode struct { node u *pipeline.UnionNode - // Buffer of points/batches from each parent - sources [][]models.PointInterface + // Buffer of points/batches from each source. + sources [][]timeMessage // the low water marks for each source. lowMarks []time.Time rename string } +type timeMessage interface { + edge.Message + edge.TimeGetter +} + // Create a new UnionNode which combines all parent data streams into a single stream. // No transformation of any kind is performed. func newUnionNode(et *ExecutingTask, n *pipeline.UnionNode, l *log.Logger) (*UnionNode, error) { un := &UnionNode{ - u: n, - node: node{Node: n, et: et, logger: l}, + u: n, + node: node{Node: n, et: et, logger: l}, + rename: n.Rename, } un.node.runF = un.runUnion return un, nil } -func (u *UnionNode) runUnion([]byte) error { - union := make(chan srcPoint) - u.rename = u.u.Rename - // Spawn goroutine for each parent - errors := make(chan error, len(u.ins)) - for i, in := range u.ins { - go func(index int, e *Edge) { - for p, ok := e.Next(); ok; p, ok = e.Next() { - union <- srcPoint{ - src: index, - p: p, - } - } - errors <- nil - }(i, in) - } +func (n *UnionNode) runUnion([]byte) error { + // Keep buffer of values from parents so they can be ordered. - // Channel for returning the first if any errors, from parent goroutines. - errC := make(chan error, 1) + n.sources = make([][]timeMessage, len(n.ins)) + n.lowMarks = make([]time.Time, len(n.ins)) - go func() { - for range u.ins { - err := <-errors - if err != nil { - errC <- err - return - } - } - // Close the union channel once all parents are done writing - close(union) - }() + consumer := edge.NewMultiConsumerWithStats(n.ins, n) + return consumer.Consume() +} - // - // Emit values received from parents ordered by time. - // +func (n *UnionNode) BufferedBatch(src int, batch edge.BufferedBatchMessage) error { + n.timer.Start() + defer n.timer.Stop() - // Keep buffer of values from parents so they can be ordered. - u.sources = make([][]models.PointInterface, len(u.ins)) - u.lowMarks = make([]time.Time, len(u.ins)) - - for { - select { - case err := <-errC: - // One of the parents errored out, return the error. 
-				return err
-		case source, ok := <-union:
-			u.timer.Start()
-			if !ok {
-				// We are done, emit all buffered
-				return u.emitReady(true)
-			}
-			// Add newest point to buffer
-			u.sources[source.src] = append(u.sources[source.src], source.p)
+	if n.rename != "" {
+		batch = batch.ShallowCopy()
+		batch.SetBegin(batch.Begin().ShallowCopy())
+		batch.Begin().SetName(n.rename)
+	}
-			// Emit the next values
-			err := u.emitReady(false)
-			if err != nil {
-				return err
-			}
-			u.timer.Stop()
-		}
+	// Add the newest message to the buffer
+	n.sources[src] = append(n.sources[src], batch)
+
+	// Emit the next values
+	return n.emitReady(false)
+}
+
+func (n *UnionNode) Point(src int, p edge.PointMessage) error {
+	n.timer.Start()
+	defer n.timer.Stop()
+	if n.rename != "" {
+		p = p.ShallowCopy()
+		p.SetName(n.rename)
 	}
+
+	// Add newest point to buffer
+	n.sources[src] = append(n.sources[src], p)
+
+	// Emit the next values
+	return n.emitReady(false)
+}
+
+func (n *UnionNode) Barrier(src int, b edge.BarrierMessage) error {
+	n.timer.Start()
+	defer n.timer.Stop()
+
+	// Add the newest message to the buffer
+	n.sources[src] = append(n.sources[src], b)
+
+	// Emit the next values
+	return n.emitReady(false)
+}
+
+func (n *UnionNode) Finish() error {
+	// We are done, emit all buffered
+	return n.emitReady(true)
 }
-func (u *UnionNode) emitReady(drain bool) error {
+func (n *UnionNode) emitReady(drain bool) error {
 	emitted := true
 	// Emit all points until nothing changes
 	for emitted {
@@ -103,16 +103,16 @@
 		// Find low water mark
 		var mark time.Time
 		validSources := 0
-		for i, values := range u.sources {
-			sourceMark := u.lowMarks[i]
+		for i, values := range n.sources {
+			sourceMark := n.lowMarks[i]
 			if len(values) > 0 {
-				t := values[0].PointTime()
+				t := values[0].Time()
 				if mark.IsZero() || t.Before(mark) {
 					mark = t
 				}
 				sourceMark = t
 			}
-			u.lowMarks[i] = sourceMark
+			n.lowMarks[i] = sourceMark
 			if !sourceMark.IsZero() {
 				validSources++
 				// Only consider the sourceMark if we are not draining
@@ -121,7 +121,7 @@
 				}
 			}
 		}
-		if !drain && validSources != len(u.sources) {
+		if !drain && validSources != len(n.sources) {
 			// We can't continue processing until we have
 			// at least one value from each parent.
 			// Unless we are draining the buffer than we can continue.
@@ -129,12 +129,12 @@
 		}
 		// Emit all values that are at or below the mark.
-		for i, values := range u.sources {
+		for i, values := range n.sources {
 			var j int
 			l := len(values)
 			for j = 0; j < l; j++ {
-				if !values[j].PointTime().After(mark) {
-					err := u.emit(values[j])
+				if !values[j].Time().After(mark) {
+					err := n.emit(values[j])
 					if err != nil {
 						return err
 					}
@@ -145,38 +145,14 @@
 				}
 			}
 			// Drop values that were emitted
-			u.sources[i] = values[j:]
+			n.sources[i] = values[j:]
 		}
 	}
 	return nil
 }
-func (u *UnionNode) emit(v models.PointInterface) error {
-	u.timer.Pause()
-	defer u.timer.Resume()
-	switch u.Provides() {
-	case pipeline.StreamEdge:
-		p := v.(models.Point)
-		if u.rename != "" {
-			p.Name = u.rename
-		}
-		for _, child := range u.outs {
-			err := child.CollectPoint(p)
-			if err != nil {
-				return err
-			}
-		}
-	case pipeline.BatchEdge:
-		b := v.(models.Batch)
-		if u.rename != "" {
-			b.Name = u.rename
-		}
-		for _, child := range u.outs {
-			err := child.CollectBatch(b)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
+func (n *UnionNode) emit(m edge.Message) error {
+	n.timer.Pause()
+	defer n.timer.Resume()
+	return edge.Forward(n.outs, m)
 }
diff --git a/where.go b/where.go
index 3be233a27..634d0684d 100644
--- a/where.go
+++ b/where.go
@@ -4,10 +4,8 @@ import (
 	"errors"
 	"fmt"
 	"log"
-	"sync"
-	"github.com/influxdata/kapacitor/expvar"
-	"github.com/influxdata/kapacitor/models"
+	"github.com/influxdata/kapacitor/edge"
 	"github.com/influxdata/kapacitor/pipeline"
 	"github.com/influxdata/kapacitor/tick/ast"
 	"github.com/influxdata/kapacitor/tick/stateful"
@@ -18,18 +16,24 @@ type WhereNode struct {
 	w        *pipeline.WhereNode
 	endpoint string
-	expressions map[models.GroupID]stateful.Expression
-	scopePools  map[models.GroupID]stateful.ScopePool
+	expression stateful.Expression
+	scopePool  stateful.ScopePool
 }
 // Create a new WhereNode which filters down the batch or stream by a condition
 func newWhereNode(et *ExecutingTask, n *pipeline.WhereNode, l *log.Logger) (wn *WhereNode, err error) {
 	wn = &WhereNode{
-		node:        node{Node: n, et: et, logger: l},
-		w:           n,
-		expressions: make(map[models.GroupID]stateful.Expression),
-		scopePools:  make(map[models.GroupID]stateful.ScopePool),
+		node: node{Node: n, et: et, logger: l},
+		w:    n,
 	}
+
+	expr, err := stateful.NewExpression(n.Lambda.Expression)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to compile expression in where clause: %v", err)
+	}
+	wn.expression = expr
+	wn.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Lambda.Expression))
+	wn.runF = wn.runWhere
 	if n.Lambda == nil {
 		return nil, errors.New("nil expression passed to WhereNode")
@@ -37,95 +41,69 @@ func newWhereNode(et *ExecutingTask, n *pipeline.WhereNode, l *log.Logger) (wn *
 	return
 }
-func (w *WhereNode) runWhere(snapshot []byte) error {
-	var mu sync.RWMutex
-	valueF := func() int64 {
-		mu.RLock()
-		l := len(w.expressions)
-		mu.RUnlock()
-		return int64(l)
+func (n *WhereNode) runWhere(snapshot []byte) error {
+	consumer := edge.NewGroupedConsumer(
+		n.ins[0],
+		n,
+	)
+	n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())
+
+	return consumer.Consume()
+}
+
+func (n *WhereNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {
+	return edge.NewReceiverFromForwardReceiverWithStats(
+		n.outs,
+		edge.NewTimedForwardReceiver(n.timer, n.newGroup()),
+	), nil
+}
+
+func (n *WhereNode) newGroup() *whereGroup {
+	return &whereGroup{
+		n:    n,
+		expr: n.expression.CopyReset(),
+	}
+}
+
+type whereGroup struct {
+	n    *WhereNode
+	expr stateful.Expression
+}
+
+func (g 
*whereGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) { + begin = begin.ShallowCopy() + begin.SetSizeHint(0) + return begin, nil +} + +func (g *whereGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) { + return g.doWhere(bp) +} + +func (g *whereGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) { + return end, nil +} + +func (g *whereGroup) Point(p edge.PointMessage) (edge.Message, error) { + return g.doWhere(p) +} + +func (g *whereGroup) doWhere(p edge.FieldsTagsTimeGetterMessage) (edge.Message, error) { + pass, err := EvalPredicate(g.expr, g.n.scopePool, p) + if err != nil { + g.n.incrementErrorCount() + g.n.logger.Println("E! error while evaluating expression:", err) + return nil, nil } - w.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - switch w.Wants() { - case pipeline.StreamEdge: - for p, ok := w.ins[0].NextPoint(); ok; p, ok = w.ins[0].NextPoint() { - w.timer.Start() - mu.RLock() - expr := w.expressions[p.Group] - mu.RUnlock() - scopePool := w.scopePools[p.Group] - - if expr == nil { - compiledExpr, err := stateful.NewExpression(w.w.Lambda.Expression) - if err != nil { - return fmt.Errorf("Failed to compile expression in where clause: %v", err) - } - - expr = compiledExpr - mu.Lock() - w.expressions[p.Group] = expr - mu.Unlock() - - scopePool = stateful.NewScopePool(ast.FindReferenceVariables(w.w.Lambda.Expression)) - w.scopePools[p.Group] = scopePool - } - if pass, err := EvalPredicate(expr, scopePool, p.Time, p.Fields, p.Tags); pass { - w.timer.Pause() - for _, child := range w.outs { - err := child.CollectPoint(p) - if err != nil { - return err - } - } - w.timer.Resume() - } else if err != nil { - w.incrementErrorCount() - w.logger.Println("E! error while evaluating expression:", err) - } - w.timer.Stop() - } - case pipeline.BatchEdge: - for b, ok := w.ins[0].NextBatch(); ok; b, ok = w.ins[0].NextBatch() { - w.timer.Start() - mu.RLock() - expr := w.expressions[b.Group] - mu.RUnlock() - scopePool := w.scopePools[b.Group] - - if expr == nil { - compiledExpr, err := stateful.NewExpression(w.w.Lambda.Expression) - if err != nil { - return fmt.Errorf("Failed to compile expression in where clause: %v", err) - } - - expr = compiledExpr - mu.Lock() - w.expressions[b.Group] = expr - mu.Unlock() - - scopePool = stateful.NewScopePool(ast.FindReferenceVariables(w.w.Lambda.Expression)) - w.scopePools[b.Group] = scopePool - } - points := b.Points - b.Points = make([]models.BatchPoint, 0, len(b.Points)) - for _, p := range points { - if pass, err := EvalPredicate(expr, scopePool, p.Time, p.Fields, p.Tags); pass { - if err != nil { - w.incrementErrorCount() - w.logger.Println("E! 
error while evaluating WHERE expression:", err) - } - b.Points = append(b.Points, p) - } - } - w.timer.Stop() - for _, child := range w.outs { - err := child.CollectBatch(b) - if err != nil { - return err - } - } - } + if pass { + return p, nil } - return nil + return nil, nil +} + +func (g *whereGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) { + return b, nil +} +func (g *whereGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil } diff --git a/window.go b/window.go index e4f95d893..3031eaab2 100644 --- a/window.go +++ b/window.go @@ -4,10 +4,9 @@ import ( "errors" "fmt" "log" - "sync" "time" - "github.com/influxdata/kapacitor/expvar" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" ) @@ -19,6 +18,9 @@ type WindowNode struct { // Create a new WindowNode, which windows data for a period of time and emits the window. func newWindowNode(et *ExecutingTask, n *pipeline.WindowNode, l *log.Logger) (*WindowNode, error) { + if n.Period == 0 && n.PeriodCount == 0 { + return nil, errors.New("window node must have either a non zero period or non zero period count") + } wn := &WindowNode{ w: n, node: node{Node: n, et: et, logger: l}, @@ -27,113 +29,86 @@ func newWindowNode(et *ExecutingTask, n *pipeline.WindowNode, l *log.Logger) (*W return wn, nil } -type window interface { - Insert(p models.Point) (models.Batch, bool) +func (n *WindowNode) runWindow([]byte) error { + consumer := edge.NewGroupedConsumer(n.ins[0], n) + n.statMap.Set(statCardinalityGauge, consumer.CardinalityVar()) + return consumer.Consume() } -func (w *WindowNode) runWindow([]byte) error { - var mu sync.RWMutex - windows := make(map[models.GroupID]window) - valueF := func() int64 { - mu.RLock() - l := len(windows) - mu.RUnlock() - return int64(l) +func (n *WindowNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) { + r, err := n.newWindow(group, first) + if err != nil { + return nil, err } - w.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF)) - - // Loops through points windowing by group - for p, ok := w.ins[0].NextPoint(); ok; p, ok = w.ins[0].NextPoint() { - w.timer.Start() - mu.RLock() - wnd := windows[p.Group] - mu.RUnlock() - if wnd == nil { - tags := make(map[string]string, len(p.Dimensions.TagNames)) - for _, dim := range p.Dimensions.TagNames { - tags[dim] = p.Tags[dim] - } - switch { - case w.w.Period != 0: - // Window by time - wnd = newWindowByTime( - p.Time, - w.w.Period, - w.w.Every, - p.Name, - p.Group, - w.w.AlignFlag, - p.Dimensions.ByName, - w.w.FillPeriodFlag, - tags, - w.logger, - ) - case w.w.PeriodCount != 0: - wnd = newWindowByCount( - p.Name, - p.Group, - tags, - p.Dimensions.ByName, - int(w.w.PeriodCount), - int(w.w.EveryCount), - w.w.FillPeriodFlag, - w.logger, - ) - default: - // This should not be possible, but just in case. 
- return errors.New("invalid window, no period specified for either time or count") - } - mu.Lock() - windows[p.Group] = wnd - mu.Unlock() - } - batch, ok := wnd.Insert(p) - if ok { - // Send window to all children - w.timer.Pause() - for _, child := range w.outs { - err := child.CollectBatch(batch) - if err != nil { - return err - } - } - w.timer.Resume() - } - w.timer.Stop() + return edge.NewReceiverFromForwardReceiverWithStats( + n.outs, + edge.NewTimedForwardReceiver(n.timer, r), + ), nil +} + +func (n *WindowNode) DeleteGroup(group models.GroupID) { + // Nothing to do +} + +func (n *WindowNode) newWindow(group edge.GroupInfo, first edge.PointMeta) (edge.ForwardReceiver, error) { + switch { + case n.w.Period != 0: + return newWindowByTime( + first.Name(), + first.Time(), + group, + n.w.Period, + n.w.Every, + n.w.AlignFlag, + n.w.FillPeriodFlag, + n.logger, + ), nil + case n.w.PeriodCount != 0: + return newWindowByCount( + first.Name(), + group, + int(n.w.PeriodCount), + int(n.w.EveryCount), + n.w.FillPeriodFlag, + n.logger, + ), nil + default: + return nil, errors.New("unreachable code, window node should have a non-zero period or period count") } - return nil } type windowByTime struct { - buf *windowTimeBuffer - align bool + name string + group edge.GroupInfo + nextEmit time.Time - period time.Duration - every time.Duration - name string - group models.GroupID - byName bool - tags map[string]string - logger *log.Logger + + buf *windowTimeBuffer + + align, + fillPeriod bool + + period time.Duration + every time.Duration + + logger *log.Logger } func newWindowByTime( - now time.Time, + name string, + t time.Time, + group edge.GroupInfo, period, every time.Duration, - name string, - group models.GroupID, align, - byName, fillPeriod bool, - tags models.Tags, logger *log.Logger, ) *windowByTime { - // Determine first nextEmit time. + // Determine nextEmit time. 
var nextEmit time.Time if fillPeriod { - nextEmit = now.Add(period) + nextEmit = t.Add(period) if align { firstPeriod := nextEmit // Needs to be aligned with Every and be greater than now+Period @@ -144,56 +119,70 @@ func newWindowByTime( } } } else { - nextEmit = now.Add(every) + nextEmit = t.Add(every) if align { nextEmit = nextEmit.Truncate(every) } } return &windowByTime{ - buf: &windowTimeBuffer{logger: logger}, - nextEmit: nextEmit, - align: align, - period: period, - every: every, - name: name, - group: group, - byName: byName, - tags: tags, - logger: logger, + name: name, + group: group, + nextEmit: nextEmit, + buf: &windowTimeBuffer{logger: logger}, + align: align, + fillPeriod: fillPeriod, + period: period, + every: every, + logger: logger, } } -func (w *windowByTime) Insert(p models.Point) (b models.Batch, ok bool) { +func (w *windowByTime) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByTime) BatchPoint(edge.BatchPointMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByTime) EndBatch(edge.EndBatchMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByTime) Barrier(b edge.BarrierMessage) (edge.Message, error) { + //TODO(nathanielc): Implement barrier messages to flush window + return b, nil +} +func (w *windowByTime) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} + +func (w *windowByTime) Point(p edge.PointMessage) (msg edge.Message, err error) { if w.every == 0 { // Insert point before. w.buf.insert(p) // Since we are emitting every point we can use a right aligned window (oldest, now] - if !p.Time.Before(w.nextEmit) { + if !p.Time().Before(w.nextEmit) { // purge old points - oldest := p.Time.Add(-1 * w.period) + oldest := p.Time().Add(-1 * w.period) w.buf.purge(oldest, false) // get current batch - b = w.batch(p.Time) - ok = true + msg = w.batch(p.Time()) // Next emit time is now - w.nextEmit = p.Time + w.nextEmit = p.Time() } } else { // Since more points can arrive with the same time we need to use a left aligned window [oldest, now). - if !p.Time.Before(w.nextEmit) { + if !p.Time().Before(w.nextEmit) { // purge old points oldest := w.nextEmit.Add(-1 * w.period) w.buf.purge(oldest, true) // get current batch - b = w.batch(w.nextEmit) - ok = true + msg = w.batch(w.nextEmit) // Determine next emit time. // This is dependent on the current time not the last time we emitted. - w.nextEmit = p.Time.Add(w.every) + w.nextEmit = p.Time().Add(w.every) if w.align { w.nextEmit = w.nextEmit.Truncate(w.every) } @@ -204,20 +193,26 @@ func (w *windowByTime) Insert(p models.Point) (b models.Batch, ok bool) { return } -func (w *windowByTime) batch(tmax time.Time) models.Batch { - return models.Batch{ - Name: w.name, - Group: w.group, - Tags: w.tags, - TMax: tmax, - ByName: w.byName, - Points: w.buf.points(), - } +// batch returns the current window buffer as a batch message. +// TODO(nathanielc): A possible optimization could be to not buffer the data at all if we know that we do not have overlapping windows. 
+func (w *windowByTime) batch(tmax time.Time) edge.BufferedBatchMessage { + points := w.buf.points() + return edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + w.name, + w.group.Tags, + w.group.Dimensions.ByName, + tmax, + len(points), + ), + points, + edge.NewEndBatchMessage(), + ) } // implements a purpose built ring buffer for the window of points type windowTimeBuffer struct { - window []models.Point + window []edge.PointMessage start int stop int size int @@ -225,11 +220,11 @@ type windowTimeBuffer struct { } // Insert a single point into the buffer. -func (b *windowTimeBuffer) insert(p models.Point) { +func (b *windowTimeBuffer) insert(p edge.PointMessage) { if b.size == cap(b.window) { //Increase our buffer c := 2 * (b.size + 1) - w := make([]models.Point, b.size+1, c) + w := make([]edge.PointMessage, b.size+1, c) if b.size == 0 { //do nothing } else if b.stop > b.start { @@ -279,22 +274,22 @@ func (b *windowTimeBuffer) purge(oldest time.Time, inclusive bool) { } if b.start < b.stop { for ; b.start < b.stop; b.start++ { - if include(b.window[b.start].Time) { + if include(b.window[b.start].Time()) { break } } b.size = b.stop - b.start } else { - if include(b.window[l-1].Time) { + if include(b.window[l-1].Time()) { for ; b.start < l; b.start++ { - if include(b.window[b.start].Time) { + if include(b.window[b.start].Time()) { break } } b.size = l - b.start + b.stop } else { for b.start = 0; b.start < b.stop; b.start++ { - if include(b.window[b.start].Time) { + if include(b.window[b.start].Time()) { break } } @@ -304,26 +299,27 @@ func (b *windowTimeBuffer) purge(oldest time.Time, inclusive bool) { } // Returns a copy of the current buffer. -func (b *windowTimeBuffer) points() []models.BatchPoint { +// TODO(nathanielc): Optimize this function use buffered vs unbuffered batch messages. 
+func (b *windowTimeBuffer) points() []edge.BatchPointMessage { if b.size == 0 { return nil } - points := make([]models.BatchPoint, b.size) + points := make([]edge.BatchPointMessage, b.size) if b.stop > b.start { for i, p := range b.window[b.start:b.stop] { - points[i] = models.BatchPointFromPoint(p) + points[i] = edge.BatchPointFromPoint(p) } } else { j := 0 l := len(b.window) for i := b.start; i < l; i++ { p := b.window[i] - points[j] = models.BatchPointFromPoint(p) + points[j] = edge.BatchPointFromPoint(p) j++ } for i := 0; i < b.stop; i++ { p := b.window[i] - points[j] = models.BatchPointFromPoint(p) + points[j] = edge.BatchPointFromPoint(p) j++ } } @@ -331,12 +327,10 @@ func (b *windowTimeBuffer) points() []models.BatchPoint { } type windowByCount struct { - name string - group models.GroupID - tags models.Tags - byName bool + name string + group edge.GroupInfo - buf []models.BatchPoint + buf []edge.BatchPointMessage start int stop int period int @@ -350,9 +344,7 @@ type windowByCount struct { func newWindowByCount( name string, - group models.GroupID, - tags models.Tags, - byName bool, + group edge.GroupInfo, period, every int, fillPeriod bool, @@ -365,22 +357,32 @@ func newWindowByCount( return &windowByCount{ name: name, group: group, - tags: tags, - byName: byName, - buf: make([]models.BatchPoint, period), + buf: make([]edge.BatchPointMessage, period), period: period, every: every, nextEmit: nextEmit, logger: logger, } } +func (w *windowByCount) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByCount) BatchPoint(edge.BatchPointMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByCount) EndBatch(edge.EndBatchMessage) (edge.Message, error) { + return nil, errors.New("window does not support batch data") +} +func (w *windowByCount) Barrier(b edge.BarrierMessage) (edge.Message, error) { + //TODO(nathanielc): Implement barrier messages to flush window + return b, nil +} +func (w *windowByCount) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) { + return d, nil +} -func (w *windowByCount) Insert(p models.Point) (b models.Batch, ok bool) { - w.buf[w.stop] = models.BatchPoint{ - Time: p.Time, - Fields: p.Fields, - Tags: p.Tags, - } +func (w *windowByCount) Point(p edge.PointMessage) (msg edge.Message, err error) { + w.buf[w.stop] = edge.BatchPointFromPoint(p) w.stop = (w.stop + 1) % w.period if w.size == w.period { w.start = (w.start + 1) % w.period @@ -390,31 +392,33 @@ func (w *windowByCount) Insert(p models.Point) (b models.Batch, ok bool) { w.count++ //Check if its time to emit if w.count == w.nextEmit { - b = w.batch() - ok = true + w.nextEmit += w.every + msg = w.batch() } return } -func (w *windowByCount) batch() models.Batch { - w.nextEmit += w.every +func (w *windowByCount) batch() edge.BufferedBatchMessage { points := w.points() - return models.Batch{ - Name: w.name, - Group: w.group, - Tags: w.tags, - TMax: points[len(points)-1].Time, - ByName: w.byName, - Points: points, - } + return edge.NewBufferedBatchMessage( + edge.NewBeginBatchMessage( + w.name, + w.group.Tags, + w.group.Dimensions.ByName, + points[len(points)-1].Time(), + len(points), + ), + points, + edge.NewEndBatchMessage(), + ) } // Returns a copy of the current buffer. 
-func (w *windowByCount) points() []models.BatchPoint { +func (w *windowByCount) points() []edge.BatchPointMessage { if w.size == 0 { return nil } - points := make([]models.BatchPoint, w.size) + points := make([]edge.BatchPointMessage, w.size) if w.stop > w.start { copy(points, w.buf[w.start:w.stop]) } else { diff --git a/window_test.go b/window_test.go index 4583f227e..2f5caea89 100644 --- a/window_test.go +++ b/window_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/influxdata/kapacitor/edge" "github.com/influxdata/kapacitor/models" "github.com/stretchr/testify/assert" ) @@ -23,9 +24,13 @@ func TestWindowBufferByTime(t *testing.T) { for i := 1; i <= size; i++ { t := time.Unix(int64(i), 0) - p := models.Point{ - Time: t, - } + p := edge.NewPointMessage( + "name", "db", "rp", + models.Dimensions{}, + nil, + nil, + t, + ) buf.insert(p) assert.Equal(i, buf.size) @@ -46,7 +51,7 @@ func TestWindowBufferByTime(t *testing.T) { points := buf.points() if assert.Equal(size-i, len(points)) { for _, p := range points { - assert.True(!p.Time.Before(oldest), "Point %s is not after oldest time %s", p.Time, oldest) + assert.True(!p.Time().Before(oldest), "Point %s is not after oldest time %s", p.Time(), oldest) } } } @@ -58,9 +63,13 @@ func TestWindowBufferByTime(t *testing.T) { for i := 1; i <= size*2; i++ { t := time.Unix(int64(i+size), 0) - p := models.Point{ - Time: t, - } + p := edge.NewPointMessage( + "name", "db", "rp", + models.Dimensions{}, + nil, + nil, + t, + ) buf.insert(p) assert.Equal(i, buf.size) @@ -69,7 +78,7 @@ func TestWindowBufferByTime(t *testing.T) { if assert.Equal(i, len(points)) { for _, p := range points { if assert.NotNil(p, "i:%d", i) { - assert.True(!p.Time.Before(oldest), "Point %s is not after oldest time %s", p.Time, oldest) + assert.True(!p.Time().Before(oldest), "Point %s is not after oldest time %s", p.Time(), oldest) } } } @@ -118,9 +127,7 @@ func TestWindowBufferByCount(t *testing.T) { t.Logf("Starting test size %d period %d every %d", tc.size, tc.period, tc.every) w := newWindowByCount( "test", - models.NilGroup, - nil, - false, + edge.GroupInfo{}, tc.period, tc.every, tc.fillPeriod, @@ -129,16 +136,26 @@ func TestWindowBufferByCount(t *testing.T) { // fill buffer for i := 1; i <= tc.size; i++ { - p := models.Point{ - Time: time.Unix(int64(i), 0).UTC(), + p := edge.NewPointMessage( + "name", "db", "rp", + models.Dimensions{}, + nil, + nil, + time.Unix(int64(i), 0).UTC(), + ) + msg, err := w.Point(p) + if err != nil { + t.Fatal(err) } - b, emit := w.Insert(p) expEmit := tc.every == 0 || i%tc.every == 0 if tc.fillPeriod { expEmit = i > tc.period && expEmit } - if got, exp := emit, expEmit; got != exp { - t.Errorf("%d unexpected emit: got %t exp %t %d %d %d", i, got, exp, w.period, w.nextEmit, w.stop) + if expEmit && msg == nil { + t.Errorf("%d unexpected nil forward message: got nil message, expected non nil message", i) + } + if !expEmit && msg != nil { + t.Errorf("%d unexpected forward message: got non-nil message %v, expected nil message", i, msg) } size := i @@ -159,18 +176,22 @@ func TestWindowBufferByCount(t *testing.T) { t.Errorf("%d unexpected stop: got %d exp %d", i, got, exp) } - if emit { + if msg != nil { + if msg.Type() != edge.BufferedBatch { + t.Fatalf("unexpected message type %v", msg.Type()) + } + b := msg.(edge.BufferedBatchMessage) l := i if l > tc.period { l = tc.period } - points := b.Points + points := b.Points() if got, exp := len(points), l; got != exp { t.Fatalf("%d unexpected number of points got %d exp %d", i, got, exp) } for 
j, p := range points { - if got, exp := p.Time, time.Unix(int64(i+j-len(points)+1), 0).UTC(); !got.Equal(exp) { + if got, exp := p.Time(), time.Unix(int64(i+j-len(points)+1), 0).UTC(); !got.Equal(exp) { t.Errorf("%d unexpected point[%d].Time: got %v exp %v", i, j, got, exp) } }