group_by.go (forked from influxdata/kapacitor)
package kapacitor

import (
	"log"
	"sort"
	"time"

	"github.com/influxdata/kapacitor/models"
	"github.com/influxdata/kapacitor/pipeline"
	"github.com/influxdata/kapacitor/tick"
)
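
// GroupByNode splits the incoming stream or batch edge into separate groups
// based on the dimensions configured on the pipeline node.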
type GroupByNode struct {
	node
	g *pipeline.GroupByNode

	dimensions    []string
	allDimensions bool
}

// Create a new GroupByNode which splits the stream dynamically based on the specified dimensions.
func newGroupByNode(et *ExecutingTask, n *pipeline.GroupByNode, l *log.Logger) (*GroupByNode, error) {
	gn := &GroupByNode{
		node: node{Node: n, et: et, logger: l},
		g:    n,
	}
	gn.node.runF = gn.runGroupBy

	gn.allDimensions, gn.dimensions = determineDimensions(n.Dimensions)
	return gn, nil
}
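
// runGroupBy reads from the node's single input edge, assigns each point or
// batch to a group based on its tag values, and forwards the result to every
// child node.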
func (g *GroupByNode) runGroupBy([]byte) error {
	switch g.Wants() {
	case pipeline.StreamEdge:
		// Stream edge: tag each point with its group and pass it straight through.
		for pt, ok := g.ins[0].NextPoint(); ok; pt, ok = g.ins[0].NextPoint() {
			g.timer.Start()
			pt = setGroupOnPoint(pt, g.allDimensions, g.dimensions)
			g.timer.Stop()
			for _, child := range g.outs {
				err := child.CollectPoint(pt)
				if err != nil {
					return err
				}
			}
		}
	default:
		// Batch edge: split each incoming batch into per-group batches,
		// emitting the buffered groups whenever the batch time advances.
		var lastTime time.Time
		groups := make(map[models.GroupID]*models.Batch)
		for b, ok := g.ins[0].NextBatch(); ok; b, ok = g.ins[0].NextBatch() {
			g.timer.Start()
			if !b.TMax.Equal(lastTime) {
				lastTime = b.TMax
				// Emit all groups
				for id, group := range groups {
					for _, child := range g.outs {
						err := child.CollectBatch(*group)
						if err != nil {
							return err
						}
					}
					// Remove from groups
					delete(groups, id)
				}
			}
			for _, p := range b.Points {
				var dims []string
				if g.allDimensions {
					dims = models.SortedKeys(p.Tags)
				} else {
					dims = g.dimensions
				}
				groupID := models.TagsToGroupID(dims, p.Tags)
				group, ok := groups[groupID]
				if !ok {
					// First point seen for this group: start a new batch
					// carrying only the grouped tags.
					tags := make(map[string]string, len(dims))
					for _, dim := range dims {
						tags[dim] = p.Tags[dim]
					}
					group = &models.Batch{
						Name:  b.Name,
						Group: groupID,
						TMax:  b.TMax,
						Tags:  tags,
					}
					groups[groupID] = group
				}
				group.Points = append(group.Points, p)
			}
			g.timer.Stop()
		}
	}
	return nil
}
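
// determineDimensions normalizes the raw groupBy dimensions: a *tick.StarNode
// means group by all tags, otherwise the string dimensions are collected and
// sorted.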
func determineDimensions(dimensions []interface{}) (allDimensions bool, realDimensions []string) {
DIMS:
	for _, dim := range dimensions {
		switch d := dim.(type) {
		case string:
			realDimensions = append(realDimensions, d)
		case *tick.StarNode:
			allDimensions = true
			break DIMS
		}
	}
	sort.Strings(realDimensions)
	return
}
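
// setGroupOnPoint computes the group ID for a single stream point from its
// tags and records both the ID and the dimensions used on the point.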
func setGroupOnPoint(p models.Point, allDimensions bool, dimensions []string) models.Point {
	if allDimensions {
		dimensions = models.SortedKeys(p.Tags)
	}
	p.Group = models.TagsToGroupID(dimensions, p.Tags)
	p.Dimensions = dimensions
	return p
}