task_master.go
package kapacitor

import (
    "log"
    "net"
    "os"

    "github.com/influxdb/influxdb/client"
    "github.com/influxdb/kapacitor/pipeline"
    "github.com/influxdb/kapacitor/services/httpd"
    "github.com/influxdb/kapacitor/wlog"
)
// TaskMaster is an execution framework for a set of tasks.
type TaskMaster struct {
    //Stream map[DBRP]StreamCollector
    Stream StreamCollector

    HTTPDService interface {
        AddRoutes([]httpd.Route) error
        DelRoutes([]httpd.Route)
        Addr() net.Addr
    }
    InfluxDBService interface {
        NewClient() (*client.Client, error)
    }
    SMTPService interface {
        SendMail(from string, to []string, subject string, msg string)
    }

    // Incoming stream and forks
    in    *Edge
    forks map[string]*Edge
    // Set on incoming batches
    batches map[string]*Edge
    // Executing tasks
    tasks map[string]*ExecutingTask

    logger *log.Logger
}
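
// The anonymous interface fields above mean any value with the matching
// method set can be plugged in. As an illustrative sketch (not part of the
// original file), a hypothetical no-op implementation of the SMTPService
// field could look like this:
type noopSMTPService struct{}

// SendMail satisfies the SMTPService interface; it intentionally does
// nothing, since this type exists only to show the required method shape.
func (noopSMTPService) SendMail(from string, to []string, subject string, msg string) {}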
// NewTaskMaster creates a new TaskMaster with a fresh incoming stream edge.
func NewTaskMaster() *TaskMaster {
    src := newEdge("src->stream", pipeline.StreamEdge)
    return &TaskMaster{
        Stream:  src,
        in:      src,
        forks:   make(map[string]*Edge),
        batches: make(map[string]*Edge),
        tasks:   make(map[string]*ExecutingTask),
        logger:  wlog.New(os.Stderr, "[tm] ", log.LstdFlags),
    }
}
// Open starts the forking loop that distributes incoming stream data to the forks.
func (tm *TaskMaster) Open() error {
    go tm.runForking()
    return nil
}

// Close closes the incoming stream edge and stops all executing tasks.
func (tm *TaskMaster) Close() error {
    tm.in.Close()
    for _, et := range tm.tasks {
        tm.StopTask(et.Task.Name)
    }
    return nil
}
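
// exampleLifecycle is a hypothetical sketch (not part of the original file)
// showing the intended open/close ordering: construct the TaskMaster, Open it
// to start the forking loop, and Close it, which also stops every executing
// task.
func exampleLifecycle() error {
    tm := NewTaskMaster()
    if err := tm.Open(); err != nil {
        return err
    }
    // Data written to tm.Stream is fanned out to the forks by runForking
    // until the incoming edge is closed.
    return tm.Close()
}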
// StartTask creates an ExecutingTask for t, wires up its input edge based on
// the task type, and starts it.
func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) {
    et := NewExecutingTask(tm, t)

    var in *Edge
    switch et.Task.Type {
    case StreamerTask:
        in = tm.NewFork(et.Task.Name)
    case BatcherTask:
        in = newEdge("batch->"+et.Task.Name, pipeline.BatchEdge)
        tm.batches[t.Name] = in
    }

    err := et.start(in)
    if err != nil {
        return nil, err
    }

    tm.tasks[et.Task.Name] = et
    tm.logger.Println("I! Started task:", t.Name)
    return et, nil
}
// BatchCollector returns the batch collector for the named task, if any.
func (tm *TaskMaster) BatchCollector(name string) BatchCollector {
    return tm.batches[name]
}

// StopTask stops the named task if it is executing and removes it from the
// set of executing tasks.
func (tm *TaskMaster) StopTask(name string) {
    if et, ok := tm.tasks[name]; ok {
        delete(tm.tasks, name)
        if et.Task.Type == StreamerTask {
            tm.DelFork(et.Task.Name)
        }
        et.stop()
        tm.logger.Println("I! Stopped task:", name)
    }
}
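
// exampleStartStop is a hypothetical sketch (not part of the original file)
// showing how a caller might drive StartTask and StopTask; it assumes the
// *Task has already been constructed elsewhere.
func exampleStartStop(tm *TaskMaster, t *Task) error {
    et, err := tm.StartTask(t)
    if err != nil {
        return err
    }
    // ... let the task run ...
    tm.StopTask(et.Task.Name)
    return nil
}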
// runForking reads points from the incoming stream edge and fans each point
// out to every fork. A fork whose collect fails has its task stopped.
func (tm *TaskMaster) runForking() {
    for p, ok := tm.in.NextPoint(); ok; p, ok = tm.in.NextPoint() {
        for name, out := range tm.forks {
            err := out.CollectPoint(p)
            if err != nil {
                tm.StopTask(name)
            }
        }
    }
    // The incoming edge has been closed; close all forks as well.
    for _, out := range tm.forks {
        out.Close()
    }
}
// NewFork creates and registers a new fork of the incoming stream for the named task.
func (tm *TaskMaster) NewFork(name string) *Edge {
    short := name
    if len(short) > 8 {
        short = short[:8]
    }
    e := newEdge("stream->"+name, pipeline.StreamEdge)
    tm.forks[name] = e
    return e
}

// DelFork removes and closes the named fork.
func (tm *TaskMaster) DelFork(name string) {
    fork := tm.forks[name]
    delete(tm.forks, name)
    if fork != nil {
        fork.Close()
    }
}