Skip to content

Commit

Permalink
Add bash completion, fix a few oddities with the cli (influxdata#611)
Browse files Browse the repository at this point in the history
* add bash completion, fix a few oddities with the cli
  • Loading branch information
Nathaniel Cook committed Jun 6, 2016
1 parent d244ff3 commit 80d4e71
Show file tree
Hide file tree
Showing 4 changed files with 291 additions and 101 deletions.
4 changes: 3 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ kapacitor define-template generic_mean_alert -tick path/to/above/script.tick -ty
Next define a task that uses the template:

```
kapacitor define cpu_alert -template-id generic_mean_alert -vars cpu_vars.json -dbrp telegraf.default
kapacitor define cpu_alert -template generic_mean_alert -vars cpu_vars.json -dbrp telegraf.default
```

Where `cpu_vars.json` would look like this:
Expand Down Expand Up @@ -168,6 +168,8 @@ batch
- [#606](https://github.com/influxdata/kapacitor/pull/606): Add Holt-Winters forecasting method.
- [#605](https://github.com/influxdata/kapacitor/pull/605): BREAKING: StatsNode for batch edge now count the number of points in a batch instead of count batches as a whole.
This is only breaking if you have a deadman switch configured on a batch edge.
- [#611](https://github.com/influxdata/kapacitor/pull/611): Adds bash completion to the kapacitor CLI tool.


### Bugfixes

Expand Down
4 changes: 4 additions & 0 deletions build.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
POSTINST_SCRIPT = "scripts/post-install.sh"
POSTUNINST_SCRIPT = "scripts/post-uninstall.sh"
LOGROTATE_CONFIG = "etc/logrotate.d/kapacitor"
BASH_COMPLETION_SH = "usr/share/bash-completion/completions/kapacitor"
DEFAULT_CONFIG = "etc/kapacitor/kapacitor.conf"
PREINST_SCRIPT = None

Expand Down Expand Up @@ -68,6 +69,7 @@
MAINTAINER,
DEFAULT_CONFIG,
LOGROTATE_CONFIG,
BASH_COMPLETION_SH,
' --directories '.join([
LOG_DIR[1:],
DATA_DIR[1:],
Expand Down Expand Up @@ -126,6 +128,7 @@ def create_package_fs(build_root):
os.makedirs(os.path.join(build_root, SCRIPT_DIR[1:]))
os.makedirs(os.path.join(build_root, os.path.dirname(DEFAULT_CONFIG)))
os.makedirs(os.path.join(build_root, os.path.dirname(LOGROTATE_CONFIG)))
os.makedirs(os.path.join(build_root, os.path.dirname(BASH_COMPLETION_SH)))

def package_scripts(build_root, config_only=False):
"""Copy the necessary scripts and configuration files to the package
Expand All @@ -141,6 +144,7 @@ def package_scripts(build_root, config_only=False):
shutil.copy(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
shutil.copy(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
shutil.copy(LOGROTATE_CONFIG, os.path.join(build_root, LOGROTATE_CONFIG))
shutil.copy(BASH_COMPLETION_SH, os.path.join(build_root, BASH_COMPLETION_SH))
shutil.copy(DEFAULT_CONFIG, os.path.join(build_root, DEFAULT_CONFIG))

def run_generate():
Expand Down
204 changes: 104 additions & 100 deletions cmd/kapacitor/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -489,7 +489,7 @@ var (
defineFlags = flag.NewFlagSet("define", flag.ExitOnError)
dtick = defineFlags.String("tick", "", "Path to the TICKscript")
dtype = defineFlags.String("type", "", "The task type (stream|batch)")
dtemplate = defineFlags.String("template-id", "", "Optional template ID")
dtemplate = defineFlags.String("template", "", "Optional template ID")
dvars = defineFlags.String("vars", "", "Optional path to a JSON vars file")
dnoReload = defineFlags.Bool("no-reload", false, "Do not reload the task even if it is enabled")
ddbrp = make(dbrps, 0)
Expand Down Expand Up @@ -876,19 +876,6 @@ Replay data to a task directly without saving a recording.
The command is a hybrid of the 'kapacitor record batch|query' and 'kapacitor replay' commands.
See either 'kapacitor replay-live batch' or 'kapacitor replay-live query' for more details
Examples:
$ kapacitor replay-live batch -task cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z
This replays the result of the query defined in task 'cpu_idle' and runs the query
until the queries reaches the stop time, starting at time 'start' and incrementing
by the schedule defined in the task.
$ kapacitor replay-liave batch -task cpu_idle -past 10h
This replays the result of the query defined in task 'cpu_idle' and runs the query
until the queries reaches the present time.
The starting time for the queries is 'now - 10h' and increments by the schedule defined in the task.
`
fmt.Fprintln(os.Stderr, u)
}
Expand All @@ -902,10 +889,17 @@ This is similar to 'kapacitor record batch ...' but without saving a recording.
Examples:
$ kapacitor replay-live query -task cpu_alert -rec-time -query 'select value from "telegraf"."default"."cpu_idle" where time > now() - 1h and time < now()'
$ kapacitor replay-live batch -task cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z
This replays the result of the query against the cpu_alert task.
This replays the result of the query defined in task 'cpu_idle' and runs the query
until the queries reaches the stop time, starting at time 'start' and incrementing
by the schedule defined in the task.
$ kapacitor replay-live batch -task cpu_idle -past 10h
This replays the result of the query defined in task 'cpu_idle' and runs the query
until the queries reaches the present time.
The starting time for the queries is 'now - 10h' and increments by the schedule defined in the task.
Options:
`
Expand All @@ -920,6 +914,13 @@ Replay the result of a querty against a task.
This is similar to 'kapacitor record query...' but without saving a recording.
Examples:
$ kapacitor replay-live query -task cpu_alert -rec-time -query 'select value from "telegraf"."default"."cpu_idle" where time > now() - 1h and time < now()'
This replays the result of the query against the cpu_alert task.
Options:
`
fmt.Fprintln(os.Stderr, u)
Expand Down Expand Up @@ -986,7 +987,7 @@ func doReplayLive(args []string) error {
case "query":
replayLiveQueryFlags.Parse(args[1:])
if *rlqQuery == "" || *rlqTask == "" {
recordQueryFlags.Usage()
replayLiveQueryFlags.Usage()
return errors.New("both query and task are required")
}
noWait = *rlqNowait
Expand Down Expand Up @@ -1317,7 +1318,7 @@ func doShowTemplate(args []string) error {
// List

func listUsage() {
var u = `Usage: kapacitor list (tasks|templates|recordings|replays) [(task|template|recording|replay) ID or pattern]
var u = `Usage: kapacitor list (tasks|templates|recordings|replays) [ID or pattern]...
List tasks, templates, recordings, or replays and their current state.
Expand All @@ -1327,22 +1328,17 @@ If no ID or pattern is given then all items will be listed.
}

func doList(args []string) error {

if len(args) == 0 {
fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'")
listUsage()
os.Exit(2)
}

if len(args) > 2 {
fmt.Fprintln(os.Stderr, "Invalid usage of list")
listUsage()
os.Exit(2)
}

var pattern string
if len(args) == 2 {
pattern = args[1]
var patterns []string
if len(args) >= 2 {
patterns = args[1:]
} else {
patterns = []string{""}
}

limit := 100
Expand All @@ -1351,99 +1347,107 @@ func doList(args []string) error {
case "tasks":
outFmt := "%-30s%-10v%-10v%-10v%s\n"
fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Executing", "Databases and Retention Policies")
offset := 0
for {
tasks, err := cli.ListTasks(&client.ListTasksOptions{
Pattern: pattern,
Fields: []string{"type", "status", "executing", "dbrps"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}
for _, pattern := range patterns {
offset := 0
for {
tasks, err := cli.ListTasks(&client.ListTasksOptions{
Pattern: pattern,
Fields: []string{"type", "status", "executing", "dbrps"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}

for _, t := range tasks {
fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs)
}
if len(tasks) != limit {
break
for _, t := range tasks {
fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs)
}
if len(tasks) != limit {
break
}
offset += limit
}
offset += limit
}
case "templates":
outFmt := "%-30s%-10v%-40v\n"
fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Vars")
offset := 0
for {
templates, err := cli.ListTemplates(&client.ListTemplatesOptions{
Pattern: pattern,
Fields: []string{"type", "vars"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}
for _, pattern := range patterns {
offset := 0
for {
templates, err := cli.ListTemplates(&client.ListTemplatesOptions{
Pattern: pattern,
Fields: []string{"type", "vars"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}

for _, t := range templates {
vars := make([]string, 0, len(t.Vars))
for name := range t.Vars {
vars = append(vars, name)
for _, t := range templates {
vars := make([]string, 0, len(t.Vars))
for name := range t.Vars {
vars = append(vars, name)
}
sort.Strings(vars)
fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ","))
}
sort.Strings(vars)
fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ","))
}
if len(templates) != limit {
break
if len(templates) != limit {
break
}
offset += limit
}
offset += limit
}
case "recordings":
outFmt := "%-40s%-8v%-10s%-10s%-23s\n"
fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Size", "Date")
offset := 0
for {
recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{
Pattern: pattern,
Fields: []string{"type", "size", "date", "status"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}
for _, pattern := range patterns {
offset := 0
for {
recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{
Pattern: pattern,
Fields: []string{"type", "size", "date", "status"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}

for _, r := range recordings {
fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822))
}
if len(recordings) != limit {
break
for _, r := range recordings {
fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822))
}
if len(recordings) != limit {
break
}
offset += limit
}
offset += limit
}
case "replays":
outFmt := "%-40v%-20v%-40v%-9v%-8v%-23v\n"
fmt.Fprintf(os.Stdout, outFmt, "ID", "Task", "Recording", "Status", "Clock", "Date")
offset := 0
for {
replays, err := cli.ListReplays(&client.ListReplaysOptions{
Pattern: pattern,
Fields: []string{"task", "recording", "status", "clock", "date"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}
for _, pattern := range patterns {
offset := 0
for {
replays, err := cli.ListReplays(&client.ListReplaysOptions{
Pattern: pattern,
Fields: []string{"task", "recording", "status", "clock", "date"},
Offset: offset,
Limit: limit,
})
if err != nil {
return err
}

for _, r := range replays {
fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822))
}
if len(replays) != limit {
break
for _, r := range replays {
fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822))
}
if len(replays) != limit {
break
}
offset += limit
}
offset += limit
}
default:
return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind)
Expand Down
Loading

0 comments on commit 80d4e71

Please sign in to comment.