diff --git a/CHANGELOG.md b/CHANGELOG.md index 62c33e6cd..539132b41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,7 +54,7 @@ kapacitor define-template generic_mean_alert -tick path/to/above/script.tick -ty Next define a task that uses the template: ``` -kapacitor define cpu_alert -template-id generic_mean_alert -vars cpu_vars.json -dbrp telegraf.default +kapacitor define cpu_alert -template generic_mean_alert -vars cpu_vars.json -dbrp telegraf.default ``` Where `cpu_vars.json` would like like this: @@ -168,6 +168,8 @@ batch - [#606](https://github.com/influxdata/kapacitor/pull/606): Add Holt-Winters forecasting method. - [#605](https://github.com/influxdata/kapacitor/pull/605): BREAKING: StatsNode for batch edge now count the number of points in a batch instead of count batches as a whole. This is only breaking if you have a deadman switch configured on a batch edge. +- [#611](https://github.com/influxdata/kapacitor/pull/611): Adds bash completion to the kapacitor CLI tool. + ### Bugfixes diff --git a/build.py b/build.py index 09c2bd2e6..212ca1430 100755 --- a/build.py +++ b/build.py @@ -31,6 +31,7 @@ POSTINST_SCRIPT = "scripts/post-install.sh" POSTUNINST_SCRIPT = "scripts/post-uninstall.sh" LOGROTATE_CONFIG = "etc/logrotate.d/kapacitor" +BASH_COMPLETION_SH = "usr/share/bash-completion/completions/kapacitor" DEFAULT_CONFIG = "etc/kapacitor/kapacitor.conf" PREINST_SCRIPT = None @@ -68,6 +69,7 @@ MAINTAINER, DEFAULT_CONFIG, LOGROTATE_CONFIG, + BASH_COMPLETION_SH, ' --directories '.join([ LOG_DIR[1:], DATA_DIR[1:], @@ -126,6 +128,7 @@ def create_package_fs(build_root): os.makedirs(os.path.join(build_root, SCRIPT_DIR[1:])) os.makedirs(os.path.join(build_root, os.path.dirname(DEFAULT_CONFIG))) os.makedirs(os.path.join(build_root, os.path.dirname(LOGROTATE_CONFIG))) + os.makedirs(os.path.join(build_root, os.path.dirname(BASH_COMPLETION_SH))) def package_scripts(build_root, config_only=False): """Copy the necessary scripts and configuration files to the package @@ 
-141,6 +144,7 @@ def package_scripts(build_root, config_only=False): shutil.copy(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) shutil.copy(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) shutil.copy(LOGROTATE_CONFIG, os.path.join(build_root, LOGROTATE_CONFIG)) + shutil.copy(BASH_COMPLETION_SH, os.path.join(build_root, BASH_COMPLETION_SH)) shutil.copy(DEFAULT_CONFIG, os.path.join(build_root, DEFAULT_CONFIG)) def run_generate(): diff --git a/cmd/kapacitor/main.go b/cmd/kapacitor/main.go index 8e37eb70b..f2902a328 100644 --- a/cmd/kapacitor/main.go +++ b/cmd/kapacitor/main.go @@ -489,7 +489,7 @@ var ( defineFlags = flag.NewFlagSet("define", flag.ExitOnError) dtick = defineFlags.String("tick", "", "Path to the TICKscript") dtype = defineFlags.String("type", "", "The task type (stream|batch)") - dtemplate = defineFlags.String("template-id", "", "Optional template ID") + dtemplate = defineFlags.String("template", "", "Optional template ID") dvars = defineFlags.String("vars", "", "Optional path to a JSON vars file") dnoReload = defineFlags.Bool("no-reload", false, "Do not reload the task even if it is enabled") ddbrp = make(dbrps, 0) @@ -876,19 +876,6 @@ Replay data to a task directly without saving a recording. The command is a hybrid of the 'kapacitor record batch|query' and 'kapacitor replay' commands. See either 'kapacitor replay-live batch' or 'kapacitor replay-live query' for more details -Examples: - - $ kapacitor replay-live batch -task cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z - - This replays the result of the query defined in task 'cpu_idle' and runs the query - until the queries reaches the stop time, starting at time 'start' and incrementing - by the schedule defined in the task. 
- - $ kapacitor replay-liave batch -task cpu_idle -past 10h - - This replays the result of the query defined in task 'cpu_idle' and runs the query - until the queries reaches the present time. - The starting time for the queries is 'now - 10h' and increments by the schedule defined in the task. ` fmt.Fprintln(os.Stderr, u) } @@ -902,10 +889,17 @@ This is similar to 'kapacitor record batch ...' but without saving a recording. Examples: - $ kapacitor replay-live query -task cpu_alert -rec-time -query 'select value from "telegraf"."default"."cpu_idle" where time > now() - 1h and time < now()' + $ kapacitor replay-live batch -task cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z - This replays the result of the query against the cpu_alert task. + This replays the result of the query defined in task 'cpu_idle' and runs the query + until the queries reach the stop time, starting at time 'start' and incrementing + by the schedule defined in the task. + + $ kapacitor replay-live batch -task cpu_idle -past 10h + This replays the result of the query defined in task 'cpu_idle' and runs the query + until the queries reach the present time. + The starting time for the queries is 'now - 10h' and increments by the schedule defined in the task. Options: ` @@ -920,6 +914,13 @@ Replay the result of a querty against a task. This is similar to 'kapacitor record query...' but without saving a recording. +Examples: + + $ kapacitor replay-live query -task cpu_alert -rec-time -query 'select value from "telegraf"."default"."cpu_idle" where time > now() - 1h and time < now()' + + This replays the result of the query against the cpu_alert task. 
+ + Options: ` fmt.Fprintln(os.Stderr, u) @@ -986,7 +987,7 @@ func doReplayLive(args []string) error { case "query": replayLiveQueryFlags.Parse(args[1:]) if *rlqQuery == "" || *rlqTask == "" { - recordQueryFlags.Usage() + replayLiveQueryFlags.Usage() return errors.New("both query and task are required") } noWait = *rlqNowait @@ -1317,7 +1318,7 @@ func doShowTemplate(args []string) error { // List func listUsage() { - var u = `Usage: kapacitor list (tasks|templates|recordings|replays) [(task|template|recording|replay) ID or pattern] + var u = `Usage: kapacitor list (tasks|templates|recordings|replays) [ID or pattern]... List tasks, templates, recordings, or replays and their current state. @@ -1327,22 +1328,17 @@ If no ID or pattern is given then all items will be listed. } func doList(args []string) error { - if len(args) == 0 { fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'") listUsage() os.Exit(2) } - if len(args) > 2 { - fmt.Fprintln(os.Stderr, "Invalid usage of list") - listUsage() - os.Exit(2) - } - - var pattern string - if len(args) == 2 { - pattern = args[1] + var patterns []string + if len(args) >= 2 { + patterns = args[1:] + } else { + patterns = []string{""} } limit := 100 @@ -1351,99 +1347,107 @@ func doList(args []string) error { case "tasks": outFmt := "%-30s%-10v%-10v%-10v%s\n" fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Executing", "Databases and Retention Policies") - offset := 0 - for { - tasks, err := cli.ListTasks(&client.ListTasksOptions{ - Pattern: pattern, - Fields: []string{"type", "status", "executing", "dbrps"}, - Offset: offset, - Limit: limit, - }) - if err != nil { - return err - } + for _, pattern := range patterns { + offset := 0 + for { + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: pattern, + Fields: []string{"type", "status", "executing", "dbrps"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } - for _, t := range tasks { - fmt.Fprintf(os.Stdout, 
outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs) - } - if len(tasks) != limit { - break + for _, t := range tasks { + fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs) + } + if len(tasks) != limit { + break + } + offset += limit } - offset += limit } case "templates": outFmt := "%-30s%-10v%-40v\n" fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Vars") - offset := 0 - for { - templates, err := cli.ListTemplates(&client.ListTemplatesOptions{ - Pattern: pattern, - Fields: []string{"type", "vars"}, - Offset: offset, - Limit: limit, - }) - if err != nil { - return err - } + for _, pattern := range patterns { + offset := 0 + for { + templates, err := cli.ListTemplates(&client.ListTemplatesOptions{ + Pattern: pattern, + Fields: []string{"type", "vars"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } - for _, t := range templates { - vars := make([]string, 0, len(t.Vars)) - for name := range t.Vars { - vars = append(vars, name) + for _, t := range templates { + vars := make([]string, 0, len(t.Vars)) + for name := range t.Vars { + vars = append(vars, name) + } + sort.Strings(vars) + fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ",")) } - sort.Strings(vars) - fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ",")) - } - if len(templates) != limit { - break + if len(templates) != limit { + break + } + offset += limit } - offset += limit } case "recordings": outFmt := "%-40s%-8v%-10s%-10s%-23s\n" fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Size", "Date") - offset := 0 - for { - recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{ - Pattern: pattern, - Fields: []string{"type", "size", "date", "status"}, - Offset: offset, - Limit: limit, - }) - if err != nil { - return err - } + for _, pattern := range patterns { + offset := 0 + for { + recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{ + Pattern: pattern, + Fields: []string{"type", "size", 
"date", "status"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } - for _, r := range recordings { - fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822)) - } - if len(recordings) != limit { - break + for _, r := range recordings { + fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822)) + } + if len(recordings) != limit { + break + } + offset += limit } - offset += limit } case "replays": outFmt := "%-40v%-20v%-40v%-9v%-8v%-23v\n" fmt.Fprintf(os.Stdout, outFmt, "ID", "Task", "Recording", "Status", "Clock", "Date") - offset := 0 - for { - replays, err := cli.ListReplays(&client.ListReplaysOptions{ - Pattern: pattern, - Fields: []string{"task", "recording", "status", "clock", "date"}, - Offset: offset, - Limit: limit, - }) - if err != nil { - return err - } + for _, pattern := range patterns { + offset := 0 + for { + replays, err := cli.ListReplays(&client.ListReplaysOptions{ + Pattern: pattern, + Fields: []string{"task", "recording", "status", "clock", "date"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } - for _, r := range replays { - fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822)) - } - if len(replays) != limit { - break + for _, r := range replays { + fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822)) + } + if len(replays) != limit { + break + } + offset += limit } - offset += limit } default: return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind) diff --git a/usr/share/bash-completion/completions/kapacitor b/usr/share/bash-completion/completions/kapacitor new file mode 100644 index 000000000..ef24c31ac --- /dev/null +++ b/usr/share/bash-completion/completions/kapacitor @@ -0,0 +1,180 @@ + +# Bash completion 
for the kapacitor command. +_kapacitor() +{ + local cur prev words + + COMPREPLY=() + cur=${COMP_WORDS[COMP_CWORD]} + prev=${COMP_WORDS[COMP_CWORD-1]} + + case "${COMP_WORDS[1]}" in + record) + case "${COMP_WORDS[2]}" in + batch) + case "$prev" in + -task) + words=$(_kapacitor_list tasks "$cur") + ;; + *) + words='-no-wait -past -recording-id -start -stop -task' + ;; + esac + ;; + stream) + case "$prev" in + -task) + words=$(_kapacitor_list tasks "$cur") + ;; + *) + words='-duration -no-wait -recording-id -task' + ;; + esac + ;; + query) + words='-query -type -cluster -no-wait -recording-id' + ;; + *) + words='batch stream query' + ;; + esac + ;; + define) + if [[ -z "${COMP_WORDS[2]}" || ("$cur" = "${COMP_WORDS[2]}" && -z "${COMP_WORDS[3]}") ]] + then + words=$(_kapacitor_list tasks "$cur") + else + case "$prev" in + -dbrp) + words=$(_kapacitor_dbrps) + ;; + -tick) + COMPREPLY=($(compgen -o filenames -A file -X '!*.tick' -- "$cur")) + ;; + -type) + words='batch stream' + ;; + -template) + words=$(_kapacitor_list templates "$cur") + ;; + -vars) + COMPREPLY=($(compgen -o filenames -A file -- "$cur")) + ;; + *) + words='-dbrp -no-reload -tick -type -template -vars' + ;; + esac + fi + ;; + define-template) + if [[ -z "${COMP_WORDS[2]}" || ("$cur" = "${COMP_WORDS[2]}" && -z "${COMP_WORDS[3]}") ]] + then + words=$(_kapacitor_list templates "$cur") + else + case "$prev" in + -tick) + COMPREPLY=($(compgen -o filenames -A file -X '!*.tick' -- "$cur")) + ;; + -type) + words='batch stream' + ;; + *) + words='-tick -type' + ;; + esac + fi + ;; + replay) + case "$prev" in + -recording) + words=$(_kapacitor_list recordings "$cur") + ;; + -task) + words=$(_kapacitor_list tasks "$cur") + ;; + *) + words='-no-wait -real-clock -rec-time -recording -replay-id -task' + ;; + esac + ;; + replay-live) + case "${COMP_WORDS[2]}" in + batch) + case "$prev" in + -task) + words=$(_kapacitor_list tasks "$cur") + ;; + *) + words='-no-wait -past -real-clock -rec-time -replay-id -start -stop 
-task' + ;; + esac + ;; + query) + case "$prev" in + -task) + words=$(_kapacitor_list tasks "$cur") + ;; + *) + words='-cluster -no-wait -real-clock -rec-time -replay-id -query -task' + ;; + esac + ;; + *) + words='batch query' + ;; + esac + ;; + enable|disable|reload|show) + words=$(_kapacitor_list tasks "$cur") + ;; + delete|list) + case "${COMP_WORDS[2]}" in + tasks|templates|recordings|replays) + words=$(_kapacitor_list "${COMP_WORDS[2]}" "$cur") + ;; + *) + words='tasks templates recordings replays' + ;; + esac + ;; + show-template) + words=$(_kapacitor_list templates "$cur") + ;; + level) + words='debug info warn error' + ;; + stats) + case "${COMP_WORDS[2]}" in + ingress|general) + ;; + *) + words='general ingress' + ;; + esac + ;; + *) + words='record define define-template replay replay-live enable disable \ + reload delete list show show-template level stats version vars help' + ;; + esac + if [ -z "$COMPREPLY" ] + then + COMPREPLY=($(compgen -W "$words" -- "$cur")) + fi + + return 0 +} + +_kapacitor_list() +{ + # List a certain kind of object and return a set of IDs + kapacitor list "$1" "$2*" 2>/dev/null | awk 'NR>1 {print $1}' 2>/dev/null +} + +_kapacitor_dbrps() +{ + # Determine set of DBRPs from the ingress stats + kapacitor stats ingress 2>/dev/null | awk 'NR>1 {printf "%s.%s\n", $1, $2}' 2>/dev/null +} + +complete -F _kapacitor kapacitor