diff --git a/CHANGELOG.md b/CHANGELOG.md index b37430ebd..bcda58f1e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,41 @@ ### Release Notes -### Features +New K8sAutoscale node that allows you to automatically scale Kubernetes deployments driven by any metrics Kapacitor consumes. +For example, to scale a deployment `myapp` based on requests per second: -### Bugfixes +``` +// The target requests per second per host +var target = 100.0 + +stream + |from() + .measurement('requests') + .where(lambda: "deployment" == 'myapp') + // Compute the moving average of the last 5 minutes + |movingAverage('requests', 5*60) + .as('mean_requests_per_second') + |k8sAutoscale() + .resourceName('app') + .kind('deployments') + .min(4) + .max(100) + // Compute the desired number of replicas based on target. + .replicas(lambda: int(ceil("mean_requests_per_second" / target))) +``` + + +New API endpoints have been added that allow you to configure InfluxDB clusters and alert handlers dynamically, without needing to restart the Kapacitor daemon. +See the API docs for more details. + +### Features -- [#980](https://github.com/influxdata/kapacitor/pull/980): Upgrade to using go 1.7 - [#931](https://github.com/influxdata/kapacitor/issues/931): Add a Kubernetes autoscaler node. You can now autoscale your Kubernetes deployments via Kapacitor. +- [#928](https://github.com/influxdata/kapacitor/issues/928): Add new API endpoint for dynamically overriding sections of the configuration. +- [#980](https://github.com/influxdata/kapacitor/pull/980): Upgrade to using go 1.7 ### Bugfixes - ## v1.0.2 [2016-10-06] ### Release Notes @@ -454,7 +479,6 @@ If you have existing tasks which do not match this pattern they should continue ### Features - ### Bugfixes - [#545](https://github.com/influxdata/kapacitor/issues/545): Fixes inconsistancy with API docs for creating a task.
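The new configuration endpoints mentioned above can also be exercised from the Go client added in this change (`ConfigUpdate`, `ConfigUpdateAction`, and `ConfigElementLink` in `client/v1`). A minimal sketch, assuming a Kapacitor instance at `http://localhost:9092` and the client package's existing `New`/`Config` constructor (not part of this diff):

```go
package main

import (
	"log"

	client "github.com/influxdata/kapacitor/client/v1"
)

func main() {
	// Connect to a local Kapacitor instance; the URL is an assumption for this example.
	cli, err := client.New(client.Config{URL: "http://localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	// Override two SMTP options at runtime without restarting the daemon.
	// The empty element name addresses the single, non-list smtp section.
	action := client.ConfigUpdateAction{
		Set: map[string]interface{}{
			"enabled": true,
			"host":    "smtp.example.com",
		},
	}
	if err := cli.ConfigUpdate(cli.ConfigElementLink("smtp", ""), action); err != nil {
		log.Fatal(err)
	}
	log.Println("smtp configuration override applied")
}
```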
diff --git a/LICENSE_OF_DEPENDENCIES.md b/LICENSE_OF_DEPENDENCIES.md index 00d72e901..f9c0971bc 100644 --- a/LICENSE_OF_DEPENDENCIES.md +++ b/LICENSE_OF_DEPENDENCIES.md @@ -1,8 +1,8 @@ Dependencies ============ -* github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE) * github.com/BurntSushi/toml [WTFPL](https://github.com/BurntSushi/toml/blob/master/COPYING) +* github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE) * github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE) * github.com/dgrijalva/jwt-go [MIT](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) * github.com/dustin/go-humanize [MIT](https://github.com/dustin/go-humanize/blob/master/LICENSE) @@ -10,6 +10,8 @@ Dependencies * github.com/gorhill/cronexpr [APLv2](https://github.com/gorhill/cronexpr/blob/master/APLv2) * github.com/kimor79/gollectd [BSD](https://github.com/kimor79/gollectd/blob/master/LICENSE) * github.com/mattn/go-runewidth [MIT](https://github.com/mattn/go-runewidth/blob/master/README.mkd) +* github.com/mitchellh/copystructure[MIT](https://github.com/mitchellh/copystructure/blob/master/LICENSE) +* github.com/mitchellh/reflectwalk [MIT](https://github.com/mitchellh/reflectwalk/blob/master/LICENSE) * github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE) * github.com/russross/blackfriday [BSD](https://github.com/russross/blackfriday/blob/master/LICENSE.txt) * github.com/serenize/snaker [MIT](https://github.com/serenize/snaker/blob/master/LICENSE.txt) diff --git a/alert.go b/alert.go index eefd69cfe..8c2be8836 100644 --- a/alert.go +++ b/alert.go @@ -950,13 +950,8 @@ func (a *AlertNode) handleTcp(tcp *pipeline.TcpHandler, ad *AlertData) { } func (a *AlertNode) handleEmail(email *pipeline.EmailHandler, ad *AlertData) { - if a.et.tm.SMTPService != nil { - err := a.et.tm.SMTPService.SendMail(email.ToList, ad.Message, ad.Details) - if err != nil { - a.logger.Println("E!", err) - } - } else { - a.logger.Println("E! smtp service not enabled, cannot send email.") + if err := a.et.tm.SMTPService.SendMail(email.ToList, ad.Message, ad.Details); err != nil { + a.logger.Println("E! failed to send email:", err) } } @@ -1001,10 +996,6 @@ func (a *AlertNode) handleLog(l *pipeline.LogHandler, ad *AlertData) { } func (a *AlertNode) handleVictorOps(vo *pipeline.VictorOpsHandler, ad *AlertData) { - if a.et.tm.VictorOpsService == nil { - a.logger.Println("E! failed to send VictorOps alert. VictorOps is not enabled") - return - } var messageType string switch ad.Level { case OKAlert: @@ -1027,10 +1018,6 @@ func (a *AlertNode) handleVictorOps(vo *pipeline.VictorOpsHandler, ad *AlertData } func (a *AlertNode) handlePagerDuty(pd *pipeline.PagerDutyHandler, ad *AlertData) { - if a.et.tm.PagerDutyService == nil { - a.logger.Println("E! failed to send PagerDuty alert. PagerDuty is not enabled") - return - } err := a.et.tm.PagerDutyService.Alert( pd.ServiceKey, ad.ID, @@ -1045,11 +1032,6 @@ func (a *AlertNode) handlePagerDuty(pd *pipeline.PagerDutyHandler, ad *AlertData } func (a *AlertNode) handleSensu(sensu *pipeline.SensuHandler, ad *AlertData) { - if a.et.tm.SensuService == nil { - a.logger.Println("E! failed to send Sensu message. 
Sensu is not enabled") - return - } - err := a.et.tm.SensuService.Alert( ad.ID, ad.Message, @@ -1062,10 +1044,6 @@ func (a *AlertNode) handleSensu(sensu *pipeline.SensuHandler, ad *AlertData) { } func (a *AlertNode) handleSlack(slack *pipeline.SlackHandler, ad *AlertData) { - if a.et.tm.SlackService == nil { - a.logger.Println("E! failed to send Slack message. Slack is not enabled") - return - } err := a.et.tm.SlackService.Alert( slack.Channel, ad.Message, @@ -1078,10 +1056,6 @@ func (a *AlertNode) handleSlack(slack *pipeline.SlackHandler, ad *AlertData) { } func (a *AlertNode) handleTelegram(telegram *pipeline.TelegramHandler, ad *AlertData) { - if a.et.tm.TelegramService == nil { - a.logger.Println("E! failed to send Telegram message. Telegram is not enabled") - return - } err := a.et.tm.TelegramService.Alert( telegram.ChatId, telegram.ParseMode, @@ -1096,10 +1070,6 @@ func (a *AlertNode) handleTelegram(telegram *pipeline.TelegramHandler, ad *Alert } func (a *AlertNode) handleHipChat(hipchat *pipeline.HipChatHandler, ad *AlertData) { - if a.et.tm.HipChatService == nil { - a.logger.Println("E! failed to send HipChat message. HipChat is not enabled") - return - } err := a.et.tm.HipChatService.Alert( hipchat.Room, hipchat.Token, @@ -1123,11 +1093,6 @@ type alertaHandler struct { } func (a *AlertNode) handleAlerta(alerta alertaHandler, ad *AlertData) { - if a.et.tm.AlertaService == nil { - a.logger.Println("E! failed to send Alerta message. Alerta is not enabled") - return - } - var severity string switch ad.Level { @@ -1215,10 +1180,6 @@ func (a *AlertNode) handleAlerta(alerta alertaHandler, ad *AlertData) { } func (a *AlertNode) handleOpsGenie(og *pipeline.OpsGenieHandler, ad *AlertData) { - if a.et.tm.OpsGenieService == nil { - a.logger.Println("E! failed to send OpsGenie alert. OpsGenie is not enabled") - return - } var messageType string switch ad.Level { case OKAlert: @@ -1243,11 +1204,6 @@ func (a *AlertNode) handleOpsGenie(og *pipeline.OpsGenieHandler, ad *AlertData) } func (a *AlertNode) handleTalk(talk *pipeline.TalkHandler, ad *AlertData) { - if a.et.tm.TalkService == nil { - a.logger.Println("E! failed to send Talk message. 
Talk is not enabled") - return - } - err := a.et.tm.TalkService.Alert( ad.ID, ad.Message, diff --git a/batch.go b/batch.go index a47bd3962..2700885b9 100644 --- a/batch.go +++ b/batch.go @@ -2,7 +2,6 @@ package kapacitor import ( "bytes" - "errors" "fmt" "log" "sync" @@ -14,11 +13,11 @@ import ( "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" + "github.com/pkg/errors" ) const ( statsQueryErrors = "query_errors" - statsConnectErrors = "connect_errors" statsBatchesQueried = "batches_queried" statsPointsQueried = "points_queried" ) @@ -139,7 +138,6 @@ type QueryNode struct { aborting chan struct{} queryErrors *expvar.Int - connectErrors *expvar.Int batchesQueried *expvar.Int pointsQueried *expvar.Int byName bool @@ -265,12 +263,10 @@ func (b *QueryNode) Queries(start, stop time.Time) ([]*Query, error) { func (b *QueryNode) doQuery() error { defer b.ins[0].Close() b.queryErrors = &expvar.Int{} - b.connectErrors = &expvar.Int{} b.batchesQueried = &expvar.Int{} b.pointsQueried = &expvar.Int{} b.statMap.Set(statsQueryErrors, b.queryErrors) - b.statMap.Set(statsConnectErrors, b.connectErrors) b.statMap.Set(statsBatchesQueried, b.batchesQueried) b.statMap.Set(statsPointsQueried, b.pointsQueried) @@ -278,7 +274,10 @@ func (b *QueryNode) doQuery() error { return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query") } - var con influxdb.Client + con, err := b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster) + if err != nil { + return errors.Wrap(err, "failed to get InfluxDB client") + } tickC := b.ticker.Start() for { select { @@ -288,48 +287,22 @@ func (b *QueryNode) doQuery() error { return errors.New("batch doQuery aborted") case now := <-tickC: b.timer.Start() - // Update times for query stop := now.Add(-1 * b.b.Offset) b.query.SetStartTime(stop.Add(-1 * b.b.Period)) b.query.SetStopTime(stop) - b.logger.Println("D! starting next batch query:", b.query.String()) + qStr := b.query.String() + b.logger.Println("D! starting next batch query:", qStr) - var err error - if con == nil { - if b.b.Cluster != "" { - con, err = b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster) - } else { - con, err = b.et.tm.InfluxDBService.NewDefaultClient() - } - if err != nil { - b.logger.Println("E! failed to connect to InfluxDB:", err) - b.connectErrors.Add(1) - // Ensure connection is nil - con = nil - b.timer.Stop() - break - } - } + // Execute query q := influxdb.Query{ - Command: b.query.String(), + Command: qStr, } - - // Execute query resp, err := con.Query(q) if err != nil { - b.logger.Println("E! query failed:", err) - b.queryErrors.Add(1) - // Get a new connection - con = nil - b.timer.Stop() - break - } - - if err := resp.Error(); err != nil { - b.logger.Println("E! 
query returned error response:", err) b.queryErrors.Add(1) + b.logger.Println("E!", err) b.timer.Stop() break } diff --git a/build.py b/build.py index 46a233a52..969721b89 100755 --- a/build.py +++ b/build.py @@ -287,10 +287,10 @@ def run(command, allow_failure=False, shell=False, printOutput=False): out = out.decode('utf-8').strip() if p.returncode != 0: if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, out)) + logging.warn(u"Command '{}' failed with error: {}".format(command, out)) return None else: - logging.error("Command '{}' failed with error: {}".format(command, out)) + logging.error(u"Command '{}' failed with error: {}".format(command, out)) sys.exit(1) except OSError as e: if allow_failure: diff --git a/client/API.md b/client/API.md index fb29f603a..5e396f4c9 100644 --- a/client/API.md +++ b/client/API.md @@ -6,6 +6,7 @@ * [Templates](#templates) * [Recordings](#recordings) * [Replays](#replays) +* [Configuration](#configuration) * [Miscellaneous](#miscellaneous) ## General Information @@ -117,7 +118,7 @@ When using PATCH, if any option is missing it will be left unmodified. The vars object has the form: -``` +```json { "field_name" : { "value": , @@ -166,7 +167,7 @@ POST /kapacitor/v1/tasks Response with task id and link. -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", @@ -235,7 +236,7 @@ POST /kapacitor/v1/tasks Response with task id and link. -``` +```json { "id" : "TASK_ID", "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"} @@ -280,7 +281,7 @@ Get information about a task using defaults. GET /kapacitor/v1/tasks/TASK_ID ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", @@ -304,7 +305,7 @@ Get information about a task using only labels in the DOT content and skip the f GET /kapacitor/v1/tasks/TASK_ID?dot-view=labels&script-format=raw ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", @@ -368,7 +369,7 @@ Get all tasks. GET /kapacitor/v1/tasks ``` -``` +```json { "tasks" : [ { @@ -405,7 +406,7 @@ Optionally specify a glob `pattern` to list only matching tasks. GET /kapacitor/v1/task?pattern=TASK* ``` -``` +```json { "tasks" : [ { @@ -430,7 +431,7 @@ Get all tasks, but only the status, executing and error fields. GET /kapacitor/v1/tasks?fields=status&fields=executing&fields=error ``` -``` +```json { "tasks" : [ { @@ -482,7 +483,7 @@ stream GET /kapacitor/v1/tasks/TASK_ID/mycustom_endpoint ``` -``` +```json { "series": [ { @@ -561,7 +562,7 @@ POST /kapacitor/v1/templates Response with template id and link. -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/templates/TASK_ID"}, "id" : "TASK_ID", @@ -617,7 +618,7 @@ Get information about a template using defaults. GET /kapacitor/v1/templates/TEMPLATE_ID ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/templates/TEMPLATE_ID"}, "id" : "TASK_ID", @@ -679,7 +680,7 @@ Get all templates. GET /kapacitor/v1/templates ``` -``` +```json { "templates" : [ { @@ -708,7 +709,7 @@ Optionally specify a glob `pattern` to list only matching templates. GET /kapacitor/v1/template?pattern=TEMPLATE* ``` -``` +```json { "templates" : [ { @@ -729,7 +730,7 @@ Get all templates, but only the script and error fields. 
GET /kapacitor/v1/templates?fields=status&fields=executing&fields=error ``` -``` +```json { "templates" : [ { @@ -862,7 +863,7 @@ POST /kapacitor/v1/recordings/query All recordings are assigned an ID which is returned in this format with a link. -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", @@ -900,7 +901,7 @@ A recording has these read only properties. GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", @@ -919,7 +920,7 @@ Once the recording is complete. GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", @@ -938,7 +939,7 @@ Or if the recording fails. GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", @@ -993,7 +994,7 @@ Recordings are sorted by date. GET /kapacitor/v1/recordings ``` -``` +```json { "recordings" : [ { @@ -1080,7 +1081,7 @@ POST /kapacitor/v1/replays/ The request returns once the replay is started and provides a replay ID and link. -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", @@ -1171,7 +1172,7 @@ POST /kapacitor/v1/replays/query All replays are assigned an ID which is returned in this format with a link. -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/e24db07d-1646-4bb3-a445-828f5049bea0"}, "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", @@ -1214,7 +1215,7 @@ Get the status of a replay. GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", @@ -1234,7 +1235,7 @@ Once the replay is complete. GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", @@ -1254,7 +1255,7 @@ Or if the replay fails. GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c ``` -``` +```json { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", @@ -1310,9 +1311,9 @@ You can list replays for a given recording by making a GET request to `/kapacito GET /kapacitor/v1/replays ``` -``` +```json { - "replays" [ + "replays": [ { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", @@ -1339,6 +1340,365 @@ GET /kapacitor/v1/replays } ``` +## Configuration + +You can set configuration overrides via the API for certain sections of the config. +The overrides set via the API always take precedent over what may exist in the configuration file. +The sections available for overriding include the InfluxDB clusters and the alert handler sections. 
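As a quick way to see which sections and elements are overridable on a running instance, the list can be read back with the `ConfigSections` client method added in this change. A minimal sketch, assuming a Kapacitor instance at `http://localhost:9092` and the client package's existing `New`/`Config` constructor (not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	client "github.com/influxdata/kapacitor/client/v1"
)

func main() {
	// Connect to a local Kapacitor instance; the URL is an assumption for this example.
	cli, err := client.New(client.Config{URL: "http://localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	// List every configuration section and element that can be overridden via the API.
	sections, err := cli.ConfigSections()
	if err != nil {
		log.Fatal(err)
	}
	for name, section := range sections.Sections {
		fmt.Printf("section %q: %d element(s)\n", name, len(section.Elements))
	}
}
```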
+ + +The intent of the API is to allow for dynamic configuration of sensitive credentials without requiring that the Kapacitor process be restarted. +As such, it is recommended to use either the configuration file or the API to manage these configuration sections, but not both. +This will help to eliminate any confusion that may arise as to the source of a given configuration option. + + +### Enabling/Disabling Configuration Overrides + +By default the ability to override the configuration is enabled. +If you do not wish to enable this feature, it can be disabled via the `config-override` configuration section. + +``` +[config-override] + enabled = false +``` + +If the `config-override` service is disabled, then the relevant API endpoints will return 403 forbidden errors. + +### Recovering from bad configuration + +If you have somehow created a configuration that causes Kapacitor to crash or otherwise not function, +you can disable applying overrides during startup with the `skip-config-overrides` top-level configuration option. + +``` +# This configuration option is only a safeguard and should not be needed in practice. +skip-config-overrides = true +``` + +This allows you to still access the API to fix any unwanted configuration without applying that configuration during startup. + +>NOTE: It is probably easiest and safest to set this option as an environment variable `KAPACITOR_SKIP_CONFIG_OVERRIDES=true`, since it is meant to be temporary. +That way you do not have to modify your on-disk configuration file or accidentally leave it in place, causing issues later on. + +### Overview + +The paths for the configuration API endpoints are as follows: + +`/kapacitor/v1/config/
/[]` + +Example: + +``` +/kapacitor/v1/config/smtp/ +/kapacitor/v1/config/influxdb/localhost +/kapacitor/v1/config/influxdb/remote +``` + +The optional `element name` path element corresponds to a specific item from a list of entries. + +For example the above paths correspond to the following configuration sections: + +``` +[smtp] + # SMTP config here + +[[influxdb]] + name = "localhost" + # InfluxDB config here for the "localhost" cluster + +[[influxdb]] + name = "remote" + # InfluxDB config here for the "remote" cluster +``` + + +### Retrieving the current configuration + +To retrieve the current configuration perform a GET request to the desired path. +The returned configuration will be the merged values from the configuration file and what has been stored in the overrides. +The returned content will be JSON encoded version of the configuration objects. + +All sensitive information will not be returned in the request body. +Instead a boolean value will be in its place indicating whether the value is empty or not. + +#### Example + +Retrieve all the configuration sections which can be overridden. + +``` +GET /kapacitor/v1/config +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config"}, + "sections": { + "influxdb": { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb"}, + "elements": [ + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/localhost"}, + "options": { + "name": "localhost", + "urls": ["http://localhost:8086"], + "default": true, + "username": "", + "password": false + }, + }, + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"}, + "options": { + "name": "remote", + "urls": ["http://influxdb.example.com:8086"], + "default": false, + "username": "jim", + "password": true + } + } + ] + }, + "smtp": { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp"}, + "elements": [{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"}, + "options": { + "enabled": true, + "host": "smtp.example.com", + "port": 587, + "username": "bob", + "password": true, + "no-verify": false, + "global": false, + "to": [ "oncall@example.com"], + "from": "kapacitor@example.com", + "idle-timeout": "30s" + } + }] + } + } +} +``` + + +Retrieve only the SMTP section. + +``` +GET /kapacitor/v1/config/smtp +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp"}, + "elements": [{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"}, + "options": { + "enabled": true, + "host": "smtp.example.com", + "port": 587, + "username": "bob", + "password": true, + "no-verify": false, + "global": false, + "to": ["oncall@example.com"], + "from": "kapacitor@example.com", + "idle-timeout": "30s" + } + }] +} +``` + +Retrieve the single element from the SMTP section. + +``` +GET /kapacitor/v1/config/smtp/ +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"}, + "options": { + "enabled": true, + "host": "smtp.example.com", + "port": 587, + "username": "bob", + "password": true, + "no-verify": false, + "global": false, + "to": ["oncall@example.com"], + "from": "kapacitor@example.com", + "idle-timeout": "30s" + } +} +``` + +>NOTE: Sections that are not lists can be treated as having an empty string for their element name. + +Retrieve only the InfluxDB section. 
+ +``` +GET /kapacitor/v1/config/influxdb +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb"}, + "elements" : [ + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/localhost"}, + "options": { + "name": "localhost", + "urls": ["http://localhost:8086"], + "default": true, + "username": "", + "password": false + }, + }, + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"}, + "options": { + "name": "remote", + "urls": ["http://influxdb.example.com:8086"], + "default": false, + "username": "jim", + "password": true + } + } + ] +} +``` + +Retrieve only the `remote` element of the InfluxDB section. + +``` +GET /kapacitor/v1/config/influxdb/remote +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"}, + "options": { + "name": "remote", + "urls": ["http://influxdb.example.com:8086"], + "default": false, + "username": "jim", + "password": true + } +} +``` + +>NOTE: The password value is not returned, but the `true` value indicates that a non empty password has been set. + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | +| 403 | Config override service not enabled | + +### Overriding the configuration + +To override a value in the configuration make a POST request to the desired path. +The request should contain a JSON object describing what should be modified. + +Use the following top level actions: + +| Key | Description | +| --- | ----------- | +| set | Set the value in the configuration overrides. | +| delete | Delete the value from the configuration overrides. | +| add | Add a new element to a list configuration section. | +| remove | Remove a previously added element from a list configuration section. | + +Configuration options not specified in the request will be left unmodified. + +#### Example + +To disable the SMTP alert handler: + +``` +POST /kapacitor/v1/config/smtp/ +{ + "set":{ + "enabled": false + } +} +``` + +To delete the override for the SMTP alert handler: + +``` +POST /kapacitor/v1/config/smtp/ +{ + "delete":[ + "enabled" + ] +} +``` + +Actions can be combined in a single request. +Enable the SMTP handler, set its host and remove the port override. + +``` +POST /kapacitor/v1/config/smtp/ +{ + "set":{ + "enabled": true, + "host": "smtp.example.com" + }, + "delete":[ + "port" + ] +} +``` + +Add a new InfluxDB cluster: + +``` +POST /kapacitor/v1/config/influxdb +{ + "add":{ + "name": "example", + "urls": ["https://influxdb.example.com:8086"], + "default": true, + "disable-subscriptions": true + } +} +``` + +Remove an existing InfluxDB cluster override: + +``` +POST /kapacitor/v1/config/influxdb +{ + "remove":[ + "example" + ] +} +``` + +>NOTE: Only the overrides can be removed, this means that InfluxDB clusters that exist in the configuration cannot be removed. 
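The add and remove actions above map directly onto the Go client's `ConfigUpdate` method with the `Add` and `Remove` fields of `ConfigUpdateAction`. A minimal sketch, assuming a Kapacitor instance at `http://localhost:9092` and the client package's existing `New`/`Config` constructor (not part of this diff):

```go
package main

import (
	"log"

	client "github.com/influxdata/kapacitor/client/v1"
)

func main() {
	// Connect to a local Kapacitor instance; the URL is an assumption for this example.
	cli, err := client.New(client.Config{URL: "http://localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	influxdbLink := cli.ConfigSectionLink("influxdb")
	// Add a new InfluxDB cluster override, mirroring the POST body shown above.
	add := client.ConfigUpdateAction{
		Add: map[string]interface{}{
			"name":    "example",
			"urls":    []string{"https://influxdb.example.com:8086"},
			"default": true,
		},
	}
	if err := cli.ConfigUpdate(influxdbLink, add); err != nil {
		log.Fatal(err)
	}
	// Remove the override again; clusters defined only in the configuration file cannot be removed this way.
	remove := client.ConfigUpdateAction{Remove: []string{"example"}}
	if err := cli.ConfigUpdate(influxdbLink, remove); err != nil {
		log.Fatal(err)
	}
}
```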
+ +Modify an existing InfluxDB cluster: + +``` +POST /kapacitor/v1/config/influxdb/remote +{ + "set":{ + "disable-subscriptions": false + }, + "delete": [ + "default" + ] +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | +| 403 | Config override service not enabled | +| 404 | The specified configuration section/option does not exist | ## Miscellaneous diff --git a/client/v1/client.go b/client/v1/client.go index 58403fca0..9c712ffe4 100644 --- a/client/v1/client.go +++ b/client/v1/client.go @@ -39,6 +39,7 @@ const ( replaysPath = basePath + "/replays" replayBatchPath = basePath + "/replays/batch" replayQueryPath = basePath + "/replays/query" + configPath = basePath + "/config" ) // HTTP configuration for connecting to Kapacitor @@ -687,6 +688,18 @@ func (c *Client) TemplateLink(id string) Link { return Link{Relation: Self, Href: path.Join(templatesPath, id)} } +func (c *Client) ConfigSectionLink(section string) Link { + return Link{Relation: Self, Href: path.Join(configPath, section)} +} + +func (c *Client) ConfigElementLink(section, element string) Link { + href := path.Join(configPath, section, element) + if element == "" { + href += "/" + } + return Link{Relation: Self, Href: href} +} + type CreateTaskOptions struct { ID string `json:"id,omitempty"` TemplateID string `json:"template-id,omitempty"` @@ -1505,6 +1518,115 @@ func (c *Client) ListReplays(opt *ListReplaysOptions) ([]Replay, error) { return r.Replays, nil } +type ConfigUpdateAction struct { + Set map[string]interface{} `json:"set,omitempty"` + Delete []string `json:"delete,omitempty"` + Add map[string]interface{} `json:"add,omitempty"` + Remove []string `json:"remove,omitempty"` +} + +// ConfigUpdate performs a given ConfigUpdateAction against a given section or element. +func (c *Client) ConfigUpdate(link Link, action ConfigUpdateAction) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(action) + if err != nil { + return err + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("POST", u.String(), &buf) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + _, err = c.Do(req, nil, http.StatusNoContent) + return err +} + +type ConfigSections struct { + Link Link `json:"link"` + Sections map[string]ConfigSection `json:"sections"` +} + +type ConfigSection struct { + Link Link `json:"link"` + Elements []ConfigElement `json:"elements"` +} + +type ConfigElement struct { + Link Link `json:"link"` + Options map[string]interface{} `json:"options"` +} + +// ConfigSections returns all the running configuration sections that can be modified. +func (c *Client) ConfigSections() (ConfigSections, error) { + u := *c.url + u.Path = configPath + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ConfigSections{}, err + } + + sections := ConfigSections{} + _, err = c.Do(req, &sections, http.StatusOK) + if err != nil { + return ConfigSections{}, err + } + return sections, nil +} + +// ConfigSection returns the running configuration for a section.
+func (c *Client) ConfigSection(link Link) (ConfigSection, error) { + if link.Href == "" { + return ConfigSection{}, fmt.Errorf("invalid link %v", link) + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ConfigSection{}, err + } + + section := ConfigSection{} + _, err = c.Do(req, &section, http.StatusOK) + if err != nil { + return ConfigSection{}, err + } + return section, nil +} + +// ConfigElement returns the running configuration for a given section and element. +func (c *Client) ConfigElement(link Link) (ConfigElement, error) { + if link.Href == "" { + return ConfigElement{}, fmt.Errorf("invalid link %v", link) + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ConfigElement{}, err + } + + element := ConfigElement{} + _, err = c.Do(req, &element, http.StatusOK) + if err != nil { + return ConfigElement{}, err + } + return element, nil +} + type LogLevelOptions struct { Level string `json:"level"` } diff --git a/client/v1/client_test.go b/client/v1/client_test.go index e7235bdcf..5f789fa2c 100644 --- a/client/v1/client_test.go +++ b/client/v1/client_test.go @@ -201,6 +201,34 @@ func Test_ReportsErrors(t *testing.T) { return err }, }, + { + name: "ConfigSections", + fnc: func(c *client.Client) error { + _, err := c.ConfigSections() + return err + }, + }, + { + name: "ConfigSection", + fnc: func(c *client.Client) error { + _, err := c.ConfigSection(c.ConfigSectionLink("")) + return err + }, + }, + { + name: "ConfigElement", + fnc: func(c *client.Client) error { + _, err := c.ConfigElement(c.ConfigElementLink("", "")) + return err + }, + }, + { + name: "ConfigUpdate", + fnc: func(c *client.Client) error { + err := c.ConfigUpdate(c.ConfigSectionLink(""), client.ConfigUpdateAction{}) + return err + }, + }, { name: "LogLevel", fnc: func(c *client.Client) error { @@ -1889,6 +1917,294 @@ func Test_ListReplays_Filter(t *testing.T) { } } +func Test_ConfigUpdate(t *testing.T) { + expUpdate := client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "option": "new value", + }, + } + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var update client.ConfigUpdateAction + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &update) + if r.URL.Path == "/kapacitor/v1/config/section" && r.Method == "POST" && + reflect.DeepEqual(update, expUpdate) { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + if err := c.ConfigUpdate(c.ConfigSectionLink("section"), expUpdate); err != nil { + t.Fatal(err) + } +} + +func Test_ConfigUpdate_Element(t *testing.T) { + expUpdate := client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "option": "new value", + }, + } + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var update client.ConfigUpdateAction + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &update) + if r.URL.Path == "/kapacitor/v1/config/section/element" && r.Method == "POST" && + reflect.DeepEqual(update, expUpdate) { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + if err := c.ConfigUpdate(c.ConfigElementLink("section", "element"), expUpdate); err != nil { + t.Fatal(err) + } +} + +func 
Test_ConfigSections(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/kapacitor/v1/config" && r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link": {"rel":"self", "href":"/kapacitor/v1/config"}, + "sections":{ + "sectionA": { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionA"}, + "elements": [ + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionA/A"}, + "options" :{ + "name": "A", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4" + } + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionA/B"}, + "options" :{ + "name": "B", + "optionA": "o5", + "optionB": "o6", + "optionC": "o7", + "optionD": "o8" + } + } + ] + }, + "sectionB": { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionB"}, + "elements" :[ + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionB/X"}, + "options" :{ + "name": "X", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4" + } + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionB/Y"}, + "options" :{ + "name": "Y", + "optionH": "o5", + "optionJ": "o6", + "optionK": "o7", + "optionL": "o8" + } + } + ] + }, + "sectionC": { + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionC"}, + "elements" :[{ + "link": {"rel":"self", "href":"/kapacitor/v1/config/sectionC/"}, + "options" :{ + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4" + } + }] + } + } +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + sections, err := c.ConfigSections() + if err != nil { + t.Fatal(err) + } + exp := client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "sectionA": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionA"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionA/A"}, + Options: map[string]interface{}{ + "name": "A", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4", + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionA/B"}, + Options: map[string]interface{}{ + "name": "B", + "optionA": "o5", + "optionB": "o6", + "optionC": "o7", + "optionD": "o8", + }, + }, + }, + }, + "sectionB": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionB"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionB/X"}, + Options: map[string]interface{}{ + "name": "X", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4", + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionB/Y"}, + Options: map[string]interface{}{ + "name": "Y", + "optionH": "o5", + "optionJ": "o6", + "optionK": "o7", + "optionL": "o8", + }, + }, + }, + }, + "sectionC": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionC"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sectionC/"}, + Options: map[string]interface{}{ + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4", + }, + }, + }, + }, + }, + } + if 
!reflect.DeepEqual(exp, sections) { + t.Errorf("unexpected config section:\ngot:\n%v\nexp:\n%v", sections, exp) + } +} + +func Test_ConfigSection(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/kapacitor/v1/config/section" && r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link": {"rel":"self", "href":"/kapacitor/v1/config/section"}, + "elements" : [ + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/section/A"}, + "options": { + "name": "A", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4" + } + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/config/section/B"}, + "options": { + "name": "B", + "optionA": "o5", + "optionB": "o6", + "optionC": "o7", + "optionD": "o8" + } + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + section, err := c.ConfigSection(c.ConfigSectionLink("section")) + if err != nil { + t.Fatal(err) + } + exp := client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section/A"}, + Options: map[string]interface{}{ + "name": "A", + "optionA": "o1", + "optionB": "o2", + "optionC": "o3", + "optionD": "o4", + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section/B"}, + Options: map[string]interface{}{ + "name": "B", + "optionA": "o5", + "optionB": "o6", + "optionC": "o7", + "optionD": "o8", + }, + }, + }, + } + if !reflect.DeepEqual(exp, section) { + t.Errorf("unexpected config section:\ngot:\n%v\nexp:\n%v", section, exp) + } +} + func Test_LogLevel(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var opts client.LogLevelOptions diff --git a/cmd/kapacitord/run/command.go b/cmd/kapacitord/run/command.go index f0a9ca25f..72e8186a9 100644 --- a/cmd/kapacitord/run/command.go +++ b/cmd/kapacitord/run/command.go @@ -216,7 +216,6 @@ func (cmd *Command) ParseConfig(path string) (*server.Config, error) { if _, err := toml.DecodeFile(path, &config); err != nil { return nil, err } - config.PostInit() return config, nil } diff --git a/cmd/kapacitord/run/config_command.go b/cmd/kapacitord/run/config_command.go index 5627a5d08..786e8af71 100644 --- a/cmd/kapacitord/run/config_command.go +++ b/cmd/kapacitord/run/config_command.go @@ -109,7 +109,6 @@ func (cmd *PrintConfigCommand) parseConfig(path string) (*server.Config, error) if _, err := toml.DecodeFile(path, &config); err != nil { return nil, err } - config.PostInit() return config, nil } diff --git a/etc/kapacitor/kapacitor.conf b/etc/kapacitor/kapacitor.conf index 9e292f527..95572aa33 100644 --- a/etc/kapacitor/kapacitor.conf +++ b/etc/kapacitor/kapacitor.conf @@ -4,6 +4,11 @@ hostname = "localhost" # Directory for storing a small amount of metadata about the server. data_dir = "/var/lib/kapacitor" +# Do not apply configuration overrides during startup. +# Useful if the configuration overrides cause Kapacitor to fail startup. +# This option is intended as a safe guard and should not be needed in practice. 
+skip-config-overrides = false + [http] # HTTP API Server for Kapacitor # This server is always on, @@ -18,6 +23,10 @@ data_dir = "/var/lib/kapacitor" https-enabled = false https-certificate = "/etc/ssl/kapacitor.pem" +[config-override] + # Enable/Disable the service for overridding configuration via the HTTP API. + enabled = true + [logging] # Destination for logs # Can be a path to a file or 'STDOUT', 'STDERR'. diff --git a/influxdb/client.go b/influxdb/client.go index 3f541737c..fd8a79dbd 100644 --- a/influxdb/client.go +++ b/influxdb/client.go @@ -2,7 +2,7 @@ package influxdb import ( "bytes" - "crypto/tls" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,25 +10,31 @@ import ( "net/url" "strconv" "sync" + "sync/atomic" "time" imodels "github.com/influxdata/influxdb/models" "github.com/pkg/errors" ) -// Client is a HTTPClient interface for writing & querying the database +// Client is an interface for writing to and querying from an InfluxDB instance. type Client interface { // Ping checks that status of cluster - Ping(timeout time.Duration) (time.Duration, string, error) + // The provided context can be used to cancel the request. + Ping(ctx context.Context) (time.Duration, string, error) // Write takes a BatchPoints object and writes all Points to InfluxDB. Write(bp BatchPoints) error // Query makes an InfluxDB Query on the database. + // The response is checked for an error and the is returned + // if it exists Query(q Query) (*Response, error) +} - // Close releases any resources a Client may be using. - Close() error +type ClientUpdater interface { + Client + Update(new Config) error } // BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct @@ -54,26 +60,22 @@ type Query struct { } // HTTPConfig is the config data needed to create an HTTP Client -type HTTPConfig struct { +type Config struct { // The URL of the InfluxDB server. - URL string + URLs []string // Optional credentials for authenticating with the server. - Credentials *Credentials + Credentials Credentials // UserAgent is the http User Agent, defaults to "KapacitorInfluxDBClient" UserAgent string - // Timeout for influxdb writes, defaults to no timeout + // Timeout for requests, defaults to no timeout. Timeout time.Duration - // InsecureSkipVerify gets passed to the http HTTPClient, if true, it will - // skip https certificate verification. Defaults to false - InsecureSkipVerify bool - - // TLSConfig allows the user to set their own TLS config for the HTTP - // Client. If set, this option overrides InsecureSkipVerify. - TLSConfig *tls.Config + // Transport is the HTTP transport to use for requests + // If nil, a default transport will be used. + Transport *http.Transport } // AuthenticationMethod defines the type of authentication used. @@ -81,7 +83,7 @@ type AuthenticationMethod int // Supported authentication methods. const ( - _ AuthenticationMethod = iota + NoAuthentication AuthenticationMethod = iota UserAuthentication BearerAuthentication ) @@ -100,81 +102,136 @@ type Credentials struct { Token string } -// HTTPClient is safe for concurrent use as the fields are all read-only -// once the HTTPClient is instantiated. +// HTTPClient is safe for concurrent use. type HTTPClient struct { - // N.B - if url.UserInfo is accessed in future modifications to the - // methods on HTTPClient, you will need to syncronise access to url. 
- url url.URL - userAgent string - credMu sync.RWMutex - credentials *Credentials - httpClient *http.Client - transport *http.Transport + mu sync.RWMutex + config Config + urls []url.URL + client *http.Client + index int32 } // NewHTTPClient returns a new Client from the provided config. // Client is safe for concurrent use by multiple goroutines. -func NewHTTPClient(conf HTTPConfig) (*HTTPClient, error) { +func NewHTTPClient(conf Config) (*HTTPClient, error) { if conf.UserAgent == "" { conf.UserAgent = "KapacitorInfluxDBClient" } - - u, err := url.Parse(conf.URL) + urls, err := parseURLs(conf.URLs) if err != nil { - return nil, err - } else if u.Scheme != "http" && u.Scheme != "https" { - m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ - " must start with http:// or https://", u.Scheme) - return nil, errors.New(m) + return nil, errors.Wrap(err, "invalid URLs") } - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: conf.InsecureSkipVerify, + if conf.Transport == nil { + conf.Transport = &http.Transport{} + } + c := &HTTPClient{ + config: conf, + urls: urls, + client: &http.Client{ + Timeout: conf.Timeout, + Transport: conf.Transport, }, } - if conf.TLSConfig != nil { - tr.TLSClientConfig = conf.TLSConfig + return c, nil +} + +func parseURLs(urlStrs []string) ([]url.URL, error) { + urls := make([]url.URL, len(urlStrs)) + for i, urlStr := range urlStrs { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + return nil, fmt.Errorf( + "Unsupported protocol scheme: %s, your address must start with http:// or https://", + u.Scheme, + ) + } + urls[i] = *u + } + return urls, nil +} + +func (c *HTTPClient) loadConfig() Config { + c.mu.RLock() + config := c.config + c.mu.RUnlock() + return config +} + +func (c *HTTPClient) loadURLs() []url.URL { + c.mu.RLock() + urls := c.urls + c.mu.RUnlock() + return urls +} + +func (c *HTTPClient) loadHTTPClient() *http.Client { + c.mu.RLock() + client := c.client + c.mu.RUnlock() + return client +} + +// UpdateURLs updates the running list of URLs. 
+func (c *HTTPClient) Update(new Config) error { + if new.UserAgent == "" { + new.UserAgent = "KapacitorInfluxDBClient" } - return &HTTPClient{ - url: *u, - userAgent: conf.UserAgent, - credentials: conf.Credentials, - httpClient: &http.Client{ - Timeout: conf.Timeout, + c.mu.Lock() + defer c.mu.Unlock() + old := c.config + c.config = new + // Replace urls + urls, err := parseURLs(new.URLs) + if err != nil { + return err + } + c.urls = urls + if old.Timeout != new.Timeout || old.Transport != new.Transport { + //Replace the client + tr := new.Transport + if tr == nil { + tr = old.Transport + } + c.client = &http.Client{ + Timeout: new.Timeout, Transport: tr, - }, - transport: tr, - }, nil -} - -func (c *HTTPClient) setAuth(req *http.Request) error { - if c.credentials != nil { - // Get read lock on credentials - c.credMu.RLock() - defer c.credMu.RUnlock() - - switch c.credentials.Method { - case UserAuthentication: - req.SetBasicAuth(c.credentials.Username, c.credentials.Password) - case BearerAuthentication: - req.Header.Set("Authorization", "Bearer "+c.credentials.Token) - default: - return errors.New("unknown authentication method set") } } return nil } +func (c *HTTPClient) url() url.URL { + urls := c.loadURLs() + i := atomic.LoadInt32(&c.index) + i = (i + 1) % int32(len(urls)) + atomic.StoreInt32(&c.index, i) + return urls[i] +} + func (c *HTTPClient) do(req *http.Request, result interface{}, codes ...int) (*http.Response, error) { - req.Header.Set("User-Agent", c.userAgent) - err := c.setAuth(req) - if err != nil { - return nil, err + // Get current config + config := c.loadConfig() + // Set auth credentials + cred := config.Credentials + switch cred.Method { + case NoAuthentication: + case UserAuthentication: + req.SetBasicAuth(cred.Username, cred.Password) + case BearerAuthentication: + req.Header.Set("Authorization", "Bearer "+cred.Token) + default: + return nil, errors.New("unknown authentication method set") } + // Set user agent + req.Header.Set("User-Agent", config.UserAgent) - resp, err := c.httpClient.Do(req) + // Get client + client := c.loadHTTPClient() + + // Do request + resp, err := client.Do(req) if err != nil { return nil, err } @@ -215,20 +272,25 @@ func (c *HTTPClient) do(req *http.Request, result interface{}, codes ...int) (*h // Ping will check to see if the server is up with an optional timeout on waiting for leader. // Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
-func (c *HTTPClient) Ping(timeout time.Duration) (time.Duration, string, error) { +func (c *HTTPClient) Ping(ctx context.Context) (time.Duration, string, error) { now := time.Now() - u := c.url + u := c.url() u.Path = "ping" - if timeout > 0 { - v := url.Values{} - v.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) - u.RawQuery = v.Encode() + if ctx != nil { + if dl, ok := ctx.Deadline(); ok { + v := url.Values{} + v.Set("wait_for_leader", fmt.Sprintf("%.0fs", time.Now().Sub(dl).Seconds())) + u.RawQuery = v.Encode() + } } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return 0, "", err } + if ctx != nil { + req = req.WithContext(ctx) + } resp, err := c.do(req, nil, http.StatusNoContent) if err != nil { return 0, "", err @@ -250,7 +312,7 @@ func (c *HTTPClient) Write(bp BatchPoints) error { } } - u := c.url + u := c.url() u.Path = "write" v := url.Values{} v.Set("db", bp.Database()) @@ -303,7 +365,7 @@ type Result struct { // Query sends a command to the server and returns the Response func (c *HTTPClient) Query(q Query) (*Response, error) { - u := c.url + u := c.url() u.Path = "query" v := url.Values{} v.Set("q", q.Command) @@ -323,22 +385,10 @@ func (c *HTTPClient) Query(q Query) (*Response, error) { if err != nil { return nil, err } - - return response, nil -} - -// Close releases the HTTPClient's resources. -func (c *HTTPClient) Close() error { - c.transport.CloseIdleConnections() - return nil -} - -func (c *HTTPClient) SetToken(token string) { - if c.credentials != nil { - c.credMu.Lock() - c.credentials.Token = token - c.credMu.Unlock() + if err := response.Error(); err != nil { + return nil, err } + return response, nil } // BatchPoints is an interface into a batched grouping of points to write into @@ -483,6 +533,6 @@ func (p Point) Bytes(precision string) []byte { // Simple type to create github.com/influxdata/kapacitor/influxdb clients. type ClientCreator struct{} -func (ClientCreator) Create(config HTTPConfig) (Client, error) { +func (ClientCreator) Create(config Config) (ClientUpdater, error) { return NewHTTPClient(config) } diff --git a/influxdb/client_test.go b/influxdb/client_test.go index 6283c0b28..aac711d82 100644 --- a/influxdb/client_test.go +++ b/influxdb/client_test.go @@ -7,7 +7,6 @@ import ( "strings" "sync" "testing" - "time" ) func TestClient_Query(t *testing.T) { @@ -18,9 +17,8 @@ func TestClient_Query(t *testing.T) { })) defer ts.Close() - config := HTTPConfig{URL: ts.URL} + config := Config{URLs: []string{ts.URL}} c, _ := NewHTTPClient(config) - defer c.Close() query := Query{} _, err := c.Query(query) @@ -48,9 +46,8 @@ func TestClient_BasicAuth(t *testing.T) { })) defer ts.Close() - config := HTTPConfig{URL: ts.URL, Credentials: &Credentials{Method: UserAuthentication, Username: "username", Password: "password"}} + config := Config{URLs: []string{ts.URL}, Credentials: Credentials{Method: UserAuthentication, Username: "username", Password: "password"}} c, _ := NewHTTPClient(config) - defer c.Close() query := Query{} _, err := c.Query(query) @@ -67,11 +64,41 @@ func TestClient_Ping(t *testing.T) { })) defer ts.Close() - config := HTTPConfig{URL: ts.URL} + config := Config{URLs: []string{ts.URL}} c, _ := NewHTTPClient(config) - defer c.Close() - _, _, err := c.Ping(0) + _, _, err := c.Ping(nil) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_Update(t *testing.T) { + ts0 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts0.Close() + + config := Config{URLs: []string{ts0.URL}} + c, _ := NewHTTPClient(config) + + _, _, err := c.Ping(nil) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts1.Close() + config.URLs = []string{ts1.URL} + c.Update(config) + + _, _, err = c.Ping(nil) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } @@ -84,9 +111,8 @@ func TestClient_Concurrent_Use(t *testing.T) { })) defer ts.Close() - config := HTTPConfig{URL: ts.URL} + config := Config{URLs: []string{ts.URL}} c, _ := NewHTTPClient(config) - defer c.Close() var wg sync.WaitGroup wg.Add(3) @@ -119,7 +145,7 @@ func TestClient_Concurrent_Use(t *testing.T) { go func() { defer wg.Done() for i := 0; i < n; i++ { - c.Ping(time.Second) + c.Ping(nil) } }() wg.Wait() @@ -133,9 +159,8 @@ func TestClient_Write(t *testing.T) { })) defer ts.Close() - config := HTTPConfig{URL: ts.URL} + config := Config{URLs: []string{ts.URL}} c, _ := NewHTTPClient(config) - defer c.Close() bp, err := NewBatchPoints(BatchPointsConfig{}) if err != nil { @@ -179,9 +204,8 @@ func TestClient_UserAgent(t *testing.T) { for _, test := range tests { var err error - config := HTTPConfig{URL: ts.URL, UserAgent: test.userAgent} + config := Config{URLs: []string{ts.URL}, UserAgent: test.userAgent} c, _ := NewHTTPClient(config) - defer c.Close() receivedUserAgent = "" code = http.StatusOK @@ -207,7 +231,7 @@ func TestClient_UserAgent(t *testing.T) { receivedUserAgent = "" code = http.StatusNoContent - _, _, err = c.Ping(0) + _, _, err = c.Ping(nil) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } diff --git a/influxdb_out.go b/influxdb_out.go index 8ae8f0f6d..fb9890aee 100644 --- a/influxdb_out.go +++ b/influxdb_out.go @@ -2,7 +2,6 @@ package kapacitor import ( "bytes" - "errors" "log" "sync" "time" @@ -12,6 +11,7 @@ import ( "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" + "github.com/pkg/errors" ) const ( @@ -32,10 +32,14 @@ func newInfluxDBOutNode(et *ExecutingTask, n *pipeline.InfluxDBOutNode, l *log.L if et.tm.InfluxDBService == nil { return nil, errors.New("no InfluxDB cluster configured cannot use the InfluxDBOutNode") } + cli, err := et.tm.InfluxDBService.NewNamedClient(n.Cluster) + if err != nil { + return nil, errors.Wrap(err, "failed to get InfluxDB client") + } in := &InfluxDBOutNode{ node: node{Node: n, et: et, logger: l}, i: n, - wb: newWriteBuffer(int(n.Buffer), n.FlushInterval), + wb: newWriteBuffer(int(n.Buffer), n.FlushInterval, cli), } in.node.runF = in.runOut in.node.stopF = in.stopOut @@ -55,16 +59,11 @@ func (i *InfluxDBOutNode) runOut([]byte) error { // Create the database and retention policy if i.i.CreateFlag { - var err error - var conn influxdb.Client - if i.i.Cluster != "" { - conn, err = i.et.tm.InfluxDBService.NewNamedClient(i.i.Cluster) - } else { - conn, err = i.et.tm.InfluxDBService.NewDefaultClient() - } - if err != nil { - i.logger.Printf("E! 
failed to connect to InfluxDB cluster %q to create database", i.i.Cluster) - } else { + err := func() error { + cli, err := i.et.tm.InfluxDBService.NewNamedClient(i.i.Cluster) + if err != nil { + return err + } var createDb bytes.Buffer createDb.WriteString("CREATE DATABASE ") createDb.WriteString(influxql.QuoteIdent(i.i.Database)) @@ -72,13 +71,14 @@ func (i *InfluxDBOutNode) runOut([]byte) error { createDb.WriteString(" WITH NAME ") createDb.WriteString(influxql.QuoteIdent(i.i.RetentionPolicy)) } - resp, err := conn.Query(influxdb.Query{Command: createDb.String()}) + _, err = cli.Query(influxdb.Query{Command: createDb.String()}) if err != nil { - i.logger.Printf("E! failed to create database %q on cluster %q: %v", i.i.Database, i.i.Cluster, err) - } else if resp.Err != "" { - i.logger.Printf("E! failed to create database %q on cluster %q: %s", i.i.Database, i.i.Cluster, resp.Err) + return err } - conn.Close() + return nil + }() + if err != nil { + i.logger.Printf("E! failed to create database %q on cluster %q: %v", i.i.Database, i.i.Cluster, err) } } @@ -172,7 +172,7 @@ type writeBuffer struct { stopping chan struct{} wg sync.WaitGroup - conn influxdb.Client + cli influxdb.Client i *InfluxDBOutNode } @@ -182,8 +182,9 @@ type queueEntry struct { points []influxdb.Point } -func newWriteBuffer(size int, flushInterval time.Duration) *writeBuffer { +func newWriteBuffer(size int, flushInterval time.Duration, cli influxdb.Client) *writeBuffer { return &writeBuffer{ + cli: cli, size: size, flushInterval: flushInterval, flushing: make(chan struct{}), @@ -271,19 +272,7 @@ func (w *writeBuffer) writeAll() { } func (w *writeBuffer) write(bp influxdb.BatchPoints) error { - var err error - if w.conn == nil { - if w.i.i.Cluster != "" { - w.conn, err = w.i.et.tm.InfluxDBService.NewNamedClient(w.i.i.Cluster) - } else { - w.conn, err = w.i.et.tm.InfluxDBService.NewDefaultClient() - } - if err != nil { - w.i.writeErrors.Add(1) - return err - } - } - err = w.conn.Write(bp) + err := w.cli.Write(bp) if err != nil { w.i.writeErrors.Add(1) return err diff --git a/integrations/benchmark_test.go b/integrations/benchmark_test.go index aebf57a99..d02858da2 100644 --- a/integrations/benchmark_test.go +++ b/integrations/benchmark_test.go @@ -26,6 +26,7 @@ import ( "github.com/influxdata/kapacitor" "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/logging/loggingtest" "github.com/influxdata/kapacitor/services/noauth" ) @@ -196,7 +197,7 @@ func Bench(b *testing.B, tasksCount, pointCount, expectedProcessedCount int, tic for i := 0; i < b.N; i++ { // Do not time setup b.StopTimer() - tm := kapacitor.NewTaskMaster("bench", &LogService{}) + tm := kapacitor.NewTaskMaster("bench", loggingtest.New()) tm.HTTPDService = httpdService tm.UDFService = nil tm.TaskStore = taskStore{} diff --git a/integrations/helpers_test.go b/integrations/helpers_test.go index 1921c7b86..bbae3038b 100644 --- a/integrations/helpers_test.go +++ b/integrations/helpers_test.go @@ -3,11 +3,9 @@ package integrations import ( "errors" "fmt" - "io" "log" "net/http" "net/http/httptest" - "os" "reflect" "time" @@ -15,9 +13,7 @@ import ( "github.com/influxdata/kapacitor" "github.com/influxdata/kapacitor/influxdb" k8s "github.com/influxdata/kapacitor/services/k8s/client" - "github.com/influxdata/kapacitor/services/logging" "github.com/influxdata/kapacitor/udf" - "github.com/influxdata/wlog" ) type MockInfluxDBService struct { @@ -31,13 +27,10 @@ func NewMockInfluxDBService(h http.Handler) *MockInfluxDBService { } } 
-func (m *MockInfluxDBService) NewDefaultClient() (influxdb.Client, error) { - return influxdb.NewHTTPClient(influxdb.HTTPConfig{ - URL: m.ts.URL, - }) -} func (m *MockInfluxDBService) NewNamedClient(name string) (influxdb.Client, error) { - return m.NewDefaultClient() + return influxdb.NewHTTPClient(influxdb.Config{ + URLs: []string{m.ts.URL}, + }) } func compareResultsMetainfo(exp, got kapacitor.Result) (bool, string) { @@ -125,23 +118,6 @@ func compareAlertData(exp, got kapacitor.AlertData) (bool, string) { return compareResults(expData, gotData) } -type LogService struct{} - -func (l *LogService) NewLogger(prefix string, flag int) *log.Logger { - return wlog.New(os.Stderr, prefix, flag) -} -func (l *LogService) NewRawLogger(prefix string, flag int) *log.Logger { - return log.New(os.Stderr, prefix, flag) -} - -func (l *LogService) NewStaticLevelLogger(prefix string, flag int, level logging.Level) *log.Logger { - return log.New(wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)), prefix, flag) -} - -func (l *LogService) NewStaticLevelWriter(level logging.Level) io.Writer { - return wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)) -} - type UDFService struct { ListFunc func() []string InfoFunc func(name string) (udf.Info, bool) @@ -186,16 +162,27 @@ type k8sAutoscale struct { ScalesGetFunc func(kind, name string) (*k8s.Scale, error) ScalesUpdateFunc func(kind string, scale *k8s.Scale) error } +type k8sScales struct { + ScalesGetFunc func(kind, name string) (*k8s.Scale, error) + ScalesUpdateFunc func(kind string, scale *k8s.Scale) error +} -func (k k8sAutoscale) Client() k8s.Client { - return k +func (k k8sAutoscale) Client() (k8s.Client, error) { + return k, nil } func (k k8sAutoscale) Scales(namespace string) k8s.ScalesInterface { - return k + return k8sScales{ + ScalesGetFunc: k.ScalesGetFunc, + ScalesUpdateFunc: k.ScalesUpdateFunc, + } } -func (k k8sAutoscale) Get(kind, name string) (*k8s.Scale, error) { +func (k k8sAutoscale) Update(c k8s.Config) error { + return nil +} + +func (k k8sScales) Get(kind, name string) (*k8s.Scale, error) { return k.ScalesGetFunc(kind, name) } -func (k k8sAutoscale) Update(kind string, scale *k8s.Scale) error { +func (k k8sScales) Update(kind string, scale *k8s.Scale) error { return k.ScalesUpdateFunc(kind, scale) } diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index e32ac141a..c2cf613fc 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -28,6 +28,7 @@ import ( "github.com/influxdata/kapacitor/services/hipchat" "github.com/influxdata/kapacitor/services/httpd" k8s "github.com/influxdata/kapacitor/services/k8s/client" + "github.com/influxdata/kapacitor/services/logging/loggingtest" "github.com/influxdata/kapacitor/services/opsgenie" "github.com/influxdata/kapacitor/services/pagerduty" "github.com/influxdata/kapacitor/services/sensu" @@ -41,7 +42,7 @@ import ( ) var httpService *httpd.Service -var logService = &LogService{} +var logService = loggingtest.New() var dbrps = []kapacitor.DBRP{ { @@ -5747,6 +5748,7 @@ stream defer tm.Close() c := sensu.NewConfig() + c.Enabled = true c.Addr = listen.Addr().String() c.Source = "Kapacitor" sl := sensu.NewService(c, logService.NewLogger("[test_sensu] ", log.LstdFlags)) @@ -5839,6 +5841,7 @@ stream defer tm.Close() c := slack.NewConfig() + c.Enabled = true c.URL = ts.URL + "/test/slack/url" c.Channel = "#channel" sl := slack.NewService(c, logService.NewLogger("[test_slack] ", log.LstdFlags)) @@ -5935,6 +5938,7 @@ stream defer tm.Close() c := 
telegram.NewConfig() + c.Enabled = true c.URL = ts.URL + "/bot" c.Token = "TOKEN:AUTH" c.ChatId = "123456789" @@ -6018,6 +6022,7 @@ stream defer tm.Close() c := hipchat.NewConfig() + c.Enabled = true c.URL = ts.URL c.Room = "1231234" c.Token = "testtoken1231234" @@ -6143,6 +6148,7 @@ stream defer tm.Close() c := alerta.NewConfig() + c.Enabled = true c.URL = ts.URL c.Origin = "Kapacitor" sl := alerta.NewService(c, logService.NewLogger("[test_alerta] ", log.LstdFlags)) @@ -6262,6 +6268,7 @@ stream clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) defer tm.Close() c := opsgenie.NewConfig() + c.Enabled = true c.URL = ts.URL c.APIKey = "api_key" og := opsgenie.NewService(c, logService.NewLogger("[test_og] ", log.LstdFlags)) @@ -6343,6 +6350,7 @@ stream clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) defer tm.Close() c := pagerduty.NewConfig() + c.Enabled = true c.URL = ts.URL c.ServiceKey = "service_key" pd := pagerduty.NewService(c, logService.NewLogger("[test_pd] ", log.LstdFlags)) @@ -6428,6 +6436,7 @@ stream clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) defer tm.Close() c := victorops.NewConfig() + c.Enabled = true c.URL = ts.URL c.APIKey = "api_key" c.RoutingKey = "routing_key" @@ -6439,8 +6448,8 @@ stream t.Error(err) } - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 1", rc) + if got, exp := atomic.LoadInt32(&requestCount), int32(2); got != exp { + t.Errorf("unexpected requestCount got %d exp %d", got, exp) } } @@ -6494,6 +6503,7 @@ stream defer tm.Close() c := talk.NewConfig() + c.Enabled = true c.URL = ts.URL c.AuthorName = "Kapacitor" sl := talk.NewService(c, logService.NewLogger("[test_talk] ", log.LstdFlags)) diff --git a/k8s_autoscale.go b/k8s_autoscale.go index 88fc4c1a6..199ee69e6 100644 --- a/k8s_autoscale.go +++ b/k8s_autoscale.go @@ -43,17 +43,17 @@ type K8sAutoscaleNode struct { // Create a new K8sAutoscaleNode which can trigger autoscale event for a Kubernetes cluster. 
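The k8s test doubles above now expose `Client() (k8s.Client, error)`, and the node constructor that follows obtains its client once, up front, failing fast with a wrapped error instead of nil-checking the service later. Here is a minimal sketch of that constructor pattern; `Service`, `Client`, `Version`, and `unconfigured` are illustrative placeholders, not the real kubernetes client types.

```
package main

import (
	"errors"
	"fmt"
)

// Client and Service are illustrative; only the Client() (Client, error)
// shape of the service matters for this pattern.
type Client interface {
	Version() string
}

type Service interface {
	Client() (Client, error)
}

// newNode asks for the client at construction time and wraps any failure
// in a descriptive error, so a misconfigured service is caught immediately.
func newNode(s Service) (Client, error) {
	client, err := s.Client()
	if err != nil {
		return nil, fmt.Errorf("cannot use the k8sAutoscale node, could not create kubernetes client: %v", err)
	}
	return client, nil
}

type unconfigured struct{}

func (unconfigured) Client() (Client, error) {
	return nil, errors.New("kubernetes service is not configured")
}

func main() {
	if _, err := newNode(unconfigured{}); err != nil {
		fmt.Println(err)
	}
}
```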
func newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log.Logger) (*K8sAutoscaleNode, error) { - if et.tm.K8sService == nil { - return nil, errors.New("cannot use the k8sAutoscale node, the kubernetes service is not enabled") + client, err := et.tm.K8sService.Client() + if err != nil { + return nil, fmt.Errorf("cannot use the k8sAutoscale node, could not create kubernetes client: %v", err) } - kn := &K8sAutoscaleNode{ node: node{Node: n, et: et, logger: l}, k: n, resourceStates: make(map[string]resourceState), min: int(n.Min), max: int(n.Max), - client: et.tm.K8sService.Client(), + client: client, } if kn.min < 1 { return nil, fmt.Errorf("minimum count must be >= 1, got %d", kn.min) diff --git a/pipeline/batch.go b/pipeline/batch.go index e10685082..2b8fc0586 100644 --- a/pipeline/batch.go +++ b/pipeline/batch.go @@ -23,7 +23,6 @@ import ( // Available Statistics: // // * query_errors -- number of errors when querying -// * connect_errors -- number of errors connecting to InfluxDB // * batches_queried -- number of batches returned from queries // * points_queried -- total number of points in batches // diff --git a/server/config.go b/server/config.go index 9d391b25f..ed575f724 100644 --- a/server/config.go +++ b/server/config.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/kapacitor/services/alerta" + "github.com/influxdata/kapacitor/services/config" "github.com/influxdata/kapacitor/services/deadman" "github.com/influxdata/kapacitor/services/hipchat" "github.com/influxdata/kapacitor/services/httpd" @@ -41,38 +42,43 @@ import ( // Config represents the configuration format for the kapacitord binary. type Config struct { - HTTP httpd.Config `toml:"http"` - Replay replay.Config `toml:"replay"` - Storage storage.Config `toml:"storage"` - Task task_store.Config `toml:"task"` - InfluxDB []influxdb.Config `toml:"influxdb"` - Logging logging.Config `toml:"logging"` - Kubernetes k8s.Config `toml:"kubernetes"` - + HTTP httpd.Config `toml:"http"` + Replay replay.Config `toml:"replay"` + Storage storage.Config `toml:"storage"` + Task task_store.Config `toml:"task"` + InfluxDB []influxdb.Config `toml:"influxdb" override:"influxdb,element-key=name"` + Logging logging.Config `toml:"logging"` + ConfigOverride config.Config `toml:"config-override"` + + // Input services Graphites []graphite.Config `toml:"graphite"` Collectd collectd.Config `toml:"collectd"` OpenTSDB opentsdb.Config `toml:"opentsdb"` UDPs []udp.Config `toml:"udp"` - SMTP smtp.Config `toml:"smtp"` - OpsGenie opsgenie.Config `toml:"opsgenie"` - VictorOps victorops.Config `toml:"victorops"` - PagerDuty pagerduty.Config `toml:"pagerduty"` - Sensu sensu.Config `toml:"sensu"` - Slack slack.Config `toml:"slack"` - Telegram telegram.Config `toml:"telegram"` - HipChat hipchat.Config `toml:"hipchat"` - Alerta alerta.Config `toml:"alerta"` - Reporting reporting.Config `toml:"reporting"` - Stats stats.Config `toml:"stats"` - UDF udf.Config `toml:"udf"` - Deadman deadman.Config `toml:"deadman"` - Talk talk.Config `toml:"talk"` - - Hostname string `toml:"hostname"` - DataDir string `toml:"data_dir"` - - // The index of the default InfluxDB config - defaultInfluxDB int + + // Alert handlers + Alerta alerta.Config `toml:"alerta" override:"alerta"` + HipChat hipchat.Config `toml:"hipchat" override:"hipchat"` + OpsGenie opsgenie.Config `toml:"opsgenie" override:"opsgenie"` + PagerDuty pagerduty.Config `toml:"pagerduty" override:"pagerduty"` + SMTP smtp.Config `toml:"smtp" override:"smtp"` + Sensu sensu.Config `toml:"sensu" 
override:"sensu"` + Slack slack.Config `toml:"slack" override:"slack"` + Talk talk.Config `toml:"talk" override:"talk"` + Telegram telegram.Config `toml:"telegram" override:"telegram"` + VictorOps victorops.Config `toml:"victorops" override:"victorops"` + + // Third-party integrations + Kubernetes k8s.Config `toml:"kubernetes" override:"kubernetes"` + + Reporting reporting.Config `toml:"reporting"` + Stats stats.Config `toml:"stats"` + UDF udf.Config `toml:"udf"` + Deadman deadman.Config `toml:"deadman"` + + Hostname string `toml:"hostname"` + DataDir string `toml:"data_dir"` + SkipConfigOverrides bool `toml:"skip-config-overrides"` } // NewConfig returns an instance of Config with reasonable defaults. @@ -85,53 +91,36 @@ func NewConfig() *Config { c.Storage = storage.NewConfig() c.Replay = replay.NewConfig() c.Task = task_store.NewConfig() + c.InfluxDB = []influxdb.Config{influxdb.NewConfig()} c.Logging = logging.NewConfig() c.Kubernetes = k8s.NewConfig() + c.ConfigOverride = config.NewConfig() c.Collectd = collectd.NewConfig() c.OpenTSDB = opentsdb.NewConfig() - c.SMTP = smtp.NewConfig() + + c.Alerta = alerta.NewConfig() + c.HipChat = hipchat.NewConfig() c.OpsGenie = opsgenie.NewConfig() - c.VictorOps = victorops.NewConfig() c.PagerDuty = pagerduty.NewConfig() + c.SMTP = smtp.NewConfig() c.Sensu = sensu.NewConfig() c.Slack = slack.NewConfig() + c.Talk = talk.NewConfig() c.Telegram = telegram.NewConfig() - c.HipChat = hipchat.NewConfig() - c.Alerta = alerta.NewConfig() + c.VictorOps = victorops.NewConfig() + c.Reporting = reporting.NewConfig() c.Stats = stats.NewConfig() c.UDF = udf.NewConfig() c.Deadman = deadman.NewConfig() - c.Talk = talk.NewConfig() return c } -// Once the config has been created and decoded, you can call this method -// to initialize ARRAY attributes. -// All ARRAY attributes have to be init after toml decode -// See: https://github.com/BurntSushi/toml/pull/68 -func (c *Config) PostInit() { - if len(c.InfluxDB) == 0 { - i := influxdb.NewConfig() - c.InfluxDB = []influxdb.Config{i} - c.InfluxDB[0].Name = "default" - c.InfluxDB[0].URLs = []string{"http://localhost:8086"} - } else if len(c.InfluxDB) == 1 && c.InfluxDB[0].Name == "" { - c.InfluxDB[0].Name = "default" - } - // Set default Values - for i, influx := range c.InfluxDB { - influx.SetDefaultValues() - c.InfluxDB[i] = influx - } -} - // NewDemoConfig returns the config that runs when no config is specified. func NewDemoConfig() (*Config, error) { c := NewConfig() - c.PostInit() var homeDir string // By default, store meta and data files in current users home directory @@ -160,69 +149,88 @@ func (c *Config) Validate() error { if c.DataDir == "" { return fmt.Errorf("must configure valid data dir") } - err := c.Replay.Validate() - if err != nil { + if err := c.Replay.Validate(); err != nil { return err } - err = c.Storage.Validate() - if err != nil { + if err := c.Storage.Validate(); err != nil { return err } - err = c.HTTP.Validate() - if err != nil { + if err := c.HTTP.Validate(); err != nil { return err } - err = c.Task.Validate() - if err != nil { + if err := c.Task.Validate(); err != nil { return err } - c.defaultInfluxDB = -1 + // Validate the set of InfluxDB configs. + // All names should be unique. names := make(map[string]bool, len(c.InfluxDB)) - for i := 0; i < len(c.InfluxDB); i++ { - config := c.InfluxDB[i] - if !config.Enabled { - c.InfluxDB = append(c.InfluxDB[0:i], c.InfluxDB[i+1:]...) - i-- - continue - } + // Should be exactly one default if at least one configs is enabled. 
+ defaultInfluxDB := -1 + numEnabled := 0 + for i, config := range c.InfluxDB { + config.ApplyConditionalDefaults() if names[config.Name] { return fmt.Errorf("duplicate name %q for influxdb configs", config.Name) } names[config.Name] = true - err = config.Validate() - if err != nil { + if err := config.Validate(); err != nil { return err } - if config.Default { - if c.defaultInfluxDB != -1 { - return fmt.Errorf("More than one InfluxDB default was specified: %s %s", config.Name, c.InfluxDB[c.defaultInfluxDB].Name) + if config.Enabled && config.Default { + if defaultInfluxDB != -1 { + return fmt.Errorf("More than one InfluxDB default was specified: %s %s", config.Name, c.InfluxDB[defaultInfluxDB].Name) } - c.defaultInfluxDB = i + defaultInfluxDB = i + } + if config.Enabled { + numEnabled++ } } - // Set default if it is the only one - if len(c.InfluxDB) == 1 { - c.defaultInfluxDB = 0 + if numEnabled > 1 && defaultInfluxDB == -1 { + return errors.New("at least one of the enabled InfluxDB clusters must be marked as default.") } - if len(c.InfluxDB) > 0 && c.defaultInfluxDB == -1 { - return errors.New("at least one InfluxDB cluster must be marked as default.") + + // Validate inputs + for _, g := range c.Graphites { + if err := g.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) + } } - err = c.UDF.Validate() - if err != nil { + + // Validate alert handlers + if err := c.Alerta.Validate(); err != nil { return err } - err = c.Sensu.Validate() - if err != nil { + if err := c.HipChat.Validate(); err != nil { return err } - err = c.Talk.Validate() - if err != nil { + if err := c.OpsGenie.Validate(); err != nil { return err } - for _, g := range c.Graphites { - if err := g.Validate(); err != nil { - return fmt.Errorf("invalid graphite config: %v", err) - } + if err := c.PagerDuty.Validate(); err != nil { + return err + } + if err := c.SMTP.Validate(); err != nil { + return err + } + if err := c.Sensu.Validate(); err != nil { + return err + } + if err := c.Slack.Validate(); err != nil { + return err + } + if err := c.Talk.Validate(); err != nil { + return err + } + if err := c.Telegram.Validate(); err != nil { + return err + } + if err := c.VictorOps.Validate(); err != nil { + return err + } + + if err := c.UDF.Validate(); err != nil { + return err } return nil } diff --git a/server/server.go b/server/server.go index f035f8cce..5128611f6 100644 --- a/server/server.go +++ b/server/server.go @@ -20,6 +20,7 @@ import ( "github.com/influxdata/kapacitor/auth" iclient "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/services/alerta" + "github.com/influxdata/kapacitor/services/config" "github.com/influxdata/kapacitor/services/deadman" "github.com/influxdata/kapacitor/services/hipchat" "github.com/influxdata/kapacitor/services/httpd" @@ -70,12 +71,13 @@ type Server struct { TaskMaster *kapacitor.TaskMaster TaskMasterLookup *kapacitor.TaskMasterLookup - AuthService auth.Interface - HTTPDService *httpd.Service - StorageService *storage.Service - TaskStore *task_store.Service - ReplayService *replay.Service - InfluxDBService *influxdb.Service + AuthService auth.Interface + HTTPDService *httpd.Service + StorageService *storage.Service + TaskStore *task_store.Service + ReplayService *replay.Service + InfluxDBService *influxdb.Service + ConfigOverrideService *config.Service MetaClient *kapacitor.NoopMetaClient QueryExecutor *Queryexecutor @@ -85,6 +87,11 @@ type Server struct { // Map of service name to index in Services list ServicesByName map[string]int + 
// Map of services capable of receiving dynamic configuration updates. + DynamicServices map[string]Updater + // Channel of incoming configuration updates. + configUpdates chan config.ConfigUpdate + BuildInfo BuildInfo ClusterID string ServerID string @@ -105,16 +112,18 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, } l := logService.NewLogger("[srv] ", log.LstdFlags) s := &Server{ - config: c, - BuildInfo: buildInfo, - dataDir: c.DataDir, - hostname: c.Hostname, - err: make(chan error), - LogService: logService, - MetaClient: &kapacitor.NoopMetaClient{}, - QueryExecutor: &Queryexecutor{}, - Logger: l, - ServicesByName: make(map[string]int), + config: c, + BuildInfo: buildInfo, + dataDir: c.DataDir, + hostname: c.Hostname, + err: make(chan error), + configUpdates: make(chan config.ConfigUpdate, 100), + LogService: logService, + MetaClient: &kapacitor.NoopMetaClient{}, + QueryExecutor: &Queryexecutor{}, + Logger: l, + ServicesByName: make(map[string]int), + DynamicServices: make(map[string]Updater), } s.Logger.Println("I! Kapacitor hostname:", s.hostname) @@ -123,6 +132,7 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, if err != nil { return nil, err } + // Set published vars kapacitor.ClusterIDVar.Set(s.ClusterID) kapacitor.ServerIDVar.Set(s.ServerID) @@ -140,34 +150,40 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, } // Append Kapacitor services. - s.appendUDFService() - s.appendDeadmanService() - s.appendSMTPService() - s.InitHTTPDService() + s.initHTTPDService() s.appendStorageService() s.appendAuthService() + s.appendUDFService() + s.appendDeadmanService() + + // Append config override service before any dynamic services + s.appendConfigOverrideService() + if err := s.appendInfluxDBService(); err != nil { return nil, errors.Wrap(err, "influxdb service") } + // Append these after InfluxDB because they depend on it s.appendTaskStoreService() s.appendReplayService() - if err := s.appendK8sService(); err != nil { - return nil, errors.Wrap(err, "kubernetes service") - } - // Append Alert integration services + s.appendAlertaService() + s.appendHipChatService() s.appendOpsGenieService() - s.appendVictorOpsService() s.appendPagerDutyService() - s.appendTelegramService() - s.appendHipChatService() - s.appendAlertaService() - s.appendSlackService() + s.appendSMTPService() s.appendSensuService() + s.appendSlackService() s.appendTalkService() + s.appendTelegramService() + s.appendVictorOpsService() + + // Append third-party integrations + if err := s.appendK8sService(); err != nil { + return nil, errors.Wrap(err, "kubernetes service") + } - // Append InfluxDB input services + // Append extra input services s.appendCollectdService() s.appendUDPServices() if err := s.appendOpenTSDBService(); err != nil { @@ -202,40 +218,51 @@ func (s *Server) appendStorageService() { s.AppendService("storage", srv) } +func (s *Server) appendConfigOverrideService() { + l := s.LogService.NewLogger("[config-override] ", log.LstdFlags) + srv := config.NewService(s.config.ConfigOverride, s.config, l, s.configUpdates) + srv.HTTPDService = s.HTTPDService + srv.StorageService = s.StorageService + + s.ConfigOverrideService = srv + s.AppendService("config", srv) +} + func (s *Server) appendSMTPService() { c := s.config.SMTP - if c.Enabled { - l := s.LogService.NewLogger("[smtp] ", log.LstdFlags) - srv := smtp.NewService(c, l) + l := s.LogService.NewLogger("[smtp] ", log.LstdFlags) + srv := smtp.NewService(c, l) - 
s.TaskMaster.SMTPService = srv - s.AppendService("smtp", srv) - } + s.TaskMaster.SMTPService = srv + s.AppendService("smtp", srv) + s.DynamicServices["smtp"] = srv } func (s *Server) appendInfluxDBService() error { c := s.config.InfluxDB - if len(c) > 0 { - l := s.LogService.NewLogger("[influxdb] ", log.LstdFlags) - httpPort, err := s.config.HTTP.Port() - if err != nil { - return errors.Wrap(err, "failed to get http port") - } - srv := influxdb.NewService(c, s.config.defaultInfluxDB, httpPort, s.config.Hostname, s.config.HTTP.AuthEnabled, l) - srv.HTTPDService = s.HTTPDService - srv.PointsWriter = s.TaskMaster - srv.LogService = s.LogService - srv.AuthService = s.AuthService - srv.ClientCreator = iclient.ClientCreator{} - - s.InfluxDBService = srv - s.TaskMaster.InfluxDBService = srv - s.AppendService("influxdb", srv) + l := s.LogService.NewLogger("[influxdb] ", log.LstdFlags) + httpPort, err := s.config.HTTP.Port() + if err != nil { + return errors.Wrap(err, "failed to get http port") + } + srv, err := influxdb.NewService(c, httpPort, s.config.Hostname, s.config.HTTP.AuthEnabled, l) + if err != nil { + return err } + srv.HTTPDService = s.HTTPDService + srv.PointsWriter = s.TaskMaster + srv.LogService = s.LogService + srv.AuthService = s.AuthService + srv.ClientCreator = iclient.ClientCreator{} + + s.InfluxDBService = srv + s.TaskMaster.InfluxDBService = srv + s.AppendService("influxdb", srv) + s.DynamicServices["influxdb"] = srv return nil } -func (s *Server) InitHTTPDService() { +func (s *Server) initHTTPDService() { l := s.LogService.NewLogger("[httpd] ", log.LstdFlags) srv := httpd.NewService(s.config.HTTP, s.hostname, l, s.LogService) @@ -278,16 +305,15 @@ func (s *Server) appendReplayService() { func (s *Server) appendK8sService() error { c := s.config.Kubernetes - if c.Enabled { - l := s.LogService.NewLogger("[kubernetes] ", log.LstdFlags) - srv, err := k8s.NewService(c, l) - if err != nil { - return err - } - - s.TaskMaster.K8sService = srv - s.AppendService("kubernetes", srv) + l := s.LogService.NewLogger("[kubernetes] ", log.LstdFlags) + srv, err := k8s.NewService(c, l) + if err != nil { + return err } + + s.TaskMaster.K8sService = srv + s.DynamicServices["kubernetes"] = srv + s.AppendService("kubernetes", srv) return nil } @@ -318,102 +344,93 @@ func (s *Server) appendAuthService() { func (s *Server) appendOpsGenieService() { c := s.config.OpsGenie - if c.Enabled { - l := s.LogService.NewLogger("[opsgenie] ", log.LstdFlags) - srv := opsgenie.NewService(c, l) - s.TaskMaster.OpsGenieService = srv + l := s.LogService.NewLogger("[opsgenie] ", log.LstdFlags) + srv := opsgenie.NewService(c, l) + s.TaskMaster.OpsGenieService = srv - s.AppendService("opsgenie", srv) - } + s.AppendService("opsgenie", srv) + s.DynamicServices["opsgenie"] = srv } func (s *Server) appendVictorOpsService() { c := s.config.VictorOps - if c.Enabled { - l := s.LogService.NewLogger("[victorops] ", log.LstdFlags) - srv := victorops.NewService(c, l) - s.TaskMaster.VictorOpsService = srv + l := s.LogService.NewLogger("[victorops] ", log.LstdFlags) + srv := victorops.NewService(c, l) + s.TaskMaster.VictorOpsService = srv - s.AppendService("victorops", srv) - } + s.AppendService("victorops", srv) + s.DynamicServices["victorops"] = srv } func (s *Server) appendPagerDutyService() { c := s.config.PagerDuty - if c.Enabled { - l := s.LogService.NewLogger("[pagerduty] ", log.LstdFlags) - srv := pagerduty.NewService(c, l) - srv.HTTPDService = s.HTTPDService - s.TaskMaster.PagerDutyService = srv + l := 
s.LogService.NewLogger("[pagerduty] ", log.LstdFlags) + srv := pagerduty.NewService(c, l) + srv.HTTPDService = s.HTTPDService + s.TaskMaster.PagerDutyService = srv - s.AppendService("pagerduty", srv) - } + s.AppendService("pagerduty", srv) + s.DynamicServices["pagerduty"] = srv } func (s *Server) appendSensuService() { c := s.config.Sensu - if c.Enabled { - l := s.LogService.NewLogger("[sensu] ", log.LstdFlags) - srv := sensu.NewService(c, l) - s.TaskMaster.SensuService = srv + l := s.LogService.NewLogger("[sensu] ", log.LstdFlags) + srv := sensu.NewService(c, l) + s.TaskMaster.SensuService = srv - s.AppendService("sensu", srv) - } + s.AppendService("sensu", srv) + s.DynamicServices["sensu"] = srv } func (s *Server) appendSlackService() { c := s.config.Slack - if c.Enabled { - l := s.LogService.NewLogger("[slack] ", log.LstdFlags) - srv := slack.NewService(c, l) - s.TaskMaster.SlackService = srv + l := s.LogService.NewLogger("[slack] ", log.LstdFlags) + srv := slack.NewService(c, l) + s.TaskMaster.SlackService = srv - s.AppendService("slack", srv) - } + s.AppendService("slack", srv) + s.DynamicServices["slack"] = srv } func (s *Server) appendTelegramService() { c := s.config.Telegram - if c.Enabled { - l := s.LogService.NewLogger("[telegram] ", log.LstdFlags) - srv := telegram.NewService(c, l) - s.TaskMaster.TelegramService = srv + l := s.LogService.NewLogger("[telegram] ", log.LstdFlags) + srv := telegram.NewService(c, l) + s.TaskMaster.TelegramService = srv - s.AppendService("telegram", srv) - } + s.AppendService("telegram", srv) + s.DynamicServices["telegram"] = srv } func (s *Server) appendHipChatService() { c := s.config.HipChat - if c.Enabled { - l := s.LogService.NewLogger("[hipchat] ", log.LstdFlags) - srv := hipchat.NewService(c, l) - s.TaskMaster.HipChatService = srv + l := s.LogService.NewLogger("[hipchat] ", log.LstdFlags) + srv := hipchat.NewService(c, l) + s.TaskMaster.HipChatService = srv - s.AppendService("hipchat", srv) - } + s.AppendService("hipchat", srv) + s.DynamicServices["hipchat"] = srv } func (s *Server) appendAlertaService() { c := s.config.Alerta - if c.Enabled { - l := s.LogService.NewLogger("[alerta] ", log.LstdFlags) - srv := alerta.NewService(c, l) - s.TaskMaster.AlertaService = srv + l := s.LogService.NewLogger("[alerta] ", log.LstdFlags) + srv := alerta.NewService(c, l) + s.TaskMaster.AlertaService = srv - s.AppendService("alerta", srv) - } + s.AppendService("alerta", srv) + s.DynamicServices["alerta"] = srv } func (s *Server) appendTalkService() { c := s.config.Talk - if c.Enabled { - l := s.LogService.NewLogger("[talk] ", log.LstdFlags) - srv := talk.NewService(c, l) - s.TaskMaster.TalkService = srv + l := s.LogService.NewLogger("[talk] ", log.LstdFlags) + srv := talk.NewService(c, l) + s.TaskMaster.TalkService = srv - s.AppendService("talk", srv) - } + s.AppendService("talk", srv) + s.DynamicServices["talk"] = srv } func (s *Server) appendCollectdService() { @@ -506,25 +523,49 @@ func (s *Server) Err() <-chan error { return s.err } // Open opens all the services. func (s *Server) Open() error { + + // Start profiling, if set. + s.startProfile(s.CPUProfile, s.MemProfile) + if err := s.startServices(); err != nil { s.Close() return err } go s.watchServices() + go s.watchConfigUpdates() return nil } func (s *Server) startServices() error { - // Start profiling, if set. - s.startProfile(s.CPUProfile, s.MemProfile) for _, service := range s.Services { s.Logger.Printf("D! 
opening service: %T", service) if err := service.Open(); err != nil { return fmt.Errorf("open service %T: %s", service, err) } s.Logger.Printf("D! opened service: %T", service) + + // Apply config overrides after the config override service has been opened and before any dynamic services. + if service == s.ConfigOverrideService && !s.config.SkipConfigOverrides { + // Apply initial config updates + s.Logger.Println("D! applying configuration overrides") + configs, err := s.ConfigOverrideService.Config() + if err != nil { + return errors.Wrap(err, "failed to apply config overrides") + } + for service, config := range configs { + if srv, ok := s.DynamicServices[service]; !ok { + return fmt.Errorf("found configuration override for unknown service %q", service) + } else { + s.Logger.Println("D! applying configuration overrides for", service) + if err := srv.Update(config); err != nil { + return errors.Wrapf(err, "failed to update configuration for service %s", service) + } + } + } + } + } return nil } @@ -538,6 +579,16 @@ func (s *Server) watchServices() { s.err <- err } +func (s *Server) watchConfigUpdates() { + for cu := range s.configUpdates { + if srv, ok := s.DynamicServices[cu.Name]; !ok { + cu.ErrC <- fmt.Errorf("received configuration update for unknown dynamic service %s", cu.Name) + } else { + cu.ErrC <- srv.Update(cu.NewConfig) + } + } +} + // Close shuts down the meta and data stores and all services. func (s *Server) Close() error { s.stopProfile() @@ -629,6 +680,11 @@ type Service interface { Close() error } +// Updater represents a service that can have its configuration updated while running. +type Updater interface { + Update(c []interface{}) error +} + // prof stores the file locations of active profiles. var prof struct { cpu *os.File diff --git a/server/server_helper_test.go b/server/server_helper_test.go index e81224ee8..74b32553a 100644 --- a/server/server_helper_test.go +++ b/server/server_helper_test.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net/http" "net/http/httptest" "net/url" @@ -19,7 +18,7 @@ import ( iclient "github.com/influxdata/influxdb/client/v2" "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/server" - "github.com/influxdata/kapacitor/services/logging" + "github.com/influxdata/kapacitor/services/logging/loggingtest" "github.com/influxdata/wlog" ) @@ -38,7 +37,7 @@ func NewServer(c *server.Config) *Server { Branch: "testBranch", } c.HTTP.LogEnabled = testing.Verbose() - ls := &LogService{} + ls := loggingtest.New() srv, err := server.New(c, buildInfo, ls) if err != nil { panic(err) @@ -82,7 +81,7 @@ func Client(s *Server) *client.Client { func (s *Server) Close() { s.Server.Close() os.RemoveAll(s.Config.Replay.Dir) - os.Remove(s.Config.Storage.BoltDBPath) + os.RemoveAll(filepath.Dir(s.Config.Storage.BoltDBPath)) os.RemoveAll(s.Config.DataDir) } @@ -175,7 +174,6 @@ func (s *Server) Stats() (stats, error) { // NewConfig returns the default config with temporary paths. 
func NewConfig() *server.Config { c := server.NewConfig() - c.PostInit() c.Reporting.Enabled = false c.Replay.Dir = MustTempDir() c.Storage.BoltDBPath = filepath.Join(MustTempDir(), "bolt.db") @@ -203,23 +201,6 @@ func configureLogging() { } } -type LogService struct{} - -func (l *LogService) NewLogger(prefix string, flag int) *log.Logger { - return wlog.New(os.Stderr, prefix, flag) -} -func (l *LogService) NewRawLogger(prefix string, flag int) *log.Logger { - return log.New(os.Stderr, prefix, flag) -} - -func (l *LogService) NewStaticLevelLogger(prefix string, flag int, level logging.Level) *log.Logger { - return log.New(wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)), prefix, flag) -} - -func (l *LogService) NewStaticLevelWriter(level logging.Level) io.Writer { - return wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)) -} - type queryFunc func(q string) *iclient.Response type InfluxDB struct { diff --git a/server/server_test.go b/server/server_test.go index a28bf66c9..c7df5f75c 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -25,7 +25,12 @@ import ( "github.com/influxdata/influxdb/toml" "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/server" + "github.com/influxdata/kapacitor/services/opsgenie" + "github.com/influxdata/kapacitor/services/pagerduty" + "github.com/influxdata/kapacitor/services/telegram" "github.com/influxdata/kapacitor/services/udf" + "github.com/influxdata/kapacitor/services/victorops" + "github.com/pkg/errors" ) var udfDir string @@ -2486,6 +2491,156 @@ func TestServer_BatchTask(t *testing.T) { } } } +func TestServer_BatchTask_InfluxDBConfigUpdate(t *testing.T) { + c := NewConfig() + c.InfluxDB[0].Enabled = true + count := 0 + stopTimeC := make(chan time.Time, 1) + + badCount := 0 + + dbBad := NewInfluxDB(func(q string) *iclient.Response { + badCount++ + // Return empty results + return &iclient.Response{ + Results: []iclient.Result{}, + } + }) + defer dbBad.Close() + db := NewInfluxDB(func(q string) *iclient.Response { + stmt, err := influxql.ParseStatement(q) + if err != nil { + return &iclient.Response{Err: err.Error()} + } + slct, ok := stmt.(*influxql.SelectStatement) + if !ok { + return nil + } + cond, ok := slct.Condition.(*influxql.BinaryExpr) + if !ok { + return &iclient.Response{Err: "expected select condition to be binary expression"} + } + stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr) + if !ok { + return &iclient.Response{Err: "expected select condition rhs to be binary expression"} + } + stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral) + if !ok { + return &iclient.Response{Err: "expected select condition rhs to be string literal"} + } + count++ + switch count { + case 1: + stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val) + if err != nil { + return &iclient.Response{Err: err.Error()} + } + stopTimeC <- stopTime + return &iclient.Response{ + Results: []iclient.Result{{ + Series: []models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + stopTime.Add(-2 * time.Millisecond).Format(time.RFC3339Nano), + 1.0, + }, + { + stopTime.Add(-1 * time.Millisecond).Format(time.RFC3339Nano), + 1.0, + }, + }, + }}, + }}, + } + default: + return &iclient.Response{ + Results: []iclient.Result{{ + Series: []models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{}, + }}, + }}, + } + } + }) + defer db.Close() + + // Set bad URL first + c.InfluxDB[0].URLs = []string{dbBad.URL()} + s := OpenServer(c) + defer s.Close() + cli 
:= Client(s) + + id := "testBatchTask" + ttype := client.BatchTask + dbrps := []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }} + tick := `batch + |query('SELECT value from mydb.myrp.cpu') + .period(5ms) + .every(5ms) + .align() + |count('value') + |where(lambda: "count" == 2) + |httpOut('count') +` + + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) + if err != nil { + t.Fatal(err) + } + + _, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) + if err != nil { + t.Fatal(err) + } + + // Update InfluxDB config, while task is running + influxdbDefault := cli.ConfigElementLink("influxdb", "default") + if err := cli.ConfigUpdate(influxdbDefault, client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "urls": []string{db.URL()}, + }, + }); err != nil { + t.Fatal(err) + } + + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) + timeout := time.NewTicker(100 * time.Millisecond) + defer timeout.Stop() + select { + case <-timeout.C: + t.Fatal("timedout waiting for query") + case stopTime := <-stopTimeC: + exp := fmt.Sprintf(`{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",2]]}]}`, stopTime.Local().Format(time.RFC3339Nano)) + err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) + if err != nil { + t.Error(err) + } + _, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Disabled, + }) + if err != nil { + t.Fatal(err) + } + } + if badCount == 0 { + t.Error("expected bad influxdb to be queried at least once") + } +} func TestServer_InvalidBatchTask(t *testing.T) { c := NewConfig() @@ -4632,3 +4787,1208 @@ func TestServer_CreateReplay_ValidIDs(t *testing.T) { } } } + +func TestServer_UpdateConfig(t *testing.T) { + type updateAction struct { + element string + updateAction client.ConfigUpdateAction + expSection client.ConfigSection + expElement client.ConfigElement + } + db := NewInfluxDB(func(q string) *iclient.Response { + return &iclient.Response{} + }) + testCases := []struct { + section string + element string + setDefaults func(*server.Config) + expDefaultSection client.ConfigSection + expDefaultElement client.ConfigElement + updates []updateAction + }{ + { + section: "influxdb", + element: "default", + setDefaults: func(c *server.Config) { + c.InfluxDB[0].Enabled = true + c.InfluxDB[0].Username = "bob" + c.InfluxDB[0].Password = "secret" + c.InfluxDB[0].URLs = []string{db.URL()} + // Set really long timeout since we shouldn't hit it + c.InfluxDB[0].StartUpTimeout = toml.Duration(time.Hour) + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + 
"username": "bob", + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "bob", + }, + }, + updates: []updateAction{ + { + // Set Invalid URL to make sure we can fix it without waiting for connection timeouts + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "urls": []string{"http://192.0.2.0:8086"}, + }, + }, + element: "default", + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{"http://192.0.2.0:8086"}, + "username": "bob", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{"http://192.0.2.0:8086"}, + "username": "bob", + }, + }, + }, + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "default": true, + "subscription-protocol": "https", + "subscriptions": map[string][]string{"_internal": []string{"monitor"}}, + }, + }, + element: "default", + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": true, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": 
[]interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "https", + "subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}}, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{"http://192.0.2.0:8086"}, + "username": "bob", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": true, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "https", + "subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}}, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{"http://192.0.2.0:8086"}, + "username": "bob", + }, + }, + }, + { + updateAction: client.ConfigUpdateAction{ + Delete: []string{"urls"}, + }, + element: "default", + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": true, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "https", + "subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}}, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "bob", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": true, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "https", + "subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}}, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "bob", + }, + }, + }, + { + updateAction: client.ConfigUpdateAction{ + Add: map[string]interface{}{ + "name": "new", + "urls": []string{db.URL()}, + }, + 
}, + element: "new", + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"}, + Options: map[string]interface{}{ + "default": true, + "disable-subscriptions": false, + "enabled": true, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "default", + "password": true, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "1h0m0s", + "subscription-protocol": "https", + "subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}}, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "bob", + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": false, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "new", + "password": false, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "5m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "", + }, + }, + }, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"}, + Options: map[string]interface{}{ + "default": false, + "disable-subscriptions": false, + "enabled": false, + "excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}}, + "http-port": float64(0), + "insecure-skip-verify": false, + "kapacitor-hostname": "", + "name": "new", + "password": false, + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "", + "startup-timeout": "5m0s", + "subscription-protocol": "http", + "subscriptions": nil, + "subscriptions-sync-interval": "1m0s", + "timeout": "0s", + "udp-bind": "", + "udp-buffer": float64(1e3), + "udp-read-buffer": float64(0), + "urls": []interface{}{db.URL()}, + "username": "", + }, + }, + }, + }, + }, + { + section: "alerta", + setDefaults: func(c *server.Config) { + c.Alerta.URL = "http://alerta.example.com" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"}, + Options: map[string]interface{}{ + "enabled": false, + "environment": "", + "origin": "", + "token": false, + "url": "http://alerta.example.com", + }}, + }, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"}, + Options: map[string]interface{}{ + "enabled": false, + "environment": "", + "origin": "", + "token": false, + "url": "http://alerta.example.com", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "token": "token", + "origin": "kapacitor", + }, + }, + 
expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"}, + Options: map[string]interface{}{ + "enabled": false, + "environment": "", + "origin": "kapacitor", + "token": true, + "url": "http://alerta.example.com", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"}, + Options: map[string]interface{}{ + "enabled": false, + "environment": "", + "origin": "kapacitor", + "token": true, + "url": "http://alerta.example.com", + }, + }, + }, + }, + }, + { + section: "kubernetes", + setDefaults: func(c *server.Config) { + c.Kubernetes.APIServers = []string{"http://localhost:80001"} + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"}, + Options: map[string]interface{}{ + "api-servers": []interface{}{"http://localhost:80001"}, + "ca-path": "", + "enabled": false, + "in-cluster": false, + "namespace": "", + "token": false, + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"}, + Options: map[string]interface{}{ + "api-servers": []interface{}{"http://localhost:80001"}, + "ca-path": "", + "enabled": false, + "in-cluster": false, + "namespace": "", + "token": false, + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "token": "secret", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"}, + Options: map[string]interface{}{ + "api-servers": []interface{}{"http://localhost:80001"}, + "ca-path": "", + "enabled": false, + "in-cluster": false, + "namespace": "", + "token": true, + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"}, + Options: map[string]interface{}{ + "api-servers": []interface{}{"http://localhost:80001"}, + "ca-path": "", + "enabled": false, + "in-cluster": false, + "namespace": "", + "token": true, + }, + }, + }, + }, + }, + { + section: "hipchat", + setDefaults: func(c *server.Config) { + c.HipChat.URL = "http://hipchat.example.com" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"}, + Options: map[string]interface{}{ + "enabled": false, + "global": false, + "room": "", + "state-changes-only": false, + "token": false, + "url": "http://hipchat.example.com", + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"}, + Options: map[string]interface{}{ + "enabled": false, + "global": false, + "room": "", + "state-changes-only": false, + "token": false, + "url": "http://hipchat.example.com", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "token": "token", + "room": 
"kapacitor", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"}, + Options: map[string]interface{}{ + "enabled": false, + "global": false, + "room": "kapacitor", + "state-changes-only": false, + "token": true, + "url": "http://hipchat.example.com", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"}, + Options: map[string]interface{}{ + "enabled": false, + "global": false, + "room": "kapacitor", + "state-changes-only": false, + "token": true, + "url": "http://hipchat.example.com", + }, + }, + }, + }, + }, + { + section: "opsgenie", + setDefaults: func(c *server.Config) { + c.OpsGenie.URL = "http://opsgenie.example.com" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"}, + Options: map[string]interface{}{ + "api-key": false, + "enabled": false, + "global": false, + "recipients": nil, + "recovery_url": opsgenie.DefaultOpsGenieRecoveryURL, + "teams": nil, + "url": "http://opsgenie.example.com", + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"}, + Options: map[string]interface{}{ + "api-key": false, + "enabled": false, + "global": false, + "recipients": nil, + "recovery_url": opsgenie.DefaultOpsGenieRecoveryURL, + "teams": nil, + "url": "http://opsgenie.example.com", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "api-key": "token", + "global": true, + "teams": []string{"teamA", "teamB"}, + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"}, + Options: map[string]interface{}{ + "api-key": true, + "enabled": false, + "global": true, + "recipients": nil, + "recovery_url": opsgenie.DefaultOpsGenieRecoveryURL, + "teams": []interface{}{"teamA", "teamB"}, + "url": "http://opsgenie.example.com", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"}, + Options: map[string]interface{}{ + "api-key": true, + "enabled": false, + "global": true, + "recipients": nil, + "recovery_url": opsgenie.DefaultOpsGenieRecoveryURL, + "teams": []interface{}{"teamA", "teamB"}, + "url": "http://opsgenie.example.com", + }, + }, + }, + }, + }, + { + section: "pagerduty", + setDefaults: func(c *server.Config) { + c.PagerDuty.ServiceKey = "secret" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"}, + Options: map[string]interface{}{ + "enabled": false, + "global": false, + "service-key": true, + "url": pagerduty.DefaultPagerDutyAPIURL, + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"}, + Options: map[string]interface{}{ + "enabled": 
false, + "global": false, + "service-key": true, + "url": pagerduty.DefaultPagerDutyAPIURL, + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "service-key": "", + "enabled": true, + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"}, + Options: map[string]interface{}{ + "enabled": true, + "global": false, + "service-key": false, + "url": pagerduty.DefaultPagerDutyAPIURL, + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"}, + Options: map[string]interface{}{ + "enabled": true, + "global": false, + "service-key": false, + "url": pagerduty.DefaultPagerDutyAPIURL, + }, + }, + }, + }, + }, + { + section: "smtp", + setDefaults: func(c *server.Config) { + c.SMTP.Host = "smtp.example.com" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"}, + Options: map[string]interface{}{ + "enabled": false, + "from": "", + "global": false, + "host": "smtp.example.com", + "idle-timeout": "30s", + "no-verify": false, + "password": false, + "port": float64(25), + "state-changes-only": false, + "to": nil, + "username": "", + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"}, + Options: map[string]interface{}{ + "enabled": false, + "from": "", + "global": false, + "host": "smtp.example.com", + "idle-timeout": "30s", + "no-verify": false, + "password": false, + "port": float64(25), + "state-changes-only": false, + "to": nil, + "username": "", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "idle-timeout": "1m0s", + "global": true, + "password": "secret", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"}, + Options: map[string]interface{}{ + "enabled": false, + "from": "", + "global": true, + "host": "smtp.example.com", + "idle-timeout": "1m0s", + "no-verify": false, + "password": true, + "port": float64(25), + "state-changes-only": false, + "to": nil, + "username": "", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"}, + Options: map[string]interface{}{ + "enabled": false, + "from": "", + "global": true, + "host": "smtp.example.com", + "idle-timeout": "1m0s", + "no-verify": false, + "password": true, + "port": float64(25), + "state-changes-only": false, + "to": nil, + "username": "", + }, + }, + }, + }, + }, + { + section: "sensu", + setDefaults: func(c *server.Config) { + c.Sensu.Addr = "sensu.example.com:3000" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"}, + Options: map[string]interface{}{ + "addr": "sensu.example.com:3000", + "enabled": false, + "source": "Kapacitor", 
+ }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"}, + Options: map[string]interface{}{ + "addr": "sensu.example.com:3000", + "enabled": false, + "source": "Kapacitor", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "addr": "sensu.local:3000", + "enabled": true, + "source": "", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"}, + Options: map[string]interface{}{ + "addr": "sensu.local:3000", + "enabled": true, + "source": "", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"}, + Options: map[string]interface{}{ + "addr": "sensu.local:3000", + "enabled": true, + "source": "", + }, + }, + }, + }, + }, + { + section: "slack", + setDefaults: func(c *server.Config) { + c.Slack.Global = true + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"}, + Options: map[string]interface{}{ + "channel": "", + "enabled": false, + "global": true, + "state-changes-only": false, + "url": false, + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"}, + Options: map[string]interface{}{ + "channel": "", + "enabled": false, + "global": true, + "state-changes-only": false, + "url": false, + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "enabled": true, + "global": false, + "channel": "#general", + "url": "http://slack.example.com/secret-token", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"}, + Options: map[string]interface{}{ + "channel": "#general", + "enabled": true, + "global": false, + "state-changes-only": false, + "url": true, + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"}, + Options: map[string]interface{}{ + "channel": "#general", + "enabled": true, + "global": false, + "state-changes-only": false, + "url": true, + }, + }, + }, + }, + }, + { + section: "talk", + setDefaults: func(c *server.Config) { + c.Talk.AuthorName = "Kapacitor" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"}, + Options: map[string]interface{}{ + "enabled": false, + "url": false, + "author_name": "Kapacitor", + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"}, + Options: map[string]interface{}{ + "enabled": false, + "url": false, + "author_name": "Kapacitor", + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "enabled": true, + "url": 
"http://talk.example.com/secret-token", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"}, + Options: map[string]interface{}{ + "enabled": true, + "url": true, + "author_name": "Kapacitor", + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"}, + Options: map[string]interface{}{ + "enabled": true, + "url": true, + "author_name": "Kapacitor", + }, + }, + }, + }, + }, + { + section: "telegram", + setDefaults: func(c *server.Config) { + c.Telegram.ChatId = "kapacitor" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"}, + Options: map[string]interface{}{ + "chat-id": "kapacitor", + "disable-notification": false, + "disable-web-page-preview": false, + "enabled": false, + "global": false, + "parse-mode": "", + "state-changes-only": false, + "token": false, + "url": telegram.DefaultTelegramURL, + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"}, + Options: map[string]interface{}{ + "chat-id": "kapacitor", + "disable-notification": false, + "disable-web-page-preview": false, + "enabled": false, + "global": false, + "parse-mode": "", + "state-changes-only": false, + "token": false, + "url": telegram.DefaultTelegramURL, + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "enabled": true, + "token": "token", + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"}, + Options: map[string]interface{}{ + "chat-id": "kapacitor", + "disable-notification": false, + "disable-web-page-preview": false, + "enabled": true, + "global": false, + "parse-mode": "", + "state-changes-only": false, + "token": true, + "url": telegram.DefaultTelegramURL, + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"}, + Options: map[string]interface{}{ + "chat-id": "kapacitor", + "disable-notification": false, + "disable-web-page-preview": false, + "enabled": true, + "global": false, + "parse-mode": "", + "state-changes-only": false, + "token": true, + "url": telegram.DefaultTelegramURL, + }, + }, + }, + }, + }, { + section: "victorops", + setDefaults: func(c *server.Config) { + c.VictorOps.RoutingKey = "test" + c.VictorOps.APIKey = "secret" + }, + expDefaultSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"}, + Options: map[string]interface{}{ + "api-key": true, + "enabled": false, + "global": false, + "routing-key": "test", + "url": victorops.DefaultVictorOpsAPIURL, + }, + }}, + }, + expDefaultElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"}, + Options: map[string]interface{}{ + "api-key": true, + 
"enabled": false, + "global": false, + "routing-key": "test", + "url": victorops.DefaultVictorOpsAPIURL, + }, + }, + updates: []updateAction{ + { + updateAction: client.ConfigUpdateAction{ + Set: map[string]interface{}{ + "api-key": "", + "global": true, + }, + }, + expSection: client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"}, + Options: map[string]interface{}{ + "api-key": false, + "enabled": false, + "global": true, + "routing-key": "test", + "url": victorops.DefaultVictorOpsAPIURL, + }, + }}, + }, + expElement: client.ConfigElement{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"}, + Options: map[string]interface{}{ + "api-key": false, + "enabled": false, + "global": true, + "routing-key": "test", + "url": victorops.DefaultVictorOpsAPIURL, + }, + }, + }, + }, + }, + } + + compareElements := func(got, exp client.ConfigElement) error { + if got.Link != exp.Link { + return fmt.Errorf("elements have different links, got %v exp %v", got.Link, exp.Link) + } + for k, v := range exp.Options { + if g, ok := got.Options[k]; !ok { + return fmt.Errorf("missing option %q", k) + } else if !reflect.DeepEqual(g, v) { + return fmt.Errorf("unexpected config option %q got %#v exp %#v types: got %T exp %T", k, g, v, g, v) + } + } + for k := range got.Options { + if v, ok := exp.Options[k]; !ok { + return fmt.Errorf("extra option %q with value %#v", k, v) + } + } + return nil + } + compareSections := func(got, exp client.ConfigSection) error { + if got.Link != exp.Link { + return fmt.Errorf("sections have different links, got %v exp %v", got.Link, exp.Link) + } + if len(got.Elements) != len(exp.Elements) { + return fmt.Errorf("sections are different lengths, got %d exp %d", len(got.Elements), len(exp.Elements)) + } + for i := range exp.Elements { + if err := compareElements(got.Elements[i], exp.Elements[i]); err != nil { + return errors.Wrapf(err, "section element %d are not equal", i) + } + } + return nil + } + + validate := func( + cli *client.Client, + section, + element string, + expSection client.ConfigSection, + expElement client.ConfigElement, + ) error { + // Get all sections + if config, err := cli.ConfigSections(); err != nil { + return err + } else { + if err := compareSections(config.Sections[section], expSection); err != nil { + return fmt.Errorf("%s: %v", section, err) + } + } + // Get the specific section + sectionLink := cli.ConfigSectionLink(section) + if got, err := cli.ConfigSection(sectionLink); err != nil { + return err + } else { + if err := compareSections(got, expSection); err != nil { + return fmt.Errorf("%s: %v", section, err) + } + } + elementLink := cli.ConfigElementLink(section, element) + // Get the specific element + if got, err := cli.ConfigElement(elementLink); err != nil { + return err + } else { + if err := compareElements(got, expElement); err != nil { + return fmt.Errorf("%s/%s: %v", section, element, err) + } + } + return nil + } + + for _, tc := range testCases { + // Create default config + c := NewConfig() + if tc.setDefaults != nil { + tc.setDefaults(c) + } + s := OpenServer(c) + cli := Client(s) + defer s.Close() + + if err := validate(cli, tc.section, tc.element, tc.expDefaultSection, tc.expDefaultElement); err != nil { + t.Errorf("unexpected defaults for %s/%s: %v", tc.section, tc.element, err) + } + + for i, ua := range tc.updates { + link := 
cli.ConfigElementLink(tc.section, ua.element) + + if len(ua.updateAction.Add) > 0 || + len(ua.updateAction.Remove) > 0 { + link = cli.ConfigSectionLink(tc.section) + } + + if err := cli.ConfigUpdate(link, ua.updateAction); err != nil { + t.Fatal(err) + } + if err := validate(cli, tc.section, ua.element, ua.expSection, ua.expElement); err != nil { + t.Errorf("unexpected update result %d for %s/%s: %v", i, tc.section, ua.element, err) + } + } + } +} diff --git a/services/alerta/config.go b/services/alerta/config.go index 311f05584..13eeacb09 100644 --- a/services/alerta/config.go +++ b/services/alerta/config.go @@ -1,18 +1,34 @@ package alerta +import ( + "net/url" + + "github.com/pkg/errors" +) + type Config struct { // Whether Alerta integration is enabled. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Alerta URL. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // The authentication token for this notification, can be overridden per alert. - Token string `toml:"token"` + Token string `toml:"token" override:"token,redact"` // The environment in which to raise the alert. - Environment string `toml:"environment"` + Environment string `toml:"environment" override:"environment"` // The origin of the alert. - Origin string `toml:"origin"` + Origin string `toml:"origin" override:"origin"` } func NewConfig() Config { return Config{} } + +func (c Config) Validate() error { + if c.Enabled && c.URL == "" { + return errors.New("must specify url") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid url %q", c.URL) + } + return nil +} diff --git a/services/alerta/service.go b/services/alerta/service.go index b8527d3f7..3c85aa41d 100644 --- a/services/alerta/service.go +++ b/services/alerta/service.go @@ -5,28 +5,26 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" "net/url" + "path" + "sync/atomic" ) type Service struct { - url string - token string - environment string - origin string + configValue atomic.Value logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - url: c.URL, - token: c.Token, - environment: c.Environment, - origin: c.Origin, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -37,28 +35,78 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Alert(token, resource, event, environment, severity, group, value, message, origin string, service []string, data interface{}) error { if resource == "" || event == "" { return errors.New("Resource and Event are required to send an alert") } + url, post, err := s.preparePost(token, resource, event, environment, severity, group, value, message, origin, service, data) + + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusCreated { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Message string `json:"message"` + } + r := 
&response{Message: fmt.Sprintf("failed to understand Alerta response. code: %d content: %s", resp.StatusCode, string(body))} + b := bytes.NewReader(body) + dec := json.NewDecoder(b) + dec.Decode(r) + return errors.New(r.Message) + } + return nil +} + +func (s *Service) preparePost(token, resource, event, environment, severity, group, value, message, origin string, service []string, data interface{}) (string, io.Reader, error) { + c := s.config() + + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } + if token == "" { - token = s.token + token = c.Token } if environment == "" { - environment = s.environment + environment = c.Environment } if origin == "" { - origin = s.origin + origin = c.Origin } - var Url *url.URL - Url, err := url.Parse(s.url + "/alert?api-key=" + token) + u, err := url.Parse(c.URL) if err != nil { - return err + return "", nil, err } + u.Path = path.Join(u.Path, "alert") + v := url.Values{} + v.Set("api-key", token) + u.RawQuery = v.Encode() postData := make(map[string]interface{}) postData["resource"] = resource @@ -78,27 +126,8 @@ func (s *Service) Alert(token, resource, event, environment, severity, group, va enc := json.NewEncoder(&post) err = enc.Encode(postData) if err != nil { - return err + return "", nil, err } - resp, err := http.Post(Url.String(), "application/json", &post) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Message string `json:"message"` - } - r := &response{Message: fmt.Sprintf("failed to understand Alerta response. code: %d content: %s", resp.StatusCode, string(body))} - b := bytes.NewReader(body) - dec := json.NewDecoder(b) - dec.Decode(r) - return errors.New(r.Message) - } - return nil + return u.String(), &post, nil } diff --git a/services/config/config.go b/services/config/config.go new file mode 100644 index 000000000..58757bc78 --- /dev/null +++ b/services/config/config.go @@ -0,0 +1,11 @@ +package config + +type Config struct { + Enabled bool `toml:"enabled"` +} + +func NewConfig() Config { + return Config{ + Enabled: true, + } +} diff --git a/services/config/dao.go b/services/config/dao.go new file mode 100644 index 000000000..c184179e3 --- /dev/null +++ b/services/config/dao.go @@ -0,0 +1,189 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/influxdata/kapacitor/services/storage" +) + +var ( + ErrNoOverrideExists = errors.New("no override exists") +) + +// Data access object for Override data. +type OverrideDAO interface { + // Retrieve a override + Get(id string) (Override, error) + + // Set an override. + // If it does not already exist it will be created, + // otherwise it will be replaced. + Set(o Override) error + + // Delete a override. + // It is not an error to delete an non-existent override. + Delete(id string) error + + // List all overrides whose ID starts with the given prefix + List(prefix string) ([]Override, error) +} + +//-------------------------------------------------------------------- +// The following structures are stored in a database via JSON encoding. +// Changes to the structures could break existing data. +// +// Many of these structures are exact copies of structures found elsewhere, +// this is intentional so that all structures stored in the database are +// defined here and nowhere else. So as to not accidentally change +// the JSON serialization format in incompatible ways. 
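// A minimal usage sketch of the OverrideDAO defined above, assuming some
// storage.Interface value named store and a hypothetical override ID of
// "slack" (both names are illustrative, not part of this change):
//
//	dao := newOverrideKV(store)
//	if err := dao.Set(Override{
//		ID:      "slack",
//		Options: map[string]interface{}{"enabled": true},
//	}); err != nil {
//		// handle error
//	}
//	// List returns all stored overrides sorted by ID; the prefix narrows the result.
//	overrides, err := dao.List("")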
+ +// version is the current version of the Override structure. +const version = 1 + +type Override struct { + // Unique identifier for the override + ID string `json:"id"` + + // Map of key value pairs of option overrides. + Options map[string]interface{} `json:"options"` + + Create bool `json:"create"` +} + +// versionWrapper wraps a structure with a version so that changes +// to the structure can be properly decoded. +type versionWrapper struct { + Version int `json:"version"` + Value *json.RawMessage `json:"value"` +} + +const ( + overrideDataPrefix = "/overrides/data/" + overrideIndexesPrefix = "/overrides/indexes/" + + // Name of ID index + idIndex = "id/" +) + +// Key/Value store based implementation of the OverrideDAO +type overrideKV struct { + store storage.Interface +} + +func newOverrideKV(store storage.Interface) *overrideKV { + return &overrideKV{ + store: store, + } +} + +func encodeOverride(o Override) ([]byte, error) { + raw, err := json.Marshal(o) + if err != nil { + return nil, err + } + rawCopy := make(json.RawMessage, len(raw)) + copy(rawCopy, raw) + wrapper := versionWrapper{ + Version: version, + Value: &rawCopy, + } + return json.Marshal(wrapper) +} + +func decodeOverride(data []byte) (Override, error) { + var wrapper versionWrapper + err := json.Unmarshal(data, &wrapper) + if err != nil { + return Override{}, err + } + var override Override + if wrapper.Value == nil { + return Override{}, errors.New("empty override") + } + dec := json.NewDecoder(bytes.NewReader(*wrapper.Value)) + // Do not convert all nums to float64, rather use json.Number which is a Stringer + dec.UseNumber() + err = dec.Decode(&override) + return override, err +} + +// Create a key for the override data +func (d *overrideKV) overrideDataKey(id string) string { + return overrideDataPrefix + id +} + +// Create a key for a given index and value. +// +// Indexes are maintained via a 'directory' like system: +// +// /overrides/data/ID -- contains encoded override data +// /overrides/index/id/ID -- contains the override ID +// +// As such to list all overrides in ID sorted order use the /overrides/index/id/ directory. 
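// For a hypothetical override ID of "slack", the key helpers produce:
//
//	d.overrideDataKey("slack")           // "/overrides/data/slack"
//	d.overrideIndexKey(idIndex, "slack") // "/overrides/indexes/id/slack"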
+func (d *overrideKV) overrideIndexKey(index, value string) string { + return overrideIndexesPrefix + index + value +} + +func (d *overrideKV) Get(id string) (Override, error) { + key := d.overrideDataKey(id) + if exists, err := d.store.Exists(key); err != nil { + return Override{}, err + } else if !exists { + return Override{}, ErrNoOverrideExists + } + kv, err := d.store.Get(key) + if err != nil { + return Override{}, err + } + return decodeOverride(kv.Value) +} + +func (d *overrideKV) Set(o Override) error { + key := d.overrideDataKey(o.ID) + + data, err := encodeOverride(o) + if err != nil { + return err + } + // Put data + err = d.store.Put(key, data) + if err != nil { + return err + } + // Put ID index + indexKey := d.overrideIndexKey(idIndex, o.ID) + return d.store.Put(indexKey, []byte(o.ID)) +} + +func (d *overrideKV) Delete(id string) error { + key := d.overrideDataKey(id) + indexKey := d.overrideIndexKey(idIndex, id) + + dataErr := d.store.Delete(key) + indexErr := d.store.Delete(indexKey) + if dataErr != nil { + return dataErr + } + return indexErr +} + +func (d *overrideKV) List(prefix string) ([]Override, error) { + // List all override ids sorted by ID + ids, err := d.store.List(overrideIndexesPrefix + idIndex + prefix) + if err != nil { + return nil, err + } + overrides := make([]Override, 0, len(ids)) + for _, kv := range ids { + id := string(kv.Value) + o, err := d.Get(id) + if err != nil { + return nil, err + } + overrides = append(overrides, o) + } + + return overrides, nil +} diff --git a/services/config/dao_test.go b/services/config/dao_test.go new file mode 100644 index 000000000..fabfde17f --- /dev/null +++ b/services/config/dao_test.go @@ -0,0 +1,61 @@ +package config + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" +) + +func Test_encodeOverride_decodeOverride(t *testing.T) { + testCases := []struct { + o Override + exp []byte + }{ + { + o: Override{}, + exp: []byte(`{"version":1,"value":{"id":"","options":null,"create":false}}`), + }, + { + o: Override{ + ID: "42", + }, + exp: []byte(`{"version":1,"value":{"id":"42","options":null,"create":false}}`), + }, + { + o: Override{ + ID: "42", + Create: true, + }, + exp: []byte(`{"version":1,"value":{"id":"42","options":null,"create":true}}`), + }, + { + o: Override{ + ID: "42", + Options: map[string]interface{}{ + "a": json.Number("1"), + "b": []interface{}{"x", "y", "z"}, + "c": map[string]interface{}{"k1": "x", "k2": "y", "k3": "z"}, + }, + Create: true, + }, + exp: []byte(`{"version":1,"value":{"id":"42","options":{"a":1,"b":["x","y","z"],"c":{"k1":"x","k2":"y","k3":"z"}},"create":true}}`), + }, + } + for _, tc := range testCases { + got, err := encodeOverride(tc.o) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, tc.exp) { + t.Errorf("unexpected encoding:\ngot\n%s\nexp\n%s\n", string(got), string(tc.exp)) + } + o, err := decodeOverride(got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(o, tc.o) { + t.Errorf("unexpected decoding:\ngot\n%v\nexp\n%v\n", o, tc.o) + } + } +} diff --git a/services/config/override/doc.go b/services/config/override/doc.go new file mode 100644 index 000000000..3a201d205 --- /dev/null +++ b/services/config/override/doc.go @@ -0,0 +1,64 @@ +/* Overrider provides an API for overriding and reading redacted values for a configuration object. +The configuration object provided is expected to have two levels of nested structs. +The top level struct should have fields called "sections". +These fields may either be a struct or a slice of structs. 
+As such a section consists of a list of elements. +In the case where the field is a struct and not a slice, the section list always contains one element. +Further nested levels may exist but Overrider will not interact with them directly. + +In order for a section to be overridden an `override` struct tag must be present. +The `override` tag defines a name for the section and option. +Struct tags can be used to mark options as redacted by adding a `,redact` to the end of the `override` tag value. + +Example: + type SectionAConfig struct { + Option string `override:"option"` + Password string `override:"password,redact"` + } + + type SectionBConfig struct { + ID string `override:"id"` + Option string `override:"option"` + } + + type Config struct { + SectionA SectionAConfig `override:"section-a"` + SectionB []SectionBConfig `override:"section-b,element-key=id"` + IgnoredSection IgnoredConfig + IgnoredField string + } + + type IgnoredConfig struct { + // contains anything ... + } + + // Setup + c := Config{ + SectionA: SectionAConfig{ + Option: "option value", + Password: "secret", + }, + SectionB: []SectionBConfig{ + { + ID: "id0", + Option: "option value 0", + }, + { + ID: "id1", + Option: "option value 1", + }, + }, + IgnoredSection: IgnoredConfig{}, + IgnoredField: "this value is ignored", + } + o := override.New(c) + // Read redacted section values + redacted, err := o.Sections() + // Override options for a section + newElement, err := o.Override(Override{ + Section: "section-b", + Element: "id1", // Element may be empty when overriding a section which is not a list. + Options: map[string]interface{}{"option": "overridden option value"}, + }) +*/ +package override diff --git a/services/config/override/override.go b/services/config/override/override.go new file mode 100644 index 000000000..ad85aaaeb --- /dev/null +++ b/services/config/override/override.go @@ -0,0 +1,753 @@ +package override + +import ( + "encoding" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + "github.com/pkg/errors" +) + +const ( + structTagKey = "override" + redactKeyword = "redact" + elementKeyword = "element-key=" +) + +// Validator is a type that can validate itself. +// If an element is a Validator, then Validate() is called +// whenever it is modified. +type Validator interface { + Validate() error +} + +// Override specifies what configuration values should be overridden and how. +// Configuration options can be overridden as well as elements of a section +// can be deleted or created. +type Override struct { + // Section is the name of the section to override. + Section string + // Element is the name of the element within a section to override. + // If the section is not a slice of structs then this can remain empty. + Element string + // Options is a set of option name to value to override existing values. + Options map[string]interface{} + // Delete indicates whether the specified element should be deleted. + Delete bool + // Create indicates whether to create a new element in the specified section. + // To create a new element leave the element name empty in this Override object + // and provide the value in the Options map under the element key. + Create bool +} + +// Validate that the values set on this Override are self-consistent. 
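// For example (section and key names are illustrative, matching the package
// documentation), creating a new element leaves Element empty and supplies the
// element key via Options, while deleting requires a non-empty Element and no
// Options:
//
//	create := Override{
//		Section: "section-b",
//		Create:  true,
//		Options: map[string]interface{}{"id": "new-id"},
//	}
//	remove := Override{
//		Section: "section-b",
//		Element: "old-id",
//		Delete:  true,
//	}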
+func (o Override) Validate() error { + if o.Section == "" { + return errors.New("section cannot be empty") + } + if o.Delete && o.Element == "" { + return errors.New("element cannot be empty if deleting an element") + } + if o.Create && o.Element != "" { + return errors.New("element must be empty if creating an element, set the element key value via the options") + } + if o.Delete && len(o.Options) > 0 { + return errors.New("cannot delete an element and provide options in the same override") + } + if o.Delete && o.Create { + return errors.New("cannot create and delete an element in the same override") + } + return nil +} + +// ElementKeys returns a map of section name to element key for each section. +func ElementKeys(config interface{}) (map[string]string, error) { + // walk the config and read all sections + walker := newSectionWalker() + if err := reflectwalk.Walk(config, walker); err != nil { + return nil, errors.Wrap(err, "failed to read sections from configuration object") + } + + return walker.elementKeysMap(), nil +} + +// OverrideConfig applies all given overrides and returns a map of all configuration sections, even if they were not overridden. +// The overrides are all applied to the same object and the original configuration object remains unmodified. +// +// Values must be of the same type as the named option field, or have another means of converting the value. +// +// Numeric types will be converted to the absolute type using Go's default conversion mechanisms. +// Strings and Stringer types will be parsed for numeric values if possible. +// TextUnmarshaler types will attempt to unmarshal string values. +// +// Mismatched types or failure to convert the value will result in an error. +// +// An element value that is a Validator will be validated and any encounted error returned. +// +// When a new element is being created if the element type is a Initer, then the zero value of the +// element will first have defaults set before the overrides are applied. +// +// The underlying configuration object is not modified, but rather a copy is returned via the Element type. +func OverrideConfig(config interface{}, os []Override) (map[string]Section, error) { + // First make a copy into which we can apply the updates. + copy, err := copystructure.Copy(config) + if err != nil { + return nil, errors.Wrap(err, "failed to copy configuration object") + } + + // Apply all overrides to the same copy + for _, o := range os { + // We do not need to keep a reference to the section since we are going to walk the entire copy next + _, err := applyOverride(copy, o) + if err != nil { + return nil, errors.Wrapf(err, "failed to override configuration %s/%s", o.Section, o.Element) + } + } + + // Walk the copy to return all sections + walker := newSectionWalker() + if err := reflectwalk.Walk(copy, walker); err != nil { + return nil, errors.Wrap(err, "failed to read sections from configuration object") + } + + return walker.sectionsMap(), nil +} + +// applyOverride applies the given override to the specified object. 
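// Option values follow the weak-typing rules documented on OverrideConfig; for
// example (using the SectionC type from the package tests, which has an int64
// "option4" field), any of the following values would be accepted:
//
//	o := Override{
//		Section: "section-c",
//		Options: map[string]interface{}{
//			"option4": "42", // also int(42), json.Number("42"), or float64(42)
//		},
//	}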
+func applyOverride(object interface{}, o Override) (Element, error) { + if err := o.Validate(); err != nil { + return Element{}, errors.Wrap(err, "invalid override") + } + walker := newOverrideWalker(o) + + // walk the copy and apply the updates + if err := reflectwalk.Walk(object, walker); err != nil { + return Element{}, err + } + unused := walker.unused() + if len(unused) > 0 { + return Element{}, fmt.Errorf("unknown options %v in section %s", unused, o.Section) + } + // Return the modified copy + element := walker.elementObject() + if element.value == nil && !o.Delete { + return Element{}, fmt.Errorf("unknown section %s", o.Section) + } + // Validate new value + if v, ok := element.value.(Validator); ok { + if err := v.Validate(); err != nil { + return Element{}, errors.Wrap(err, "failed validation") + } + } + return element, nil +} + +// overrideWalker applies the changes onto the walked value. +type overrideWalker struct { + depthWalker + + o Override + + used map[string]bool + elementValue reflect.Value + currentSectionName string + currentElementName string + currentSlice reflect.Value + elementKey string +} + +func newOverrideWalker(o Override) *overrideWalker { + return &overrideWalker{ + o: o, + used: make(map[string]bool, len(o.Options)), + } +} + +func (w *overrideWalker) unused() []string { + unused := make([]string, 0, len(w.o.Options)) + for name := range w.o.Options { + if !w.used[name] { + unused = append(unused, name) + } + } + return unused +} + +func (w *overrideWalker) elementObject() Element { + if w.elementValue.IsValid() { + return Element{ + value: w.elementValue.Interface(), + element: w.o.Element, + } + } + return Element{} +} + +func (w *overrideWalker) Struct(reflect.Value) error { + return nil +} + +func (w *overrideWalker) StructField(f reflect.StructField, v reflect.Value) error { + switch w.depth { + // Section level + case 0: + name, ok := getSectionName(f) + if ok { + // Only override the section if a struct tag was present + w.currentSectionName = name + if w.o.Section == w.currentSectionName { + w.elementValue = v + w.elementKey = getElementKey(f) + } + } else { + w.currentSectionName = "" + } + // Option level + case 1: + // Skip this field if its not for the section/element we care about + if w.currentSectionName != w.o.Section || w.currentElementName != w.o.Element { + break + } + + name := fieldName(f) + setValue, ok := w.o.Options[name] + if !ok { + name = strings.ToLower(name) + setValue, ok = w.o.Options[name] + } + if ok { + if !w.o.Create && name == w.elementKey { + return fmt.Errorf("cannot override element key %s", name) + } + if err := weakCopyValue(v, reflect.ValueOf(setValue)); err != nil { + return errors.Wrapf(err, "cannot set option %s", name) + } + w.used[name] = true + } + } + return nil +} + +// Initer set defaults on the receiving object. +// If a type is a Initer and a new value needs to be created of that type, +// then Init() is called on a new instance of that type. 
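// For example (an abridged version of the SectionD type from the package
// tests), new list elements can be given default option values:
//
//	type SectionD struct {
//		ID      string `override:"id"`
//		Option5 string `override:"option5"`
//	}
//
//	func (d *SectionD) Init() {
//		d.Option5 = "o5"
//	}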
+type Initer interface { + Init() +} + +var initerType = reflect.TypeOf((*Initer)(nil)).Elem() + +func (w *overrideWalker) Slice(v reflect.Value) error { + if w.o.Section != w.currentSectionName || w.depth != 1 { + return nil + } + w.currentSlice = v + switch { + case w.o.Delete: + // Explictly set the section value to the zero value + w.elementValue = reflect.Value{} + case w.o.Create: + // Create a new element in the slice + var n reflect.Value + et := v.Type().Elem() + if et.Kind() == reflect.Ptr { + n = reflect.New(et.Elem()) + } else { + n = reflect.New(et) + } + // If the type is a initer, call Default + if n.Type().Implements(initerType) { + n.Interface().(Initer).Init() + } + // Indirect the value if we didn't want a pointer + if et.Kind() != reflect.Ptr { + n = reflect.Indirect(n) + } + v.Set(reflect.Append(v, n)) + // Set element key + if w.elementKey == "" { + return fmt.Errorf("cannot create new element, no element key found. An element key must be specified via the `%s:\",%s\"` struct tag", structTagKey, elementKeyword) + } + // Get the value that is now part of the slice. + n = v.Index(v.Len() - 1) + elementField := findFieldByElementKey(n, w.elementKey) + if !elementField.IsValid() { + return fmt.Errorf("could not find field with the name of the element key %q", w.elementKey) + } + if elementField.Kind() != reflect.String { + return fmt.Errorf("element key field must be of type string, got %s", elementField.Type()) + } + setValue, ok := w.o.Options[w.elementKey] + if !ok { + return fmt.Errorf("element key %q not present in options", w.elementKey) + } + str, ok := setValue.(string) + if !ok { + return fmt.Errorf("type of element key must be a string, got %T ", setValue) + } + w.o.Element = str + w.used[w.elementKey] = true + if err := weakCopyValue(elementField, reflect.ValueOf(setValue)); err != nil { + return errors.Wrapf(err, "cannot set element key %q on new element", w.elementKey) + } + default: + // We are modifying an existing slice element. + // Nothing to do here. + } + return nil +} + +func (w *overrideWalker) SliceElem(idx int, v reflect.Value) error { + if w.depth == 1 && w.currentSectionName == w.o.Section && w.o.Element != "" { + w.currentElementName = "" + if w.elementKey == "" { + return fmt.Errorf("an element key must be specified via the `%s:\",%s\"` struct tag", structTagKey, elementKeyword) + } + // Get current element name via field on current value + elementField := findFieldByElementKey(v, w.elementKey) + if !elementField.IsValid() { + return fmt.Errorf("could not find field with name %q on value of type %s", w.elementKey, v.Type()) + } + if elementField.Kind() != reflect.String { + return fmt.Errorf("element key field must be of type string, got %s", elementField.Type()) + } + w.currentElementName = elementField.String() + if w.o.Element == w.currentElementName { + if w.o.Delete { + // Delete the element from the slice by re-slicing the element out + w.currentSlice.Set( + reflect.AppendSlice( + w.currentSlice.Slice(0, idx), + w.currentSlice.Slice(idx+1, w.currentSlice.Len()), + ), + ) + } else { + w.elementValue = v + } + } + } + return nil +} + +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// weakCopyValue copies the value of dst into src, where numeric and interface types are copied weakly. 
+func weakCopyValue(dst, src reflect.Value) (err error) { + defer func() { + // This shouldn't be necessary but it better to catch a panic here, + // where we can provide context instead of crashing the server or letting it bouble up. + if r := recover(); r != nil { + if e, ok := r.(error); ok { + err = e + } else { + err = fmt.Errorf("%v", r) + } + } + }() + if !dst.CanSet() { + return errors.New("not settable") + } + if src.Kind() == reflect.Interface { + src = src.Elem() + } + srcK := src.Kind() + dstK := dst.Kind() + + // Get addressable value since it may implement + // the TextUnmarshaler interface + var addrDst reflect.Value + if d := reflect.Indirect(dst); d.CanAddr() { + addrDst = d.Addr() + } + + if srcK == dstK { + if dst.Type() == src.Type() { + // Perform normal copy + dst.Set(src) + } else { + // Perform recursive copy into elements + switch dstK { + case reflect.Map: + // Make new map + dst.Set(reflect.MakeMap(dst.Type())) + for _, key := range src.MapKeys() { + value := reflect.Indirect(reflect.New(dst.Type().Elem())) + if err := weakCopyValue(value, src.MapIndex(key)); err != nil { + return errors.Wrap(err, "failed to copy map value") + } + dst.SetMapIndex(key, value) + } + case reflect.Slice: + // Make new slice + dst.Set(reflect.MakeSlice(dst.Type(), src.Len(), src.Len())) + for i := 0; i < src.Len(); i++ { + value := reflect.Indirect(reflect.New(dst.Type().Elem())) + if err := weakCopyValue(value, src.Index(i)); err != nil { + return errors.Wrap(err, "failed to copy slice value") + } + dst.Index(i).Set(value) + } + default: + return fmt.Errorf("cannot copy mismatched types got %s exp %s", src.Type().String(), dst.Type().String()) + } + } + } else if addrDst.Type().Implements(textUnmarshalerType) { + um := addrDst.Interface().(encoding.TextUnmarshaler) + var text []byte + if src.Type().Implements(stringerType) || srcK == reflect.String { + text = []byte(src.String()) + } else { + return fmt.Errorf("cannot unmarshal %s into %s", srcK, dstK) + } + if err := um.UnmarshalText(text); err != nil { + errors.Wrap(err, "failed to unmarshal text") + } + } else if isNumericKind(dstK) { + // Perform weak numeric copy + if isNumericKind(srcK) { + dst.Set(src.Convert(dst.Type())) + return nil + } else { + var str string + if src.Type().Implements(stringerType) || srcK == reflect.String { + str = src.String() + } else { + return fmt.Errorf("cannot convert %s into %s", srcK, dstK) + } + switch { + case isIntKind(dstK): + if i, err := strconv.ParseInt(str, 10, 64); err == nil { + dst.SetInt(i) + return nil + } + case isUintKind(dstK): + if i, err := strconv.ParseUint(str, 10, 64); err == nil { + dst.SetUint(i) + return nil + } + case isFloatKind(dstK): + if f, err := strconv.ParseFloat(str, 64); err == nil { + dst.SetFloat(f) + return nil + } + } + return fmt.Errorf("cannot convert string %q into %s", str, dstK) + } + } else { + return fmt.Errorf("wrong kind %s, expected value of kind %s: %t", srcK, dstK, srcK == dstK) + } + return nil +} + +// Stringer is a type that can provide a string value of itself. +// If a value is a Stringer and needs to be copied into a numeric value, +// then String() is called and parsed as a numeric value if possible. 
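// The most common case here is json.Number: decodeOverride in the config
// service decodes stored option values with UseNumber, so numeric options
// arrive as json.Number values whose underlying string form is parsed into the
// destination numeric field.
//
//	var n Stringer = json.Number("42") // satisfies this interface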
+type Stringer interface { + String() string +} + +var stringerType = reflect.TypeOf((*Stringer)(nil)).Elem() + +func isNumericKind(k reflect.Kind) bool { + // Ignoring complex kinds since we cannot convert them + return k >= reflect.Int && k <= reflect.Float64 +} +func isIntKind(k reflect.Kind) bool { + return k >= reflect.Int && k <= reflect.Int64 +} +func isUintKind(k reflect.Kind) bool { + return k >= reflect.Uint && k <= reflect.Uint64 +} +func isFloatKind(k reflect.Kind) bool { + return k == reflect.Float32 || k == reflect.Float64 +} + +// Element provides access to the underlying value or a map of redacted values. +type Element struct { + value interface{} + element string +} + +// ElementID returns the value of the field specified by the element key. +// It is unique for all elements within a Section. +func (e Element) ElementID() string { + return e.element +} + +// Value returns the underlying value of the configuration element. +func (e Element) Value() interface{} { + return e.value +} + +// Redacted returns the options for the element in a map. +// Any fields with the `override:",redact"` tag set will be replaced +// with a boolean value indicating whether a non-zero value was set. +func (e Element) Redacted() (map[string]interface{}, error) { + walker := newRedactWalker() + // walk the section and collect redacted options + if err := reflectwalk.Walk(e.value, walker); err != nil { + return nil, errors.Wrap(err, "failed to redact section") + } + return walker.optionsMap(), nil +} + +// getElementKey returns the name of the field taht is used to uniquely identify elements of a list. +func getElementKey(f reflect.StructField) string { + parts := strings.Split(f.Tag.Get(structTagKey), ",") + if len(parts) > 1 { + for _, p := range parts[1:] { + if strings.HasPrefix(p, elementKeyword) { + return strings.TrimPrefix(p, elementKeyword) + } + } + } + return "" +} + +func findFieldByElementKey(v reflect.Value, elementKey string) (field reflect.Value) { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return + } + field = v.FieldByName(elementKey) + if field.IsValid() { + return + } + + t := v.Type() + for i := 0; i < t.NumField(); i++ { + field = v.Field(i) + // Skip any unexported fields + if !field.CanSet() { + continue + } + name := fieldName(t.Field(i)) + if name == elementKey { + return + } + } + return +} + +// redactWalker reads the the sections from the walked values and redacts and sensitive fields. +type redactWalker struct { + depthWalker + options map[string]interface{} +} + +func newRedactWalker() *redactWalker { + return &redactWalker{ + options: make(map[string]interface{}), + } +} + +func (w *redactWalker) optionsMap() map[string]interface{} { + return w.options +} + +func (w *redactWalker) Struct(reflect.Value) error { + return nil +} + +func (w *redactWalker) StructField(f reflect.StructField, v reflect.Value) error { + switch w.depth { + // Top level + case 0: + name := fieldName(f) + w.options[name] = getRedactedValue(f, v) + // Ignore all other levels + default: + } + return nil +} + +func getRedactedValue(f reflect.StructField, v reflect.Value) interface{} { + if isRedacted(f) { + return !isZero(v) + } else { + return v.Interface() + } +} + +func isRedacted(f reflect.StructField) bool { + parts := strings.Split(f.Tag.Get(structTagKey), ",") + if len(parts) > 1 { + for _, p := range parts[1:] { + if p == redactKeyword { + return true + } + } + } + return false +} + +// isZero returns whether if v is equal to the zero value of its type. 
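// For example (illustrative values):
//
//	isZero(reflect.ValueOf(""))            // true
//	isZero(reflect.ValueOf("secret"))      // false
//	isZero(reflect.ValueOf([]string(nil))) // true
//
// getRedactedValue above relies on this to report a boolean "is it set" flag
// for redacted options such as passwords, instead of exposing their values.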
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + // Check arrays linearly since its element type may not be comparable. + z := true + for i := 0; i < v.Len() && z; i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + // Check structs recusively since not all of its field may be comparable + z := true + for i := 0; i < v.NumField() && z; i++ { + if f := v.Field(i); f.CanSet() { + z = z && isZero(f) + } + } + return z + default: + // Compare other types directly: + z := reflect.Zero(v.Type()) + return v.Interface() == z.Interface() + } +} + +// Section is a list of Elements. +// Elements are sorted by their element ID. +type Section []Element + +func (s Section) Len() int { return len(s) } +func (s Section) Less(i, j int) bool { return s[i].ElementID() < s[j].ElementID() } +func (s Section) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sectionWalker reads the sections from the walked values and redacts any sensitive fields. +type sectionWalker struct { + depthWalker + sections map[string]Section + currentSectionName string + elementKeys map[string]string +} + +func newSectionWalker() *sectionWalker { + return §ionWalker{ + sections: make(map[string]Section), + elementKeys: make(map[string]string), + } +} + +func (w *sectionWalker) sectionsMap() map[string]Section { + for _, sectionList := range w.sections { + sort.Sort(sectionList) + } + return w.sections +} + +func (w *sectionWalker) elementKeysMap() map[string]string { + return w.elementKeys +} + +func (w *sectionWalker) Struct(reflect.Value) error { + return nil +} + +func (w *sectionWalker) StructField(f reflect.StructField, v reflect.Value) error { + switch w.depth { + // Section level + case 0: + name, ok := getSectionName(f) + if ok { + w.currentSectionName = name + elementKey := getElementKey(f) + w.elementKeys[name] = elementKey + if k := reflect.Indirect(v).Kind(); k == reflect.Struct { + w.sections[name] = Section{{ + value: v.Interface(), + }} + } + } else { + w.currentSectionName = "" + } + // Skip all other levels + default: + } + return nil +} + +func (w *sectionWalker) Slice(reflect.Value) error { + return nil +} + +func (w *sectionWalker) SliceElem(idx int, v reflect.Value) error { + // Skip sections that we are not interested in + if w.currentSectionName == "" { + return nil + } + switch w.depth { + //Option level + case 1: + // Get element value from object + var element string + elementKey, ok := w.elementKeys[w.currentSectionName] + if !ok { + return fmt.Errorf("no element key found for section %q, %v", w.currentSectionName, v) + } + elementField := findFieldByElementKey(v, elementKey) + if elementField.IsValid() { + if elementField.Kind() != reflect.String { + return fmt.Errorf("element key field must be of type string, got %s", elementField.Type()) + } + element = elementField.String() + } else { + return fmt.Errorf("could not find field with the name of the element key %q on element object", elementKey) + } + w.sections[w.currentSectionName] = append(w.sections[w.currentSectionName], Element{ + value: v.Interface(), + element: element, + }) + // Skip all other levels + default: + } + return nil +} + +// getSectionName returns the name of the section based off its `override` struct tag. +// If no tag is present the Go field name is returned and the second return value is false. 
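// For example (field declarations taken from the package documentation):
//
//	SectionA SectionAConfig   `override:"section-a"`                // -> ("section-a", true)
//	SectionB []SectionBConfig `override:"section-b,element-key=id"` // -> ("section-b", true)
//	IgnoredField string                                             // -> ("IgnoredField", false)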
+func getSectionName(f reflect.StructField) (string, bool) { + parts := strings.Split(f.Tag.Get(structTagKey), ",") + if parts[0] != "" { + return parts[0], true + } + return f.Name, false +} + +// depthWalker keeps track of the current depth count into nested structs. +type depthWalker struct { + depth int +} + +func (w *depthWalker) Enter(l reflectwalk.Location) error { + if l == reflectwalk.StructField { + w.depth++ + } + return nil +} + +func (w *depthWalker) Exit(l reflectwalk.Location) error { + if l == reflectwalk.StructField { + w.depth-- + } + return nil +} + +// fieldName returns the name of a field based on the value of the `override` struct tag. +// If no override struct tag is found the field name is returned. +func fieldName(f reflect.StructField) (name string) { + parts := strings.Split(f.Tag.Get(structTagKey), ",") + name = parts[0] + if name == "" { + name = f.Name + } + return +} diff --git a/services/config/override/override_internal_test.go b/services/config/override/override_internal_test.go new file mode 100644 index 000000000..69d15810e --- /dev/null +++ b/services/config/override/override_internal_test.go @@ -0,0 +1,49 @@ +package override + +import ( + "reflect" + "testing" +) + +func TestGetSectionName(t *testing.T) { + testCases := []struct { + f reflect.StructField + expName string + expOK bool + }{ + { + f: reflect.StructField{ + Name: "FieldName", + Tag: `override:"name"`, + }, + expName: "name", + expOK: true, + }, + { + f: reflect.StructField{ + Name: "FieldName", + }, + expName: "FieldName", + expOK: false, + }, + { + f: reflect.StructField{ + Name: "FieldName", + Tag: `override:"name,element-key=id"`, + }, + expName: "name", + expOK: true, + }, + } + + for _, tc := range testCases { + t.Log(tc.f) + name, ok := getSectionName(tc.f) + if got, exp := name, tc.expName; got != exp { + t.Errorf("unexpected name got %q exp %q", got, exp) + } + if got, exp := ok, tc.expOK; got != exp { + t.Errorf("unexpected ok got %t exp %t", got, exp) + } + } +} diff --git a/services/config/override/override_test.go b/services/config/override/override_test.go new file mode 100644 index 000000000..277123a1d --- /dev/null +++ b/services/config/override/override_test.go @@ -0,0 +1,1609 @@ +package override_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/influxdata/kapacitor/services/config/override" + "github.com/mitchellh/copystructure" +) + +type SectionA struct { + Option1 string `override:"option1"` + Option2 string `override:"option2"` +} +type SectionB struct { + Option3 string `override:"option3"` +} +type SectionC struct { + Option4 int64 `override:"option4"` + Password string `override:"password,redact"` +} + +type SectionD struct { + ID string `override:"id"` + Option5 string `override:"option5"` + Option6 map[string]map[string]int `override:"option6"` + Option7 [][]int `override:"option7"` +} + +func (d *SectionD) Init() { + d.Option5 = "o5" +} + +func (d SectionD) Validate() error { + if d.ID == "" { + return fmt.Errorf("ID cannot be empty") + } + return nil +} + +type SectionIgnored struct { + String string +} + +type SectionNums struct { + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + + Float32 float32 + Float64 float64 +} + +type TestConfig struct { + SectionA SectionA `override:"section-a"` + SectionB SectionB `override:"section-b"` + SectionC *SectionC `override:"section-c"` + SectionNums SectionNums `override:"section-nums"` + SectionDs 
[]SectionD `override:"section-d,element-key=id"` + SectionIgnored SectionIgnored + IgnoredInt int + IgnoredString string +} + +func ExampleOverrideConfig() { + config := &TestConfig{ + SectionA: SectionA{ + Option1: "o1", + }, + SectionB: SectionB{ + Option3: "o2", + }, + SectionC: &SectionC{ + Option4: -1, + }, + SectionDs: []SectionD{ + { + ID: "x", + Option5: "x-5", + }, + { + ID: "y", + Option5: "y-5", + }, + { + ID: "z", + Option5: "z-5", + }, + }, + } + + // Override options in section-a + if newConfig, err := override.OverrideConfig(config, []override.Override{ + { + Section: "section-a", + Options: map[string]interface{}{ + "option1": "new option1 value", + "option2": "initial option2 value", + }, + }, + { + Section: "section-b", + Options: map[string]interface{}{ + "option3": "initial option3 value", + }, + }, + { + Section: "section-c", + Options: map[string]interface{}{ + "option4": 586, + }, + }, + { + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option5": "x-new-5", + }, + }, + { + Section: "section-d", + Element: "y", + Options: map[string]interface{}{ + "option5": "y-new-5", + }, + }, + { + Section: "section-d", + Create: true, + Options: map[string]interface{}{ + "id": "w", + "option5": "w-new-5", + }, + }, + }); err != nil { + fmt.Println("ERROR:", err) + } else { + a := newConfig["section-a"][0].Value().(SectionA) + fmt.Println("New SectionA.Option1:", a.Option1) + fmt.Println("New SectionA.Option2:", a.Option2) + + b := newConfig["section-b"][0].Value().(SectionB) + fmt.Println("New SectionB.Option3:", b.Option3) + + c := newConfig["section-c"][0].Value().(*SectionC) + fmt.Println("New SectionC.Option4:", c.Option4) + + // NOTE: Section elements are sorted by element key + d := newConfig["section-d"] + d0 := d[0].Value().(SectionD) + d1 := d[1].Value().(SectionD) + d2 := d[2].Value().(SectionD) + d3 := d[3].Value().(SectionD) + + fmt.Println("New SectionD[0].Option5:", d0.Option5) + fmt.Println("New SectionD[1].Option5:", d1.Option5) + fmt.Println("New SectionD[2].Option5:", d2.Option5) + fmt.Println("Old SectionD[3].Option5:", d3.Option5) + } + + //Output: + // New SectionA.Option1: new option1 value + // New SectionA.Option2: initial option2 value + // New SectionB.Option3: initial option3 value + // New SectionC.Option4: 586 + // New SectionD[0].Option5: w-new-5 + // New SectionD[1].Option5: x-new-5 + // New SectionD[2].Option5: y-new-5 + // Old SectionD[3].Option5: z-5 +} + +func TestOverrideConfig_Single(t *testing.T) { + testConfig := &TestConfig{ + SectionA: SectionA{ + Option1: "o1", + }, + SectionC: &SectionC{ + Option4: -1, + }, + SectionDs: []SectionD{ + { + ID: "x", + Option5: "x-5", + }, + { + ID: "y", + Option5: "y-5", + }, + { + ID: "z", + Option5: "z-5", + }, + }, + } + copy, err := copystructure.Copy(testConfig) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + o override.Override + exp interface{} + redacted map[string]interface{} + }{ + { + o: override.Override{ + Section: "section-a", + Options: map[string]interface{}{ + "option1": "new-o1", + }, + }, + exp: SectionA{ + Option1: "new-o1", + }, + redacted: map[string]interface{}{ + "option1": "new-o1", + "option2": "", + }, + }, + { + o: override.Override{ + Section: "section-a", + Options: map[string]interface{}{ + "option1": "new-o1", + }, + }, + exp: SectionA{ + Option1: "new-o1", + }, + redacted: map[string]interface{}{ + "option1": "new-o1", + "option2": "", + }, + }, + { + o: override.Override{ + Section: "section-c", + Options: 
map[string]interface{}{ + "option4": 42, + }, + }, + exp: &SectionC{ + Option4: 42, + }, + redacted: map[string]interface{}{ + "option4": int64(42), + "password": false, + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int(42), + "Int8": int(42), + "Int16": int(42), + "Int32": int(42), + "Int64": int(42), + "Uint": int(42), + "Uint8": int(42), + "Uint16": int(42), + "Uint32": int(42), + "Uint64": int(42), + "Float32": int(42), + "Float64": int(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int(42), + "Int8": int(42), + "Int16": int(42), + "Int32": int(42), + "Int64": int(42), + "Uint": int(42), + "Uint8": int(42), + "Uint16": int(42), + "Uint32": int(42), + "Uint64": int(42), + "Float32": int(42), + "Float64": int(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int8(42), + "Int8": int8(42), + "Int16": int8(42), + "Int32": int8(42), + "Int64": int8(42), + "Uint": int8(42), + "Uint8": int8(42), + "Uint16": int8(42), + "Uint32": int8(42), + "Uint64": int8(42), + "Float32": int8(42), + "Float64": int8(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + 
Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int16(42), + "Int8": int16(42), + "Int16": int16(42), + "Int32": int16(42), + "Int64": int16(42), + "Uint": int16(42), + "Uint8": int16(42), + "Uint16": int16(42), + "Uint32": int16(42), + "Uint64": int16(42), + "Float32": int16(42), + "Float64": int16(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int32(42), + "Int8": int32(42), + "Int16": int32(42), + "Int32": int32(42), + "Int64": int32(42), + "Uint": int32(42), + "Uint8": int32(42), + "Uint16": int32(42), + "Uint32": int32(42), + "Uint64": int32(42), + "Float32": int32(42), + "Float64": int32(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": int64(42), + "Int8": int64(42), + "Int16": int64(42), + "Int32": int64(42), + "Int64": int64(42), + "Uint": int64(42), + "Uint8": int64(42), + "Uint16": int64(42), + "Uint32": int64(42), + "Uint64": int64(42), + "Float32": int64(42), + "Float64": int64(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": uint(42), + "Int8": uint(42), + "Int16": uint(42), + "Int32": uint(42), + "Int64": uint(42), + "Uint": uint(42), + "Uint8": uint(42), + "Uint16": uint(42), + "Uint32": uint(42), + "Uint64": uint(42), + 
"Float32": uint(42), + "Float64": uint(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": uint8(42), + "Int8": uint8(42), + "Int16": uint8(42), + "Int32": uint8(42), + "Int64": uint8(42), + "Uint": uint8(42), + "Uint8": uint8(42), + "Uint16": uint8(42), + "Uint32": uint8(42), + "Uint64": uint8(42), + "Float32": uint8(42), + "Float64": uint8(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": uint16(42), + "Int8": uint16(42), + "Int16": uint16(42), + "Int32": uint16(42), + "Int64": uint16(42), + "Uint": uint16(42), + "Uint8": uint16(42), + "Uint16": uint16(42), + "Uint32": uint16(42), + "Uint64": uint16(42), + "Float32": uint16(42), + "Float64": uint16(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": uint32(42), + "Int8": uint32(42), + "Int16": uint32(42), + "Int32": uint32(42), + "Int64": uint32(42), + "Uint": uint32(42), + "Uint8": uint32(42), + "Uint16": uint32(42), + "Uint32": uint32(42), + "Uint64": uint32(42), + "Float32": uint32(42), + "Float64": uint32(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ 
+ Section: "section-nums", + Options: map[string]interface{}{ + "Int": uint64(42), + "Int8": uint64(42), + "Int16": uint64(42), + "Int32": uint64(42), + "Int64": uint64(42), + "Uint": uint64(42), + "Uint8": uint64(42), + "Uint16": uint64(42), + "Uint32": uint64(42), + "Uint64": uint64(42), + "Float32": uint64(42), + "Float64": uint64(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": float32(42), + "Int8": float32(42), + "Int16": float32(42), + "Int32": float32(42), + "Int64": float32(42), + "Uint": float32(42), + "Uint8": float32(42), + "Uint16": float32(42), + "Uint32": float32(42), + "Uint64": float32(42), + "Float32": float32(42), + "Float64": float32(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": float64(42), + "Int8": float64(42), + "Int16": float64(42), + "Int32": float64(42), + "Int64": float64(42), + "Uint": float64(42), + "Uint8": float64(42), + "Uint16": float64(42), + "Uint32": float64(42), + "Uint64": float64(42), + "Float32": float64(42), + "Float64": float64(42), + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + "Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-nums", + Options: map[string]interface{}{ + "Int": "42", + "Int8": "42", + "Int16": "42", + "Int32": "42", + "Int64": "42", + "Uint": "42", + "Uint8": "42", + "Uint16": "42", + "Uint32": "42", + "Uint64": "42", + "Float32": "42", + "Float64": "42", + }, + }, + exp: SectionNums{ + Int: int(42), + Int8: int8(42), + Int16: int16(42), + Int32: int32(42), + Int64: int64(42), + Uint: uint(42), + Uint8: uint8(42), + Uint16: uint16(42), + Uint32: uint32(42), + Uint64: uint64(42), + Float32: float32(42), + Float64: float64(42), + }, + redacted: map[string]interface{}{ + "Int": int(42), + "Int8": int8(42), + 
"Int16": int16(42), + "Int32": int32(42), + "Int64": int64(42), + "Uint": uint(42), + "Uint8": uint8(42), + "Uint16": uint16(42), + "Uint32": uint32(42), + "Uint64": uint64(42), + "Float32": float32(42), + "Float64": float64(42), + }, + }, + { + o: override.Override{ + Section: "section-c", + Options: map[string]interface{}{ + "option4": 42, + "password": "supersecret", + }, + }, + exp: &SectionC{ + Option4: int64(42), + Password: "supersecret", + }, + redacted: map[string]interface{}{ + "option4": int64(42), + "password": true, + }, + }, + { + o: override.Override{ + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option5": "x-new-5", + }, + }, + exp: SectionD{ + ID: "x", + Option5: "x-new-5", + }, + redacted: map[string]interface{}{ + "id": "x", + "option5": "x-new-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + { + o: override.Override{ + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option6": map[string]interface{}{"a": map[string]interface{}{"b": 42}}, + }, + }, + exp: SectionD{ + ID: "x", + Option5: "x-5", + Option6: map[string]map[string]int{"a": {"b": 42}}, + }, + redacted: map[string]interface{}{ + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int{"a": {"b": 42}}, + "option7": [][]int(nil), + }, + }, + { + o: override.Override{ + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option7": []interface{}{[]interface{}{6, 7, 42}, []interface{}{6, 9, 42}}, + }, + }, + exp: SectionD{ + ID: "x", + Option5: "x-5", + Option7: [][]int{{6, 7, 42}, {6, 9, 42}}, + }, + redacted: map[string]interface{}{ + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int{{6, 7, 42}, {6, 9, 42}}, + }, + }, + { + // Test that a Stringer can convert into a number + o: override.Override{ + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option7": []interface{}{ + []interface{}{ + json.Number("6"), + json.Number("7"), + json.Number("42"), + }, + []interface{}{ + json.Number("6"), + json.Number("9"), + json.Number("42"), + }}, + }, + }, + exp: SectionD{ + ID: "x", + Option5: "x-5", + Option7: [][]int{{6, 7, 42}, {6, 9, 42}}, + }, + redacted: map[string]interface{}{ + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int{{6, 7, 42}, {6, 9, 42}}, + }, + }, + } + for _, tc := range testCases { + if newConfig, err := override.OverrideConfig(testConfig, []override.Override{tc.o}); err != nil { + t.Fatal(err) + } else { + element := newConfig[tc.o.Section][0] + // Validate value + if got := element.Value(); !reflect.DeepEqual(got, tc.exp) { + t.Errorf("unexpected newConfig.Value result:\ngot\n%#v\nexp\n%#v\n", got, tc.exp) + } + // Validate redacted + if got, err := element.Redacted(); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(got, tc.redacted) { + t.Errorf("unexpected newConfig.Redacted result:\ngot\n%#v\nexp\n%#v\n", got, tc.redacted) + } + } + // Validate original not modified + if !reflect.DeepEqual(testConfig, copy) { + t.Errorf("original configuration object was modified. 
got %v exp %v", testConfig, copy) + } + } +} + +func TestOverrideConfig_Multiple(t *testing.T) { + testConfig := &TestConfig{ + SectionA: SectionA{ + Option1: "o1", + }, + SectionC: &SectionC{ + Option4: -1, + }, + SectionDs: []SectionD{ + { + ID: "x", + Option5: "x-5", + }, + { + ID: "y", + Option5: "y-5", + }, + { + ID: "z", + Option5: "z-5", + }, + }, + } + copy, err := copystructure.Copy(testConfig) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + os []override.Override + exp map[string][]map[string]interface{} + }{ + { + name: "leave section-c default", + os: []override.Override{ + { + Section: "section-a", + Options: map[string]interface{}{ + "option1": "new-1", + }, + }, + { + Section: "section-b", + Options: map[string]interface{}{ + "option3": "new-3", + }, + }, + { + Section: "section-d", + Element: "y", + Options: map[string]interface{}{ + "option5": "y-new-5", + }, + }, + { + Section: "section-d", + Element: "x", + Options: map[string]interface{}{ + "option5": "x-new-5", + }, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "new-1", + "option2": "", + }}, + "section-b": {{ + "option3": "new-3", + }}, + "section-c": {{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "x", + "option5": "x-new-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "y", + "option5": "y-new-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "override section-c password create new section-d element", + os: []override.Override{ + { + Section: "section-c", + Options: map[string]interface{}{ + "password": "secret", + }, + }, + { + Section: "section-d", + Create: true, + Options: map[string]interface{}{ + "id": "w", + "option5": "w-new-5", + }, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": {{ + "option4": int64(-1), + "password": true, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "w", + "option5": "w-new-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "y", + "option5": "y-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "delete element from section-d", + os: []override.Override{ + { + Section: "section-d", + Element: "y", + Delete: true, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": 
{{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "create element in section-d, delete y from section-d", + os: []override.Override{ + { + Section: "section-d", + Create: true, + Options: map[string]interface{}{ + "id": "w", + "option5": "w-new-5", + }, + }, + { + Section: "section-d", + Element: "y", + Delete: true, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": {{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "w", + "option5": "w-new-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "created element in section-d using defaults", + os: []override.Override{ + { + Section: "section-d", + Create: true, + Options: map[string]interface{}{ + "id": "w", + }, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": {{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "w", + "option5": "o5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "y", + "option5": "y-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "delete created element in section-d", + os: []override.Override{ + { + Section: "section-d", + Create: true, + Options: map[string]interface{}{ + "id": "w", + "option5": "w-new-5", + }, + }, + { + Section: "section-d", + Element: "y", + Delete: true, + }, + { + Section: "section-d", + Element: "w", + Delete: true, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": {{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": 
int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + "section-d": { + { + "id": "x", + "option5": "x-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + { + "id": "z", + "option5": "z-5", + "option6": map[string]map[string]int(nil), + "option7": [][]int(nil), + }, + }, + }, + }, + { + name: "delete all elements in section-d", + os: []override.Override{ + { + Section: "section-d", + Element: "x", + Delete: true, + }, + { + Section: "section-d", + Element: "y", + Delete: true, + }, + { + Section: "section-d", + Element: "z", + Delete: true, + }, + }, + exp: map[string][]map[string]interface{}{ + "section-a": {{ + "option1": "o1", + "option2": "", + }}, + "section-b": {{ + "option3": "", + }}, + "section-c": {{ + "option4": int64(-1), + "password": false, + }}, + "section-nums": {{ + "Int": int(0), + "Int8": int8(0), + "Int16": int16(0), + "Int32": int32(0), + "Int64": int64(0), + "Uint": uint(0), + "Uint8": uint8(0), + "Uint16": uint16(0), + "Uint32": uint32(0), + "Uint64": uint64(0), + "Float32": float32(0), + "Float64": float64(0), + }}, + }, + }, + } + + for _, tc := range testCases { + t.Log(tc.name) + if sections, err := override.OverrideConfig(testConfig, tc.os); err != nil { + t.Fatal(err) + } else { + // Validate sections + if got, exp := len(sections), len(tc.exp); got != exp { + t.Errorf("unexpected section count got %d exp %d", got, exp) + continue + } + for name, sectionList := range sections { + expSectionList, ok := tc.exp[name] + if !ok { + t.Errorf("extra section returned %s", name) + break + } + if got, exp := len(sectionList), len(expSectionList); got != exp { + t.Errorf("unexpected section list count got %v exp %v", sectionList, expSectionList) + break + } + for i, s := range sectionList { + redacted, err := s.Redacted() + if err != nil { + t.Fatal(err) + } + if got, exp := redacted, expSectionList[i]; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected sections result for %s element %d: \ngot\n%v\nexp\n%v\n", name, i, got, exp) + } + } + } + } + // Validate original not modified + if !reflect.DeepEqual(testConfig, copy) { + t.Errorf("original configuration object was modified. 
got %v exp %v", testConfig, copy) + } + } +} diff --git a/services/config/service.go b/services/config/service.go new file mode 100644 index 000000000..8638a5895 --- /dev/null +++ b/services/config/service.go @@ -0,0 +1,503 @@ +package config + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "path" + "regexp" + "strings" + + client "github.com/influxdata/kapacitor/client/v1" + "github.com/influxdata/kapacitor/services/config/override" + "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/storage" + "github.com/pkg/errors" +) + +const ( + configPath = "/config" + configPathAnchored = "/config/" + configBasePath = httpd.BasePath + configPathAnchored +) + +type ConfigUpdate struct { + Name string + NewConfig []interface{} + ErrC chan<- error +} + +type Service struct { + enabled bool + config interface{} + logger *log.Logger + updates chan<- ConfigUpdate + routes []httpd.Route + + // Cached map of section name to element key name + elementKeys map[string]string + + overrides OverrideDAO + + StorageService interface { + Store(namespace string) storage.Interface + } + HTTPDService interface { + AddRoutes([]httpd.Route) error + DelRoutes([]httpd.Route) + } +} + +func NewService(c Config, config interface{}, l *log.Logger, updates chan<- ConfigUpdate) *Service { + return &Service{ + enabled: c.Enabled, + config: config, + logger: l, + updates: updates, + } +} + +// The storage namespace for all configuration override data. +const configNamespace = "config_overrides" + +func (s *Service) Open() error { + store := s.StorageService.Store(configNamespace) + s.overrides = newOverrideKV(store) + + // Cache element keys + if elementKeys, err := override.ElementKeys(s.config); err != nil { + return errors.Wrap(err, "failed to determine the element keys") + } else { + s.elementKeys = elementKeys + } + + // Define API routes + s.routes = []httpd.Route{ + { + Name: "config", + Method: "GET", + Pattern: configPath, + HandlerFunc: s.handleGetConfig, + }, + { + Name: "config", + Method: "GET", + Pattern: configPathAnchored, + HandlerFunc: s.handleGetConfig, + }, + { + Name: "config", + Method: "POST", + Pattern: configPathAnchored, + HandlerFunc: s.handleUpdateSection, + }, + } + + err := s.HTTPDService.AddRoutes(s.routes) + return errors.Wrap(err, "failed to add API routes") +} + +func (s *Service) Close() error { + close(s.updates) + s.HTTPDService.DelRoutes(s.routes) + return nil +} + +type updateAction struct { + section string + element string + hasElement bool + + Set map[string]interface{} `json:"set"` + Delete []string `json:"delete"` + Add map[string]interface{} `json:"add"` + Remove []string `json:"remove"` +} + +func (ua updateAction) Validate() error { + if ua.section == "" { + return errors.New("must provide section name") + } + if !validSectionOrElement.MatchString(ua.section) { + return fmt.Errorf("invalid section name %q", ua.section) + } + if ua.element != "" && !validSectionOrElement.MatchString(ua.element) { + return fmt.Errorf("invalid element name %q", ua.element) + } + + sEmpty := len(ua.Set) == 0 + dEmpty := len(ua.Delete) == 0 + aEmpty := len(ua.Add) == 0 + rEmpty := len(ua.Remove) == 0 + + if (!sEmpty || !dEmpty) && !(aEmpty && rEmpty) { + return errors.New("cannot provide both set/delete and add/remove actions in the same update") + } + + if !aEmpty && ua.element != "" { + return errors.New("must not provide an element name when adding an a new override") + } + + if !rEmpty && ua.element != "" { + return errors.New("must not provide 
element when removing an override") + } + + if rEmpty && aEmpty && !ua.hasElement { + return errors.New("element not specified, are you missing a trailing '/'?") + } + return nil +} + +var validSectionOrElement = regexp.MustCompile(`^[-\w+]+$`) + +func sectionAndElementToID(section, element string) string { + id := path.Join(section, element) + if element == "" { + id += "/" + } + return id +} + +func sectionAndElementFromPath(p, basePath string) (section, element string, hasSection, hasElement bool) { + if !strings.HasPrefix(p, basePath) { + return "", "", false, false + } + s, e := sectionAndElementFromID(strings.TrimPrefix(p, basePath)) + return s, e, true, (e != "" || s != "" && strings.HasSuffix(p, "/")) +} + +func sectionAndElementFromID(id string) (section, element string) { + parts := strings.Split(id, "/") + if l := len(parts); l == 1 { + section = parts[0] + } else if l == 2 { + section = parts[0] + element = parts[1] + } + return +} + +func (s *Service) sectionLink(section string) client.Link { + return client.Link{Relation: client.Self, Href: path.Join(configBasePath, section)} +} + +func (s *Service) elementLink(section, element string) client.Link { + href := path.Join(configBasePath, section, element) + if element == "" { + href += "/" + } + return client.Link{Relation: client.Self, Href: href} +} + +var configLink = client.Link{Relation: client.Self, Href: path.Join(httpd.BasePath, configPath)} + +func (s *Service) handleUpdateSection(w http.ResponseWriter, r *http.Request) { + if !s.enabled { + httpd.HttpError(w, "config override service is not enabled", true, http.StatusForbidden) + return + } + section, element, hasSection, hasElement := sectionAndElementFromPath(r.URL.Path, configBasePath) + if !hasSection { + httpd.HttpError(w, "no section specified", true, http.StatusBadRequest) + return + + } + ua := updateAction{ + section: section, + element: element, + hasElement: hasElement, + } + err := json.NewDecoder(r.Body).Decode(&ua) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to decode JSON:", err), true, http.StatusBadRequest) + return + } + + // Apply sets/deletes to stored overrides + overrides, saveFunc, err := s.overridesForUpdateAction(ua) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to apply update: ", err), true, http.StatusBadRequest) + return + } + + // Apply overrides to config object + os := convertOverrides(overrides) + newConfig, err := override.OverrideConfig(s.config, os) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + + // collect element values + sectionList := make([]interface{}, len(newConfig[section])) + for i, s := range newConfig[section] { + sectionList[i] = s.Value() + } + + // Construct ConfigUpdate + errC := make(chan error, 1) + cu := ConfigUpdate{ + Name: section, + NewConfig: sectionList, + ErrC: errC, + } + + // Send update + s.updates <- cu + // Wait for error + if err := <-errC; err != nil { + httpd.HttpError(w, fmt.Sprintf("failed to update configuration %s/%s: %v", section, element, err), true, http.StatusInternalServerError) + return + } + + // Save the result of the update + if err := saveFunc(); err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } + + // Success + w.WriteHeader(http.StatusNoContent) +} + +func (s *Service) handleGetConfig(w http.ResponseWriter, r *http.Request) { + if !s.enabled { + httpd.HttpError(w, "config override service is not enabled", true, http.StatusForbidden) + return + } + section, 
element, hasSection, hasElement := sectionAndElementFromPath(r.URL.Path, configBasePath) + config, err := s.getConfig(section) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to resolve current config:", err), true, http.StatusInternalServerError) + return + } + if hasSection && section == "" { + httpd.HttpError(w, "section not specified, do you have an extra trailing '/'?", true, http.StatusBadRequest) + return + } + if !hasSection { + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(config) + } else if section != "" { + sec, ok := config.Sections[section] + if !ok { + httpd.HttpError(w, fmt.Sprint("unknown section: ", section), true, http.StatusNotFound) + return + } + if hasElement { + var elementEntry client.ConfigElement + // Find specified element + elementKey := s.elementKeys[section] + found := false + for _, e := range sec.Elements { + if (element == "" && elementKey == "") || e.Options[elementKey] == element { + elementEntry = e + found = true + break + } + } + if found { + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(elementEntry) + } else { + httpd.HttpError(w, fmt.Sprintf("unknown section/element: %s/%s", section, element), true, http.StatusNotFound) + return + } + } else { + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(sec) + } + } +} + +// overridesForUpdateAction produces a list of overrides relevant to the update action and +// returns save function. Call the save function to permanently store the result of the update. +func (s *Service) overridesForUpdateAction(ua updateAction) ([]Override, func() error, error) { + if err := ua.Validate(); err != nil { + return nil, nil, errors.Wrap(err, "invalid update action") + } + section := ua.section + element := ua.element + if len(ua.Remove) == 0 { + // If we are adding find element value based on the element key + if len(ua.Add) > 0 { + key, ok := s.elementKeys[section] + if !ok { + return nil, nil, fmt.Errorf("unknown section %q", section) + } + if key == "" { + return nil, nil, fmt.Errorf("section %q is not a list, cannot add new element", section) + } + elementValue, ok := ua.Add[key] + if !ok { + return nil, nil, fmt.Errorf("missing key %q in \"add\" map", key) + } + if str, ok := elementValue.(string); !ok { + return nil, nil, fmt.Errorf("expected %q key to be a string, got %T", key, elementValue) + } else { + element = str + } + } + + id := sectionAndElementToID(section, element) + + // Apply changes to single override + o, err := s.overrides.Get(id) + if err == ErrNoOverrideExists { + o = Override{ + ID: id, + Options: make(map[string]interface{}), + } + } else if err != nil { + return nil, nil, errors.Wrapf(err, "failed to retrieve existing overrides for %s", id) + } else if err == nil && len(ua.Add) > 0 { + return nil, nil, errors.Wrapf(err, "cannot add new override, override already exists for %s", id) + } + if len(ua.Add) > 0 { + // Drop all previous options and only use the current set. 
+ o.Options = make(map[string]interface{}, len(ua.Add)) + o.Create = true + for k, v := range ua.Add { + o.Options[k] = v + } + } else { + for k, v := range ua.Set { + o.Options[k] = v + } + for _, k := range ua.Delete { + delete(o.Options, k) + } + } + saveFunc := func() error { + if err := s.overrides.Set(o); err != nil { + return errors.Wrapf(err, "failed to save override %s", o.ID) + } + return nil + } + + // Get all overrides for the section + overrides, err := s.overrides.List(section) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get existing overrides for section %s", ua.section) + } + + // replace modified override + found := false + for i := range overrides { + if overrides[i].ID == id { + overrides[i] = o + found = true + break + } + } + if !found { + overrides = append(overrides, o) + } + return overrides, saveFunc, nil + } else { + // Remove the list of overrides + removed := make([]string, len(ua.Remove)) + removeLookup := make(map[string]bool, len(ua.Remove)) + for i, r := range ua.Remove { + id := sectionAndElementToID(section, r) + removed[i] = id + removeLookup[id] = true + } + // Get overrides for the section + overrides, err := s.overrides.List(section) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get existing overrides for section %s", ua.section) + } + + // Filter overrides + filtered := overrides[:0] + for _, o := range overrides { + if !removeLookup[o.ID] { + filtered = append(filtered, o) + } + } + saveFunc := func() error { + for _, id := range removed { + if err := s.overrides.Delete(id); err != nil { + return errors.Wrapf(err, "failed to remove existing override %s", id) + } + } + return nil + } + return filtered, saveFunc, nil + } +} + +func convertOverrides(overrides []Override) []override.Override { + os := make([]override.Override, len(overrides)) + for i, o := range overrides { + section, element := sectionAndElementFromID(o.ID) + if o.Create { + element = "" + } + os[i] = override.Override{ + Section: section, + Element: element, + Options: o.Options, + Create: o.Create, + } + } + return os +} + +// getConfig returns a map of a fully resolved configuration object. 
+func (s *Service) getConfig(section string) (client.ConfigSections, error) { + overrides, err := s.overrides.List(section) + if err != nil { + return client.ConfigSections{}, errors.Wrap(err, "failed to retrieve config overrides") + } + os := convertOverrides(overrides) + sections, err := override.OverrideConfig(s.config, os) + if err != nil { + return client.ConfigSections{}, errors.Wrap(err, "failed to apply configuration overrides") + } + config := client.ConfigSections{ + Link: configLink, + Sections: make(map[string]client.ConfigSection, len(sections)), + } + for name, elements := range sections { + if !strings.HasPrefix(name, section) { + // Skip sections we did not request + continue + } + sec := config.Sections[name] + sec.Link = s.sectionLink(name) + for _, element := range elements { + redacted, err := element.Redacted() + if err != nil { + return client.ConfigSections{}, errors.Wrap(err, "failed to get redacted configuration data") + } + sec.Elements = append(sec.Elements, client.ConfigElement{ + Link: s.elementLink(name, element.ElementID()), + Options: redacted, + }) + } + config.Sections[name] = sec + } + return config, nil +} + +func (s *Service) Config() (map[string][]interface{}, error) { + overrides, err := s.overrides.List("") + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve config overrides") + } + os := convertOverrides(overrides) + sections, err := override.OverrideConfig(s.config, os) + if err != nil { + return nil, errors.Wrap(err, "failed to apply configuration overrides") + } + config := make(map[string][]interface{}, len(sections)) + for name, sectionList := range sections { + for _, section := range sectionList { + config[name] = append(config[name], section.Value()) + } + } + return config, nil +} diff --git a/services/config/service_test.go b/services/config/service_test.go new file mode 100644 index 000000000..e071271ce --- /dev/null +++ b/services/config/service_test.go @@ -0,0 +1,1071 @@ +package config_test + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "reflect" + "strings" + "testing" + "time" + + client "github.com/influxdata/kapacitor/client/v1" + "github.com/influxdata/kapacitor/services/config" + "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/httpd/httpdtest" + "github.com/influxdata/kapacitor/services/storage/storagetest" +) + +type SectionA struct { + Option1 string `override:"option-1"` +} + +func (a SectionA) Validate() error { + if a.Option1 == "invalid" { + return errors.New("invalid option-1") + } + return nil +} + +type SectionB struct { + Option2 string `override:"option-2"` + Password string `override:"password,redact"` +} + +type SectionC struct { + Name string `override:"name"` + Option3 int `override:"option-3"` +} + +type TestConfig struct { + SectionA SectionA `override:"section-a"` + SectionB SectionB `override:"section-b"` + SectionCs []SectionC `override:"section-c,element-key=name"` +} + +func OpenNewSerivce(testConfig interface{}, updates chan<- config.ConfigUpdate) (*config.Service, *httpdtest.Server) { + c := config.NewConfig() + service := config.NewService(c, testConfig, log.New(os.Stderr, "[config] ", log.LstdFlags), updates) + service.StorageService = storagetest.New() + server := httpdtest.NewServer(testing.Verbose()) + service.HTTPDService = server + if err := service.Open(); err != nil { + panic(err) + } + return service, server +} + +func TestService_UpdateSection(t *testing.T) { + testCases := []struct { + 
body string + path string + expName string + expErr error + exp interface{} + updateErr error + skipUpdate bool + }{ + // NOTE: These test cases all update the same underlying service, + // so changes from one effect the next. + // In other words the order of tests is important + { + body: `{"set":{"option-1":"invalid"}}`, + path: "/section-a/", + expName: "section-a", + expErr: errors.New("failed to override configuration section-a/: failed validation: invalid option-1"), + skipUpdate: true, //error is validation error, so update is never sent + }, + { + body: `{"set":{"option-1": "new-o1"}}`, + path: "/section-a/", + expName: "section-a", + exp: []interface{}{ + SectionA{ + Option1: "new-o1", + }, + }, + }, + { + body: `{"add":{"name":"element0","option-3": 7}}`, + path: "/section-c/", + expName: "section-c", + exp: []interface{}{ + SectionC{ + Name: "element0", + Option3: 7, + }, + SectionC{ + Name: "element1", + Option3: 3, + }, + }, + }, + { + body: `{"set":{"option-3": "bob"}}`, + path: "/section-c/element1", + expName: "section-c", + expErr: errors.New("failed to override configuration section-c/element1: cannot set option option-3: cannot convert string \"bob\" into int"), + skipUpdate: true, + }, + { + body: `{"delete":["option-1"]}`, + path: "/section-a/", + expName: "section-a", + exp: []interface{}{ + SectionA{ + Option1: "o1", + }, + }, + }, + { + body: `{"set":{"option-2":"valid"}}`, + path: "/section-b/", + expName: "section-b", + expErr: errors.New("failed to update configuration section-b/: failed to update service"), + exp: []interface{}{ + SectionB{ + Option2: "valid", + }, + }, + updateErr: errors.New("failed to update service"), + }, + // Set unknown option + { + body: `{"set":{"unknown": "value"}}`, + path: "/section-a/", + expName: "section-a", + expErr: errors.New("failed to override configuration section-a/: unknown options [unknown] in section section-a"), + skipUpdate: true, + }, + // Validate unknown option was not persisted + { + body: `{"set":{"option-1": "value"}}`, + path: "/section-a/", + expName: "section-a", + exp: []interface{}{ + SectionA{ + Option1: "value", + }, + }, + }, + // Try to add element to non list section + { + body: `{"add":{"name":"element0","option-1": 7}}`, + path: "/section-a/", + expName: "section-a", + expErr: errors.New(`failed to apply update: section "section-a" is not a list, cannot add new element`), + skipUpdate: true, + }, + // Try to set element to wrong path + { + body: `{"set":{"option-1": 7}}`, + path: "/section-a", + expName: "section-a", + expErr: errors.New(`failed to apply update: invalid update action: element not specified, are you missing a trailing '/'?`), + skipUpdate: true, + }, + } + testConfig := &TestConfig{ + SectionA: SectionA{ + Option1: "o1", + }, + SectionCs: []SectionC{ + { + Name: "element1", + Option3: 3, + }, + }, + } + updates := make(chan config.ConfigUpdate, len(testCases)) + service, server := OpenNewSerivce(testConfig, updates) + defer server.Close() + defer service.Close() + basePath := server.Server.URL + httpd.BasePath + "/config" + for _, tc := range testCases { + if !tc.skipUpdate { + tc := tc + go func() { + cu := <-updates + err := tc.updateErr + if !reflect.DeepEqual(cu.NewConfig, tc.exp) { + err = fmt.Errorf("unexpected new config: got %v exp %v", cu.NewConfig, tc.exp) + } + if got, exp := cu.Name, tc.expName; got != exp { + err = fmt.Errorf("unexpected config update Name: got %s exp %s", got, exp) + } + cu.ErrC <- err + }() + } + resp, err := http.Post(basePath+tc.path, 
"application/json", strings.NewReader(tc.body)) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + // Validate response + if tc.expErr != nil { + gotErr := struct { + Error string + }{} + json.Unmarshal(body, &gotErr) + if got, exp := gotErr.Error, tc.expErr.Error(); got != exp { + t.Fatalf("unexpected error:\ngot\n%q\nexp\n%q\n", got, exp) + } + } else if got, exp := resp.StatusCode, http.StatusNoContent; got != exp { + t.Fatalf("unexpected code: got %d exp %d.\nBody:\n%s", got, exp, string(body)) + } + + } +} + +func TestService_GetConfig(t *testing.T) { + type update struct { + Path string + Body string + } + testCases := []struct { + updates []update + expName string + exp client.ConfigSections + }{ + { + updates: []update{{ + Path: "/section-a/", + Body: `{"set":{"option-1": "new-o1"}}`, + }}, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "new-o1", + }}, + }, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }}, + }, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-a/", + Body: `{"set":{"option-1": "new-o1"}}`, + }, + { + Path: "/section-a/", + Body: `{"delete":["option-1"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }}, + }, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }}, + }, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + 
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-a/", + Body: `{"set":{"option-1": "new-o1"}}`, + }, + { + Path: "/section-b/", + Body: `{"set":{"option-2":"new-o2"},"delete":["option-nonexistant"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "new-o1", + }}, + }, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "new-o2", + "password": false, + }}, + }, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-a/", + Body: `{"set":{"option-1": "new-o1"}}`, + }, + { + Path: "/section-a/", + Body: `{"set":{"option-1":"deleted"},"delete":["option-1"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }}, + }, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }}, + }, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + 
"name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-b/", + Body: `{"set":{"password": "secret"}}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": true, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-c/x", + Body: `{"set":{"option-3": 42}}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(42), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: 
map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-c/x", + Body: `{"set":{"option-3": 42}}`, + }, + { + Path: "/section-c/x", + Body: `{"delete":["option-3"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-c", + Body: `{"add":{"name":"w", "option-3": 42}}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/w"}, + Options: map[string]interface{}{ + "name": "w", + "option-3": float64(42), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, 
+ }, + }, + }, + { + updates: []update{ + { + Path: "/section-c", + Body: `{"add":{"name":"w", "option-3": 42}}`, + }, + { + Path: "/section-c", + Body: `{"add":{"name":"q"}}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/q"}, + Options: map[string]interface{}{ + "name": "q", + "option-3": float64(0), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/w"}, + Options: map[string]interface{}{ + "name": "w", + "option-3": float64(42), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-c", + Body: `{"add":{"name":"w", "option-3": 42}}`, + }, + { + Path: "/section-c/w", + Body: `{"set":{"option-3": 24}}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/w"}, + Options: map[string]interface{}{ + "name": "w", + "option-3": float64(24), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: 
client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + updates: []update{ + { + Path: "/section-c", + Body: `{"add":{"name":"w", "option-3": 42}}`, + }, + { + Path: "/section-c/w", + Body: `{"set":{"option-3": 24}}`, + }, + { + Path: "/section-c", + Body: `{"remove":["w"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + "option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + { + // Only added overrides can be removed, not existing default elements + updates: []update{ + { + Path: "/section-c", + Body: `{"remove":["x"]}`, + }, + }, + exp: client.ConfigSections{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config"}, + Sections: map[string]client.ConfigSection{ + "section-a": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-a/"}, + Options: map[string]interface{}{ + "option-1": "o1", + }, + }}, + }, + "section-b": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b"}, + Elements: []client.ConfigElement{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-b/"}, + Options: map[string]interface{}{ + "option-2": "o2", + "password": false, + }, + }}, + }, + "section-c": client.ConfigSection{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c"}, + Elements: []client.ConfigElement{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/x"}, + Options: map[string]interface{}{ + "name": "x", + "option-3": float64(1), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/y"}, + Options: map[string]interface{}{ + "name": "y", + 
"option-3": float64(2), + }, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/section-c/z"}, + Options: map[string]interface{}{ + "name": "z", + "option-3": float64(3), + }, + }, + }, + }, + }, + }, + }, + } + testConfig := &TestConfig{ + SectionA: SectionA{ + Option1: "o1", + }, + SectionB: SectionB{ + Option2: "o2", + }, + SectionCs: []SectionC{ + { + Name: "x", + Option3: 1, + }, + { + Name: "y", + Option3: 2, + }, + { + Name: "z", + Option3: 3, + }, + }, + } + for _, tc := range testCases { + updates := make(chan config.ConfigUpdate, len(testCases)) + service, server := OpenNewSerivce(testConfig, updates) + defer server.Close() + defer service.Close() + basePath := server.Server.URL + httpd.BasePath + "/config" + // Apply all updates + for _, update := range tc.updates { + go func() { + // Validate we got the update over the chan. + // This keeps the chan unblocked. + timer := time.NewTimer(10 * time.Millisecond) + defer timer.Stop() + select { + case cu := <-updates: + cu.ErrC <- nil + case <-timer.C: + t.Fatal("expected to get config update") + } + }() + resp, err := http.Post(basePath+update.Path, "application/json", strings.NewReader(update.Body)) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if got, exp := resp.StatusCode, http.StatusNoContent; got != exp { + t.Fatalf("update failed: got: %d exp: %d\nBody:\n%s", got, exp, string(body)) + } + } + + // Get config + resp, err := http.Get(basePath) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("update failed: %d", resp.StatusCode) + } + + var got client.ConfigSections + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got, tc.exp) { + t.Errorf("unexpected config:\ngot\n%v\nexp\n%v\n", got, tc.exp) + } + } +} diff --git a/services/hipchat/config.go b/services/hipchat/config.go index ac90616d1..d2ba56d61 100644 --- a/services/hipchat/config.go +++ b/services/hipchat/config.go @@ -1,22 +1,38 @@ package hipchat +import ( + "net/url" + + "github.com/pkg/errors" +) + type Config struct { // Whether HipChat integration is enabled. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The HipChat API URL. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // The authentication token for this notification, can be overridden per alert. // https://www.hipchat.com/docs/apiv2/auth for info on obtaining a token. - Token string `toml:"token"` + Token string `toml:"token" override:"token,redact"` // The default room, can be overridden per alert. - Room string `toml:"room"` + Room string `toml:"room" override:"room"` // Whether all alerts should automatically post to HipChat - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` // Whether all alerts should automatically use stateChangesOnly mode. // Only applies if global is also set. 
- StateChangesOnly bool `toml:"state-changes-only"` + StateChangesOnly bool `toml:"state-changes-only" override:"state-changes-only"` } func NewConfig() Config { return Config{} } + +func (c Config) Validate() error { + if c.Enabled && c.URL == "" { + return errors.New("must specify url") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid url %q", c.URL) + } + return nil +} diff --git a/services/hipchat/service.go b/services/hipchat/service.go index 28043b409..41051f5d7 100644 --- a/services/hipchat/service.go +++ b/services/hipchat/service.go @@ -5,32 +5,28 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" "net/url" + "path" + "sync/atomic" "github.com/influxdata/kapacitor" ) type Service struct { - room string - token string - url string - global bool - stateChangesOnly bool - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - room: c.Room, - token: c.Token, - url: c.URL, - global: c.Global, - stateChangesOnly: c.StateChangesOnly, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -41,29 +37,82 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Global() bool { - return s.global + c := s.config() + return c.Global } func (s *Service) StateChangesOnly() bool { - return s.stateChangesOnly + c := s.config() + return c.StateChangesOnly } func (s *Service) Alert(room, token, message string, level kapacitor.AlertLevel) error { + url, post, err := s.preparePost(room, token, message, level) + if err != nil { + return err + } - //Generate HipChat API Url including room and authentication token + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Error string `json:"error"` + } + r := &response{Error: fmt.Sprintf("failed to understand HipChat response. 
code: %d content: %s", resp.StatusCode, string(body))} + b := bytes.NewReader(body) + dec := json.NewDecoder(b) + dec.Decode(r) + return errors.New(r.Error) + } + return nil +} + +func (s *Service) preparePost(room, token, message string, level kapacitor.AlertLevel) (string, io.Reader, error) { + c := s.config() + + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } + //Generate HipChat API URL including room and authentication token if room == "" { - room = s.room + room = c.Room } if token == "" { - token = s.token + token = c.Token } - var Url *url.URL - Url, err := url.Parse(s.url + "/" + room + "/notification?auth_token=" + token) + u, err := url.Parse(c.URL) if err != nil { - return err + return "", nil, err } + u.Path = path.Join(u.Path, room, "notification") + v := url.Values{} + v.Set("auth_token", token) + u.RawQuery = v.Encode() var color string switch level { @@ -85,27 +134,7 @@ func (s *Service) Alert(room, token, message string, level kapacitor.AlertLevel) enc := json.NewEncoder(&post) err = enc.Encode(postData) if err != nil { - return err - } - - resp, err := http.Post(Url.String(), "application/json", &post) - if err != nil { - return err + return "", nil, err } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Error string `json:"error"` - } - r := &response{Error: fmt.Sprintf("failed to understand HipChat response. code: %d content: %s", resp.StatusCode, string(body))} - b := bytes.NewReader(body) - dec := json.NewDecoder(b) - dec.Decode(r) - return errors.New(r.Error) - } - return nil + return u.String(), &post, nil } diff --git a/services/httpd/httpdtest/server.go b/services/httpd/httpdtest/server.go new file mode 100644 index 000000000..64edaced5 --- /dev/null +++ b/services/httpd/httpdtest/server.go @@ -0,0 +1,49 @@ +package httpdtest + +import ( + "expvar" + "log" + "net/http/httptest" + + "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/logging/loggingtest" +) + +type Server struct { + Handler *httpd.Handler + Server *httptest.Server +} + +func NewServer(verbose bool) *Server { + statMap := &expvar.Map{} + statMap.Init() + ls := loggingtest.New() + s := &Server{ + Handler: httpd.NewHandler( + false, + verbose, + verbose, + false, + statMap, + ls.NewLogger("[httpdtest] ", log.LstdFlags), + ls, + "", + ), + } + + s.Server = httptest.NewServer(s.Handler) + return s +} + +func (s *Server) Close() error { + s.Server.Close() + return nil +} + +func (s *Server) AddRoutes(routes []httpd.Route) error { + return s.Handler.AddRoutes(routes) +} + +func (s *Server) DelRoutes(routes []httpd.Route) { + s.Handler.DelRoutes(routes) +} diff --git a/services/httpd/service.go b/services/httpd/service.go index f12210e77..52355415d 100644 --- a/services/httpd/service.go +++ b/services/httpd/service.go @@ -70,7 +70,6 @@ func NewService(c Config, hostname string, l *log.Logger, li logging.Interface) ), logger: l, } - s.Handler.logger = s.logger return s } diff --git a/services/influxdb/config.go b/services/influxdb/config.go index 3fe8f1568..83afcbfc1 100644 --- a/services/influxdb/config.go +++ b/services/influxdb/config.go @@ -21,55 +21,55 @@ const ( ) type Config struct { - Enabled bool `toml:"enabled"` - Name string `toml:"name"` - Default bool `toml:"default"` - URLs []string `toml:"urls"` - Username string `toml:"username"` - Password string 
`toml:"password"` + Enabled bool `toml:"enabled" override:"enabled"` + Name string `toml:"name" override:"name"` + Default bool `toml:"default" override:"default"` + URLs []string `toml:"urls" override:"urls"` + Username string `toml:"username" override:"username"` + Password string `toml:"password" override:"password,redact"` // Path to CA file - SSLCA string `toml:"ssl-ca"` + SSLCA string `toml:"ssl-ca" override:"ssl-ca"` // Path to host cert file - SSLCert string `toml:"ssl-cert"` + SSLCert string `toml:"ssl-cert" override:"ssl-cert"` // Path to cert key file - SSLKey string `toml:"ssl-key"` + SSLKey string `toml:"ssl-key" override:"ssl-key"` // Use SSL but skip chain & host verification - InsecureSkipVerify bool `toml:"insecure-skip-verify"` + InsecureSkipVerify bool `toml:"insecure-skip-verify" override:"insecure-skip-verify"` - Timeout toml.Duration `toml:"timeout"` - DisableSubscriptions bool `toml:"disable-subscriptions"` - SubscriptionProtocol string `toml:"subscription-protocol"` - Subscriptions map[string][]string `toml:"subscriptions"` - ExcludedSubscriptions map[string][]string `toml:"excluded-subscriptions"` - KapacitorHostname string `toml:"kapacitor-hostname"` - HTTPPort int `toml:"http-port"` - UDPBind string `toml:"udp-bind"` - UDPBuffer int `toml:"udp-buffer"` - UDPReadBuffer int `toml:"udp-read-buffer"` - StartUpTimeout toml.Duration `toml:"startup-timeout"` - SubscriptionSyncInterval toml.Duration `toml:"subscriptions-sync-interval"` + Timeout toml.Duration `toml:"timeout" override:"timeout"` + DisableSubscriptions bool `toml:"disable-subscriptions" override:"disable-subscriptions"` + SubscriptionProtocol string `toml:"subscription-protocol" override:"subscription-protocol"` + Subscriptions map[string][]string `toml:"subscriptions" override:"subscriptions"` + ExcludedSubscriptions map[string][]string `toml:"excluded-subscriptions" override:"excluded-subscriptions"` + KapacitorHostname string `toml:"kapacitor-hostname" override:"kapacitor-hostname"` + HTTPPort int `toml:"http-port" override:"http-port"` + UDPBind string `toml:"udp-bind" override:"udp-bind"` + UDPBuffer int `toml:"udp-buffer" override:"udp-buffer"` + UDPReadBuffer int `toml:"udp-read-buffer" override:"udp-read-buffer"` + StartUpTimeout toml.Duration `toml:"startup-timeout" override:"startup-timeout"` + SubscriptionSyncInterval toml.Duration `toml:"subscriptions-sync-interval" override:"subscriptions-sync-interval"` } func NewConfig() Config { - return Config{ - Enabled: true, - // Cannot initialize slice - // See: https://github.com/BurntSushi/toml/pull/68 - //URLs: []string{"http://localhost:8086"}, - Username: "", - Password: "", - Subscriptions: make(map[string][]string), - ExcludedSubscriptions: map[string][]string{ - stats.DefaultDatabse: []string{stats.DefaultRetentionPolicy}, - }, - UDPBuffer: udp.DefaultBuffer, - StartUpTimeout: toml.Duration(DefaultStartUpTimeout), - SubscriptionProtocol: DefaultSubscriptionProtocol, - SubscriptionSyncInterval: toml.Duration(DefaultSubscriptionSyncInterval), + c := &Config{} + c.Init() + c.Enabled = true + return *c +} + +func (c *Config) Init() { + c.Name = "default" + c.URLs = []string{"http://localhost:8086"} + c.ExcludedSubscriptions = map[string][]string{ + stats.DefaultDatabse: []string{stats.DefaultRetentionPolicy}, } + c.UDPBuffer = udp.DefaultBuffer + c.StartUpTimeout = toml.Duration(DefaultStartUpTimeout) + c.SubscriptionProtocol = DefaultSubscriptionProtocol + c.SubscriptionSyncInterval = toml.Duration(DefaultSubscriptionSyncInterval) } -func (c 
*Config) SetDefaultValues() { +func (c *Config) ApplyConditionalDefaults() { if c.UDPBuffer == 0 { c.UDPBuffer = udp.DefaultBuffer } @@ -109,7 +109,7 @@ func (c Config) Validate() error { switch c.SubscriptionProtocol { case "http", "https", "udp": default: - return fmt.Errorf("invalid subscription protocol, must be one of 'udp', 'http' or 'https', got %s", c.SubscriptionProtocol) + return fmt.Errorf("invalid subscription protocol, must be one of 'udp', 'http' or 'https', got %q: %v", c.SubscriptionProtocol, c) } return nil } diff --git a/services/influxdb/service.go b/services/influxdb/service.go index b73eac021..ee7ab3e92 100644 --- a/services/influxdb/service.go +++ b/services/influxdb/service.go @@ -2,6 +2,7 @@ package influxdb import ( "bytes" + "context" "crypto/rand" "crypto/tls" "crypto/x509" @@ -43,10 +44,19 @@ const ( // Handles requests to write or read from an InfluxDB cluster type Service struct { + mu sync.RWMutex + opened bool + defaultInfluxDB string clusters map[string]*influxdbCluster routes []httpd.Route + subName string + hostname string + clusterID string + httpPort int + useTokens bool + PointsWriter interface { WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error } @@ -58,7 +68,7 @@ type Service struct { DelRoutes([]httpd.Route) } ClientCreator interface { - Create(influxdb.HTTPConfig) (influxdb.Client, error) + Create(influxdb.Config) (influxdb.ClientUpdater, error) } AuthService interface { GrantSubscriptionAccess(token, db, rp string) error @@ -69,102 +79,35 @@ type Service struct { logger *log.Logger } -func NewService(configs []Config, defaultInfluxDB, httpPort int, hostname string, useTokens bool, l *log.Logger) *Service { +func NewService(configs []Config, httpPort int, hostname string, useTokens bool, l *log.Logger) (*Service, error) { clusterID := kapacitor.ClusterIDVar.StringValue() subName := subNamePrefix + clusterID - clusters := make(map[string]*influxdbCluster, len(configs)) - var defaultInfluxDBName string - for i, c := range configs { - urls := make([]influxdb.HTTPConfig, len(c.URLs)) - // Config should have been validated already, ignore error - tlsConfig, _ := getTLSConfig(c.SSLCA, c.SSLCert, c.SSLKey, c.InsecureSkipVerify) - if c.InsecureSkipVerify { - l.Printf("W! 
Using InsecureSkipVerify when connecting to InfluxDB @ %v this is insecure!", c.URLs) - } - var credentials *influxdb.Credentials - if c.Username != "" { - credentials = &influxdb.Credentials{ - Method: influxdb.UserAuthentication, - Username: c.Username, - Password: c.Password, - } - } - for i, u := range c.URLs { - urls[i] = influxdb.HTTPConfig{ - URL: u, - Credentials: credentials, - UserAgent: "Kapacitor", - Timeout: time.Duration(c.Timeout), - TLSConfig: tlsConfig, - } - } - subs := make(map[subEntry]bool, len(c.Subscriptions)) - for cluster, rps := range c.Subscriptions { - for _, rp := range rps { - se := subEntry{cluster, rp, subName} - subs[se] = true - } - } - exSubs := make(map[subEntry]bool, len(c.ExcludedSubscriptions)) - for cluster, rps := range c.ExcludedSubscriptions { - for _, rp := range rps { - se := subEntry{cluster, rp, subName} - exSubs[se] = true - } - } - runningSubs := make(map[subEntry]bool, len(c.Subscriptions)) - services := make(map[subEntry]openCloser, len(c.Subscriptions)) - port := httpPort - if c.HTTPPort != 0 { - port = c.HTTPPort - } - host := hostname - if c.KapacitorHostname != "" { - host = c.KapacitorHostname - } - clusters[c.Name] = &influxdbCluster{ - clusterName: c.Name, - configs: urls, - configSubs: subs, - exConfigSubs: exSubs, - hostname: host, - httpPort: port, - logger: l, - udpBind: c.UDPBind, - udpBuffer: c.UDPBuffer, - udpReadBuffer: c.UDPReadBuffer, - startupTimeout: time.Duration(c.StartUpTimeout), - subscriptionSyncInterval: time.Duration(c.SubscriptionSyncInterval), - clusterID: clusterID, - subName: subName, - disableSubs: c.DisableSubscriptions, - protocol: c.SubscriptionProtocol, - runningSubs: runningSubs, - services: services, - // Do not use tokens for non http protocols - useTokens: useTokens && (c.SubscriptionProtocol == "http" || c.SubscriptionProtocol == "https"), - } - if defaultInfluxDB == i { - defaultInfluxDBName = c.Name - } - } - return &Service{ - defaultInfluxDB: defaultInfluxDBName, - clusters: clusters, - logger: l, - RandReader: rand.Reader, + s := &Service{ + clusters: make(map[string]*influxdbCluster), + clusterID: clusterID, + subName: subName, + hostname: hostname, + httpPort: httpPort, + useTokens: useTokens, + logger: l, + RandReader: rand.Reader, + } + if err := s.updateConfigs(configs); err != nil { + return nil, err } + return s, nil } func (s *Service) Open() error { + s.mu.Lock() + defer s.mu.Unlock() + if s.opened { + return nil + } + s.opened = true for _, cluster := range s.clusters { - cluster.PointsWriter = s.PointsWriter - cluster.LogService = s.LogService - cluster.AuthService = s.AuthService - cluster.ClientCreator = s.ClientCreator - cluster.randReader = s.RandReader - err := cluster.Open() - if err != nil { + s.assignServiceToCluster(cluster) + if err := cluster.Open(); err != nil { return err } } @@ -179,15 +122,136 @@ func (s *Service) Open() error { }, } - err := s.HTTPDService.AddRoutes(s.routes) - if err != nil { + if err := s.HTTPDService.AddRoutes(s.routes); err != nil { return errors.Wrap(err, "adding API routes") } // Revoke any tokens for removed clusters. 
- err = s.revokeClusterTokens() + err := s.revokeClusterTokens() return errors.Wrap(err, "revoking old cluster tokens") } +func (s *Service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + if !s.opened { + return nil + } + s.opened = false + s.HTTPDService.DelRoutes(s.routes) + var lastErr error + for _, cluster := range s.clusters { + err := cluster.Close() + if err != nil { + lastErr = err + } + } + return lastErr +} + +func (s *Service) Update(newConfigs []interface{}) error { + s.mu.Lock() + defer s.mu.Unlock() + configs := make([]Config, len(newConfigs)) + for i, c := range newConfigs { + if config, ok := c.(Config); ok { + configs[i] = config + } else { + return fmt.Errorf("unexpected config object type, got %T exp %T", c, config) + } + } + if err := s.updateConfigs(configs); err != nil { + return err + } + // Revoke any tokens for removed clusters. + err := s.revokeClusterTokens() + return errors.Wrap(err, "revoking old cluster tokens") +} + +// updateConfigs updates the running configuration for the various clusters. +// Must have the lock to call. +func (s *Service) updateConfigs(configs []Config) error { + removedClusters := make(map[string]*influxdbCluster, len(configs)) + s.defaultInfluxDB = "" + enabledCount := 0 + for _, c := range configs { + cluster, exists := s.clusters[c.Name] + if !c.Enabled { + if exists { + removedClusters[c.Name] = cluster + } + // Skip disabled configs + continue + } + enabledCount++ + if exists { + if err := cluster.Update(c); err != nil { + return errors.Wrapf(err, "failed to update cluster %s", c.Name) + } + } else { + var err error + cluster, err = newInfluxDBCluster(c, s.hostname, s.clusterID, s.subName, s.httpPort, s.useTokens, s.logger) + if err != nil { + return err + } + s.assignServiceToCluster(cluster) + if s.opened { + if err := cluster.Open(); err != nil { + return err + } + } + s.clusters[c.Name] = cluster + } + if c.Default { + s.defaultInfluxDB = c.Name + } + } + // If only one enabled cluster assume it is the default + if enabledCount == 1 { + for _, c := range configs { + if c.Enabled { + s.defaultInfluxDB = c.Name + } + } + } + if enabledCount > 0 && s.defaultInfluxDB == "" { + return errors.New("no default cluster found") + } + + // Find any deleted clusters + for name, cluster := range s.clusters { + found := false + for _, c := range configs { + if c.Name == name { + found = true + break + } + } + if !found { + removedClusters[name] = cluster + } + } + + // Unlink/Close/Delete all removed clusters + for name, cluster := range removedClusters { + if err := cluster.UnlinkSubscriptions(); err != nil { + s.logger.Printf("E! failed to unlink subscriptions for cluster %s: %s", name, err) + } + if err := cluster.Close(); err != nil { + s.logger.Printf("E! failed to close cluster %s: %s", name, err) + } + delete(s.clusters, name) + } + return nil +} + +func (s *Service) assignServiceToCluster(cluster *influxdbCluster) { + cluster.PointsWriter = s.PointsWriter + cluster.LogService = s.LogService + cluster.AuthService = s.AuthService + cluster.ClientCreator = s.ClientCreator + cluster.randReader = s.RandReader +} + // Refresh the subscriptions linking for all clusters. 
func (s *Service) handleSubscriptions(w http.ResponseWriter, r *http.Request) { err := s.LinkSubscriptions() @@ -198,10 +262,10 @@ func (s *Service) handleSubscriptions(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -// Trigger a linkSubscriptions event for all clusters +// Trigger a LinkSubscriptions event for all clusters func (s *Service) LinkSubscriptions() error { for clusterName, cluster := range s.clusters { - err := cluster.linkSubscriptions() + err := cluster.LinkSubscriptions() if err != nil { return errors.Wrapf(err, "linking cluster %s", clusterName) } @@ -223,41 +287,32 @@ func (s *Service) revokeClusterTokens() error { // Revoke invalid token s.AuthService.RevokeSubscriptionAccess(token) } else if _, ok := s.clusters[clusterName]; !ok { - // Revoke token for old non existant cluster + // Revoke token for old non existent or disabled cluster s.AuthService.RevokeSubscriptionAccess(token) } } return nil } -func (s *Service) Close() error { - s.HTTPDService.DelRoutes(s.routes) - var lastErr error - for _, cluster := range s.clusters { - err := cluster.Close() - if err != nil { - lastErr = err - } - } - return lastErr -} - -func (s *Service) NewDefaultClient() (influxdb.Client, error) { - return s.clusters[s.defaultInfluxDB].NewClient() -} - +// NewNamedClient returns a new client for the given name or the default client if the name is empty. func (s *Service) NewNamedClient(name string) (influxdb.Client, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if name == "" { + name = s.defaultInfluxDB + } cluster, ok := s.clusters[name] if !ok { return nil, fmt.Errorf("no such InfluxDB config %s", name) } - return cluster.NewClient() + return cluster.NewClient(), nil } type influxdbCluster struct { clusterName string - configs []influxdb.HTTPConfig + influxdbConfig influxdb.Config + client influxdb.ClientUpdater i int configSubs map[subEntry]bool exConfigSubs map[subEntry]bool @@ -279,6 +334,8 @@ type influxdbCluster struct { subSyncTicker *time.Ticker services map[subEntry]openCloser + randReader io.Reader + PointsWriter interface { WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error } @@ -286,17 +343,19 @@ type influxdbCluster struct { NewLogger(string, int) *log.Logger } ClientCreator interface { - Create(influxdb.HTTPConfig) (influxdb.Client, error) + Create(influxdb.Config) (influxdb.ClientUpdater, error) } AuthService interface { GrantSubscriptionAccess(token, db, rp string) error ListSubscriptionTokens() ([]string, error) RevokeSubscriptionAccess(token string) error } + ctxMu sync.Mutex + ctx context.Context + cancelCtx context.CancelFunc - randReader io.Reader - - mu sync.Mutex + mu sync.RWMutex + opened bool } type openCloser interface { @@ -315,33 +374,127 @@ type subInfo struct { Destinations []string } -func (s *influxdbCluster) Open() error { - s.mu.Lock() - if !s.disableSubs { - if s.subscriptionSyncInterval != 0 { - s.subSyncTicker = time.NewTicker(s.subscriptionSyncInterval) - go func() { - for _ = range s.subSyncTicker.C { - s.linkSubscriptions() - } - }() +func newInfluxDBCluster(c Config, hostname, clusterID, subName string, httpPort int, useTokens bool, l *log.Logger) (*influxdbCluster, error) { + if c.InsecureSkipVerify { + l.Printf("W! 
Using InsecureSkipVerify when connecting to InfluxDB @ %v this is insecure!", c.URLs) + } + config, err := httpConfig(c) + if err != nil { + return nil, err + } + subs := subsFromConfig(subName, c.Subscriptions) + exSubs := subsFromConfig(subName, c.ExcludedSubscriptions) + port := httpPort + if c.HTTPPort != 0 { + port = c.HTTPPort + } + host := hostname + if c.KapacitorHostname != "" { + host = c.KapacitorHostname + } + return &influxdbCluster{ + clusterName: c.Name, + influxdbConfig: config, + configSubs: subs, + exConfigSubs: exSubs, + hostname: host, + httpPort: port, + logger: l, + udpBind: c.UDPBind, + udpBuffer: c.UDPBuffer, + udpReadBuffer: c.UDPReadBuffer, + startupTimeout: time.Duration(c.StartUpTimeout), + subscriptionSyncInterval: time.Duration(c.SubscriptionSyncInterval), + clusterID: clusterID, + subName: subName, + disableSubs: c.DisableSubscriptions, + protocol: c.SubscriptionProtocol, + runningSubs: make(map[subEntry]bool, len(c.Subscriptions)), + services: make(map[subEntry]openCloser, len(c.Subscriptions)), + // Do not use tokens for non http protocols + useTokens: useTokens && (c.SubscriptionProtocol == "http" || c.SubscriptionProtocol == "https"), + }, nil +} + +func httpConfig(c Config) (influxdb.Config, error) { + tlsConfig, err := getTLSConfig(c.SSLCA, c.SSLCert, c.SSLKey, c.InsecureSkipVerify) + if err != nil { + return influxdb.Config{}, errors.Wrap(err, "invalid TLS options") + } + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + var credentials influxdb.Credentials + if c.Username != "" { + credentials = influxdb.Credentials{ + Method: influxdb.UserAuthentication, + Username: c.Username, + Password: c.Password, } } - // Release lock so we can call linkSubscriptions. - s.mu.Unlock() - return s.linkSubscriptions() + return influxdb.Config{ + URLs: c.URLs, + Timeout: time.Duration(c.Timeout), + Transport: tr, + Credentials: credentials, + }, nil } -func (s *influxdbCluster) Close() error { - s.mu.Lock() - defer s.mu.Unlock() +func subsFromConfig(subName string, s map[string][]string) map[subEntry]bool { + subs := make(map[subEntry]bool, len(s)) + for cluster, rps := range s { + for _, rp := range rps { + se := subEntry{cluster, rp, subName} + subs[se] = true + } + } + return subs +} + +func (c *influxdbCluster) Open() error { + ctx, cancel := c.setupContext() + defer cancel() - if s.subSyncTicker != nil { - s.subSyncTicker.Stop() + c.mu.Lock() + defer c.mu.Unlock() + + if c.opened { + return nil + } + c.opened = true + + if cli, err := c.ClientCreator.Create(c.influxdbConfig); err != nil { + return errors.Wrap(err, "failed to create client") + } else { + c.client = cli } + c.watchSubs() + return c.linkSubscriptions(ctx) +} + +func (c *influxdbCluster) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if !c.opened { + return nil + } + c.opened = false + + if c.subSyncTicker != nil { + c.subSyncTicker.Stop() + } + + return c.closeServices() +} + +// closeServices closes all running services. +// Must have lock to call. 
+func (c *influxdbCluster) closeServices() error { var lastErr error - for _, service := range s.services { + for se, service := range c.services { + delete(c.runningSubs, se) + delete(c.services, se) err := service.Close() if err != nil { lastErr = err @@ -350,52 +503,229 @@ func (s *influxdbCluster) Close() error { return lastErr } -func (s *influxdbCluster) Addr() string { - config := s.configs[s.i] - s.i = (s.i + 1) % len(s.configs) - return config.URL -} +func (c *influxdbCluster) Update(conf Config) error { + // Setup context before getting lock + ctx, cancel := c.setupContext() -func (s *influxdbCluster) NewClient() (c influxdb.Client, err error) { - tries := 0 - for tries < len(s.configs) { - tries++ - config := s.configs[s.i] - s.i = (s.i + 1) % len(s.configs) - c, err = s.ClientCreator.Create(config) - if err != nil { - continue + c.mu.Lock() + defer c.mu.Unlock() + + if conf.InsecureSkipVerify { + c.logger.Printf("W! Using InsecureSkipVerify when connecting to InfluxDB @ %v this is insecure!", conf.URLs) + } + if conf.HTTPPort != 0 { + c.httpPort = conf.HTTPPort + } + if conf.KapacitorHostname != "" { + c.hostname = conf.KapacitorHostname + } + + reset := false + resetServices := func() { + // Close services and let them get re-opened during linking. + if !reset { + c.closeServices() + reset = true } - _, _, err = c.Ping(config.Timeout) + } + + if c.udpBind != conf.UDPBind { + c.udpBind = conf.UDPBind + // UDP bind changed + resetServices() + } + if c.udpBuffer != conf.UDPBuffer { + c.udpBuffer = conf.UDPBuffer + // UDP buffer changed + resetServices() + } + if c.udpReadBuffer != conf.UDPReadBuffer { + c.udpReadBuffer = conf.UDPReadBuffer + // UDP read buffer changed + resetServices() + } + + // Check if disabled status changed. + unlinkDone := make(chan struct{}) + if c.disableSubs != conf.DisableSubscriptions { + c.disableSubs = conf.DisableSubscriptions + if c.opened && conf.DisableSubscriptions { + // Subscriptions have been disabled, unlink. + go func() { + c.mu.Lock() + defer c.mu.Unlock() + defer close(unlinkDone) + c.unlinkSubscriptions() + }() + } else { + close(unlinkDone) + } + } else { + close(unlinkDone) + } + + // Check if subscriptions sync interval changed. + if i := time.Duration(conf.SubscriptionSyncInterval); c.subscriptionSyncInterval != i { + c.subscriptionSyncInterval = i + c.watchSubs() + } + + c.startupTimeout = time.Duration(conf.StartUpTimeout) + c.protocol = conf.SubscriptionProtocol + var err error + c.influxdbConfig, err = httpConfig(conf) + if err != nil { + return err + } + if c.client != nil { + err := c.client.Update(c.influxdbConfig) if err != nil { - continue + return errors.Wrap(err, "failed to update client") } - return } - return + c.configSubs = subsFromConfig(c.subName, conf.Subscriptions) + c.exConfigSubs = subsFromConfig(c.subName, conf.ExcludedSubscriptions) + + // Run linkSubscriptions in the background as it can take a while + // because of validateClientWithBackoff. + if c.opened { + go func() { + c.mu.Lock() + defer c.mu.Unlock() + defer cancel() + + // Wait for any unlinking to finish + <-unlinkDone + + err := c.linkSubscriptions(ctx) + if err != nil { + c.logger.Printf("E! failed to link subscriptions for cluster %s: %v", c.clusterName, err) + } + }() + } + + return nil } -func (s *influxdbCluster) linkSubscriptions() error { - if s.disableSubs { - return nil +// watchSubs setups the goroutine to watch the subscriptions and continuously link them. +// The caller must have the lock. 
+func (c *influxdbCluster) watchSubs() { + if c.subSyncTicker != nil { + c.subSyncTicker.Stop() } - s.mu.Lock() - defer s.mu.Unlock() - s.logger.Println("D! linking subscriptions for cluster", s.clusterName) + if !c.disableSubs && c.subscriptionSyncInterval != 0 { + c.subSyncTicker = time.NewTicker(c.subscriptionSyncInterval) + ticker := c.subSyncTicker + go func() { + for range ticker.C { + c.LinkSubscriptions() + } + }() + } +} + +func (c *influxdbCluster) NewClient() influxdb.Client { + return c.client +} + +// validateClientWithBackoff repeatedly calls client.Ping until either +// a successful response or the context is canceled. +func (c *influxdbCluster) validateClientWithBackoff(ctx context.Context) error { b := backoff.NewExponentialBackOff() - b.MaxElapsedTime = s.startupTimeout + b.MaxElapsedTime = c.startupTimeout ticker := backoff.NewTicker(b) - var err error - var cli influxdb.Client - for range ticker.C { - cli, err = s.NewClient() - if err != nil { - s.logger.Println("D! failed to connect to InfluxDB, retrying... ", err) - continue + defer ticker.Stop() + done := ctx.Done() + for { + select { + case <-done: + return errors.New("canceled") + case _, ok := <-ticker.C: + if !ok { + return errors.New("failed to connect to InfluxDB, retry limit reached") + } + _, _, err := c.client.Ping(ctx) + if err != nil { + c.logger.Println("D! failed to connect to InfluxDB, retrying... ", err) + continue + } + return nil + } + } +} + +// UnlinkSubscriptions acquires the lock and then unlinks the subscriptions. +func (c *influxdbCluster) UnlinkSubscriptions() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.unlinkSubscriptions() +} + +// unlinkSubscriptions unlinks the subscriptions; you must have the lock to call this function. +func (c *influxdbCluster) unlinkSubscriptions() error { + c.logger.Println("D! unlinking subscriptions for cluster", c.clusterName) + // Get all existing subscriptions + resp, err := c.execQuery(&influxql.ShowSubscriptionsStatement{}) + if err != nil { + return err + } + for _, res := range resp.Results { + for _, series := range res.Series { + for _, v := range series.Values { + se := subEntry{ + db: series.Name, + } + for i, c := range series.Columns { + switch c { + case "retention_policy": + se.rp = v[i].(string) + case "name": + se.name = v[i].(string) + } + } + if se.name == c.subName { + c.dropSub(se.name, se.db, se.rp) + c.closeSub(se) + } + } } - ticker.Stop() - break } + return nil +} + +// setupContext returns a new context; the previous context, if it exists, is canceled. +// Must be called without the mu lock. +func (c *influxdbCluster) setupContext() (context.Context, context.CancelFunc) { + // Check existing context + c.ctxMu.Lock() + defer c.ctxMu.Unlock() + if c.ctx != nil { + // Cancel existing context + c.cancelCtx() + } + + c.ctx, c.cancelCtx = context.WithCancel(context.Background()) + return c.ctx, c.cancelCtx +} + +// LinkSubscriptions acquires the lock and then links the subscriptions. +func (c *influxdbCluster) LinkSubscriptions() error { + ctx, cancel := c.setupContext() + defer cancel() + + c.mu.Lock() + defer c.mu.Unlock() + return c.linkSubscriptions(ctx) +} + +// linkSubscriptions links the subscriptions; you must have the lock to call this method. +func (c *influxdbCluster) linkSubscriptions(ctx context.Context) error { + if c.disableSubs { + return nil + } + + c.logger.Println("D! 
linking subscriptions for cluster", c.clusterName) + err := c.validateClientWithBackoff(ctx) if err != nil { return err } @@ -404,7 +734,7 @@ func (s *influxdbCluster) linkSubscriptions() error { // Get all databases and retention policies var allSubs []subEntry - resp, err := s.execQuery(cli, &influxql.ShowDatabasesStatement{}) + resp, err := c.execQuery(&influxql.ShowDatabasesStatement{}) if err != nil { return err } @@ -414,7 +744,7 @@ func (s *influxdbCluster) linkSubscriptions() error { for _, v := range dbs { db := v[0].(string) - rpResp, err := s.execQuery(cli, &influxql.ShowRetentionPoliciesStatement{ + rpResp, err := c.execQuery(&influxql.ShowRetentionPoliciesStatement{ Database: db, }) if err != nil { @@ -428,7 +758,7 @@ func (s *influxdbCluster) linkSubscriptions() error { se := subEntry{ db: db, rp: rpname, - name: s.subName, + name: c.subName, } allSubs = append(allSubs, se) } @@ -437,7 +767,7 @@ func (s *influxdbCluster) linkSubscriptions() error { } // Get all existing subscriptions - resp, err = s.execQuery(cli, &influxql.ShowSubscriptionsStatement{}) + resp, err = c.execQuery(&influxql.ShowSubscriptionsStatement{}) if err != nil { return err } @@ -469,32 +799,32 @@ func (s *influxdbCluster) linkSubscriptions() error { if se.name == legacySubName { // This is an old-style subscription, // drop it and recreate with new name. - err := s.dropSub(cli, se.name, se.db, se.rp) + err := c.dropSub(se.name, se.db, se.rp) if err != nil { return err } - se.name = s.subName - err = s.createSub(cli, se.name, se.db, se.rp, si.Mode, si.Destinations) + se.name = c.subName + err = c.createSub(se.name, se.db, se.rp, si.Mode, si.Destinations) if err != nil { return err } existingSubs[se] = si - } else if se.name == s.clusterID { + } else if se.name == c.clusterID { // This is an just the cluster ID // drop it and recreate with new name. - err := s.dropSub(cli, se.name, se.db, se.rp) - se.name = s.subName - err = s.createSub(cli, se.name, se.db, se.rp, si.Mode, si.Destinations) + err := c.dropSub(se.name, se.db, se.rp) + se.name = c.subName + err = c.createSub(se.name, se.db, se.rp, si.Mode, si.Destinations) if err != nil { return err } existingSubs[se] = si - } else if se.name == s.subName { + } else if se.name == c.subName { // Check if the something has changed or is invalid. - if s.changedOrInvalid(se, si) { + if c.changedOrInvalid(se, si) { // Something changed or is invalid, drop the sub and let it get recreated - s.dropSub(cli, se.name, se.db, se.rp) - s.closeSub(se) + c.dropSub(se.name, se.db, se.rp) + c.closeSub(se) } else { existingSubs[se] = si } @@ -503,82 +833,88 @@ func (s *influxdbCluster) linkSubscriptions() error { } } - // Compare existing subs to configured list - all := len(s.configSubs) == 0 + // start any missing subscriptions + // and drop any extra subs for se, si := range existingSubs { - if (s.configSubs[se] || all) && !s.exConfigSubs[se] && !s.runningSubs[se] { + shouldExist := c.shouldSubExist(se) + if shouldExist && !c.runningSubs[se] { // Check if this kapacitor instance is in the list of hosts for _, dest := range si.Destinations { u, err := url.Parse(dest) if err != nil { - s.logger.Println("E! invalid URL in subscription destinations:", err) + c.logger.Println("E! 
invalid URL in subscription destinations:", err) continue } host, port, err := net.SplitHostPort(u.Host) - if host == s.hostname { + if host == c.hostname { numSubscriptions++ if u.Scheme == "udp" { - _, err := s.startUDPListener(se, port) + _, err := c.startUDPListener(se, port) if err != nil { - s.logger.Println("E! failed to start UDP listener:", err) + c.logger.Println("E! failed to start UDP listener:", err) } } - s.runningSubs[se] = true + c.runningSubs[se] = true break } } + } else if !shouldExist { + // Drop extra sub + c.dropSub(se.name, se.db, se.rp) + // Remove from existing list + delete(existingSubs, se) } } + // create and start any new subscriptions - // stop any removed subscriptions for _, se := range allSubs { _, exists := existingSubs[se] - // If we have been configured to subscribe and the subscription is not started yet. - if (s.configSubs[se] || all) && !s.exConfigSubs[se] && !(s.runningSubs[se] && exists) { + // If we have been configured to subscribe and the subscription is not created/started yet. + if c.shouldSubExist(se) && !(c.runningSubs[se] && exists) { var destination string - switch s.protocol { + switch c.protocol { case "http", "https": - if s.useTokens { + if c.useTokens { // Generate token - token, err := s.generateRandomToken() + token, err := c.generateRandomToken() if err != nil { return errors.Wrap(err, "generating token") } - err = s.AuthService.GrantSubscriptionAccess(token, se.db, se.rp) + err = c.AuthService.GrantSubscriptionAccess(token, se.db, se.rp) if err != nil { return err } u := url.URL{ - Scheme: s.protocol, + Scheme: c.protocol, User: url.UserPassword(httpd.SubscriptionUser, token), - Host: fmt.Sprintf("%s:%d", s.hostname, s.httpPort), + Host: fmt.Sprintf("%s:%d", c.hostname, c.httpPort), } destination = u.String() } else { u := url.URL{ - Scheme: s.protocol, - Host: fmt.Sprintf("%s:%d", s.hostname, s.httpPort), + Scheme: c.protocol, + Host: fmt.Sprintf("%s:%d", c.hostname, c.httpPort), } destination = u.String() } case "udp": - addr, err := s.startUDPListener(se, "0") + addr, err := c.startUDPListener(se, "0") if err != nil { - s.logger.Println("E! failed to start UDP listener:", err) + c.logger.Println("E! failed to start UDP listener:", err) } - destination = fmt.Sprintf("udp://%s:%d", s.hostname, addr.Port) + destination = fmt.Sprintf("udp://%s:%d", c.hostname, addr.Port) } numSubscriptions++ mode := "ANY" destinations := []string{destination} - err = s.createSub(cli, se.name, se.db, se.rp, mode, destinations) + err = c.createSub(se.name, se.db, se.rp, mode, destinations) if err != nil { return err } // Mark as running - s.runningSubs[se] = true + c.runningSubs[se] = true // Update exiting set existingSubs[se] = subInfo{ Mode: mode, @@ -588,13 +924,13 @@ func (s *influxdbCluster) linkSubscriptions() error { } // revoke any extra tokens - tokens, err := s.AuthService.ListSubscriptionTokens() + tokens, err := c.AuthService.ListSubscriptionTokens() if err != nil { return errors.Wrap(err, "getting existing subscription tokens") } // populate set of existing tokens. 
existingTokens := make(map[string]bool, len(existingSubs)) - if s.useTokens { + if c.useTokens { for _, si := range existingSubs { u, err := url.Parse(si.Destinations[0]) if err != nil || u.User == nil { @@ -609,24 +945,24 @@ func (s *influxdbCluster) linkSubscriptions() error { for _, token := range tokens { clusterName, _, err := splitToken(token) // Skip invalid token or token from another cluster - if err != nil || clusterName != s.clusterName { + if err != nil || clusterName != c.clusterName { continue } // If the token is not part of the existing set we need to revoke if !existingTokens[token] { - s.AuthService.RevokeSubscriptionAccess(token) + c.AuthService.RevokeSubscriptionAccess(token) } } // Close any subs for dbs that have been dropped - for se, running := range s.runningSubs { + for se, running := range c.runningSubs { if !running { continue } if _, exists := existingSubs[se]; !exists { - err := s.closeSub(se) + err := c.closeSub(se) if err != nil { - s.logger.Printf("E! failed to close service for %v: %s", se, err) + c.logger.Printf("E! failed to close service for %v: %s", se, err) } } } @@ -635,8 +971,12 @@ func (s *influxdbCluster) linkSubscriptions() error { return nil } +func (c *influxdbCluster) shouldSubExist(se subEntry) bool { + return (len(c.configSubs) == 0 || c.configSubs[se]) && !c.exConfigSubs[se] +} + // Determine whether a subscription has differing values from the config. -func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { +func (c *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { // Validate destinations if len(si.Destinations) == 0 { return true @@ -645,7 +985,7 @@ func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { if err != nil { return true } - if u.Scheme != s.protocol { + if u.Scheme != c.protocol { return true } @@ -653,7 +993,7 @@ func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { if err != nil { return true } - if host != s.hostname { + if host != c.hostname { return true } @@ -664,14 +1004,14 @@ func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { if err != nil { return true } - if int(pn) != s.httpPort { + if int(pn) != c.httpPort { return true } // Further checks for the user token - if !s.useTokens && u.User != nil { + if !c.useTokens && u.User != nil { return true } - if s.useTokens { + if c.useTokens { if u.User == nil || u.User.Username() != httpd.SubscriptionUser { return true } @@ -680,7 +1020,7 @@ func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { return true } clusterName, _, err := splitToken(t) - if err != nil || clusterName != s.clusterName { + if err != nil || clusterName != c.clusterName { return true } } @@ -689,13 +1029,12 @@ func (s *influxdbCluster) changedOrInvalid(se subEntry, si subInfo) bool { } // Close the service and stop tracking it. -func (s *influxdbCluster) closeSub(se subEntry) (err error) { - if service, ok := s.services[se]; ok { - s.logger.Println("D! closing service for", se) +func (c *influxdbCluster) closeSub(se subEntry) (err error) { + if service, ok := c.services[se]; ok { err = service.Close() } - delete(s.runningSubs, se) - delete(s.services, se) + delete(c.runningSubs, se) + delete(c.services, se) return } @@ -720,18 +1059,18 @@ func splitToken(token string) (string, string, error) { // Generate a token tagged with the cluster name. 
// Tokens have the form <clusterName>;<randomBytes>, base64 encoded. -func (s *influxdbCluster) generateRandomToken() (string, error) { - l := len(s.clusterName) +func (c *influxdbCluster) generateRandomToken() (string, error) { + l := len(c.clusterName) tokenBytes := make([]byte, l+tokenSize+1) - copy(tokenBytes[:l], []byte(s.clusterName)) + copy(tokenBytes[:l], []byte(c.clusterName)) tokenBytes[l] = tokenDelimiter - if _, err := io.ReadFull(s.randReader, tokenBytes[l+1:]); err != nil { + if _, err := io.ReadFull(c.randReader, tokenBytes[l+1:]); err != nil { return "", err } return base64.RawURLEncoding.EncodeToString(tokenBytes), nil } -func (s *influxdbCluster) createSub(cli influxdb.Client, name, cluster, rp, mode string, destinations []string) error { +func (c *influxdbCluster) createSub(name, cluster, rp, mode string, destinations []string) error { var buf bytes.Buffer for i, dst := range destinations { if i != 0 { @@ -741,8 +1080,7 @@ func (s *influxdbCluster) createSub(cli influxdb.Client, name, cluster, rp, mode buf.Write([]byte(dst)) buf.Write([]byte("'")) } - _, err := s.execQuery( - cli, + _, err := c.execQuery( &influxql.CreateSubscriptionStatement{ Name: name, Database: cluster, @@ -754,9 +1092,8 @@ func (s *influxdbCluster) createSub(cli influxdb.Client, name, cluster, rp, mode return errors.Wrapf(err, "creating sub %s for db %q and rp %q", name, cluster, rp) } -func (s *influxdbCluster) dropSub(cli influxdb.Client, name, cluster, rp string) (err error) { - _, err = s.execQuery( - cli, +func (c *influxdbCluster) dropSub(name, cluster, rp string) (err error) { + _, err = c.execQuery( &influxql.DropSubscriptionStatement{ Name: name, Database: cluster, @@ -766,38 +1103,35 @@ func (s *influxdbCluster) dropSub(cli influxdb.Client, name, cluster, rp string) return } -func (s *influxdbCluster) startUDPListener(se subEntry, port string) (*net.UDPAddr, error) { - c := udp.Config{} - c.Enabled = true - c.BindAddress = fmt.Sprintf("%s:%s", s.udpBind, port) - c.Database = se.db - c.RetentionPolicy = se.rp - c.Buffer = s.udpBuffer - c.ReadBuffer = s.udpReadBuffer +func (c *influxdbCluster) startUDPListener(se subEntry, port string) (*net.UDPAddr, error) { + conf := udp.Config{} + conf.Enabled = true + conf.BindAddress = fmt.Sprintf("%s:%s", c.udpBind, port) + conf.Database = se.db + conf.RetentionPolicy = se.rp + conf.Buffer = c.udpBuffer + conf.ReadBuffer = c.udpReadBuffer - l := s.LogService.NewLogger(fmt.Sprintf("[udp:%s.%s] ", se.db, se.rp), log.LstdFlags) - service := udp.NewService(c, l) - service.PointsWriter = s.PointsWriter + l := c.LogService.NewLogger(fmt.Sprintf("[udp:%s.%s] ", se.db, se.rp), log.LstdFlags) + service := udp.NewService(conf, l) + service.PointsWriter = c.PointsWriter err := service.Open() if err != nil { return nil, err } - s.services[se] = service - s.logger.Println("I! started UDP listener for", se.db, se.rp) + c.services[se] = service + c.logger.Println("I! 
started UDP listener for", se.db, se.rp) return service.Addr(), nil } -func (s *influxdbCluster) execQuery(cli influxdb.Client, q influxql.Statement) (*influxdb.Response, error) { +func (c *influxdbCluster) execQuery(q influxql.Statement) (*influxdb.Response, error) { query := influxdb.Query{ Command: q.String(), } - resp, err := cli.Query(query) + resp, err := c.client.Query(query) if err != nil { return nil, err } - if err := resp.Error(); err != nil { - return nil, err - } return resp, nil } @@ -817,7 +1151,6 @@ func getTLSConfig( "Could not load TLS client key/certificate: %s", err) } - t.Certificates = []tls.Certificate{cert} } else if SSLCert != "" { return nil, errors.New("Must provide both key and cert files: only cert file provided.") diff --git a/services/influxdb/service_test.go b/services/influxdb/service_test.go index 9fff39976..b93435159 100644 --- a/services/influxdb/service_test.go +++ b/services/influxdb/service_test.go @@ -1,6 +1,7 @@ package influxdb_test import ( + "context" "encoding/base64" "errors" "fmt" @@ -79,6 +80,10 @@ func TestService_Open_LinkSubscriptions(t *testing.T) { NoPassword bool WrongCluster bool } + type partialConfig struct { + configSubs map[string][]string + configExSubs map[string][]string + } testCases := map[string]struct { useTokens bool @@ -91,6 +96,9 @@ func TestService_Open_LinkSubscriptions(t *testing.T) { revokedTokens []string subChanged subChanged + // apply new config between rounds + partialConfigs map[string]partialConfig + // Second round secondClusters map[string]clusterInfo secondTokens []string @@ -742,16 +750,92 @@ func TestService_Open_LinkSubscriptions(t *testing.T) { rp: "rpA", }}, }, + "ConfigChange_NewSubs": { + clusters: map[string]clusterInfo{ + testClusterName: { + dbrps: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + subs: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + }, + }, + secondClusters: map[string]clusterInfo{ + testClusterName: { + dbrps: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + subs: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + }, + }, + partialConfigs: map[string]partialConfig{ + testClusterName: { + configSubs: map[string][]string{ + "db1": {"rpA"}, + }, + }, + }, + secondDropSubs: []string{ + `DROP SUBSCRIPTION "` + testSubName + `" ON db1.rpB`, + `DROP SUBSCRIPTION "` + testSubName + `" ON db2.rpC`, + `DROP SUBSCRIPTION "` + testSubName + `" ON db2.rpD`, + }, + }, + "ConfigChange_NewExcludes": { + clusters: map[string]clusterInfo{ + testClusterName: { + dbrps: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + subs: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + }}, + secondClusters: map[string]clusterInfo{ + testClusterName: { + dbrps: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + subs: map[string][]string{ + "db1": []string{"rpA", "rpB"}, + "db2": []string{"rpC", "rpD"}, + }, + }}, + partialConfigs: map[string]partialConfig{ + testClusterName: { + configExSubs: map[string][]string{ + "db1": {"rpA"}, + }, + }, + }, + secondDropSubs: []string{ + `DROP SUBSCRIPTION "` + testSubName + `" ON db1.rpA`, + }, + }, } for testName, tc := range testCases { t.Log("starting test:", testName) log.Println("starting test:", testName) clusterNames := make([]string, 0, len(tc.clusters)) + 
clusterNameLookup := make(map[string]int, len(tc.clusters)) + i := 0 for clusterName := range tc.clusters { clusterNames = append(clusterNames, clusterName) + clusterNameLookup[clusterName] = i + i++ } - c := NewDefaultTestConfigs(clusterNames) - s, as, cs := NewTestService(c, "localhost", tc.useTokens) + defaultConfigs := NewDefaultTestConfigs(clusterNames) + s, as, cs := NewTestService(defaultConfigs, "localhost", tc.useTokens) // Define the active vars var activeClusters map[string]clusterInfo @@ -900,13 +984,15 @@ func TestService_Open_LinkSubscriptions(t *testing.T) { dropSubs = make(map[string]bool) grantedTokens = make(map[tokenGrant]bool) revokedTokens = make(map[string]bool) + + log.Println("D! first round") if err := s.Open(); err != nil { t.Fatal(err) } defer s.Close() validate( t, - testName, + testName+"-1", tc.createSubs, tc.dropSubs, tc.grantedTokens, @@ -926,11 +1012,24 @@ func TestService_Open_LinkSubscriptions(t *testing.T) { grantedTokens = make(map[tokenGrant]bool) revokedTokens = make(map[string]bool) + log.Println("D! second round") + if len(tc.partialConfigs) > 0 { + configs := make([]interface{}, 0, len(tc.partialConfigs)) + for name, pc := range tc.partialConfigs { + c := defaultConfigs[clusterNameLookup[name]] + c.Subscriptions = pc.configSubs + c.ExcludedSubscriptions = pc.configExSubs + configs = append(configs, c) + } + if err := s.Update(configs); err != nil { + t.Fatal(err) + } + } s.LinkSubscriptions() validate( t, - testName, + testName+"-2", tc.secondCreateSubs, tc.secondDropSubs, tc.secondGrantedTokens, @@ -1058,7 +1157,10 @@ func NewDefaultTestConfigs(clusters []string) []influxdb.Config { func NewTestService(configs []influxdb.Config, hostname string, useTokens bool) (*influxdb.Service, *authService, *clientCreator) { httpPort := 9092 l := ls.NewLogger("[test-influxdb] ", log.LstdFlags) - s := influxdb.NewService(configs, 0, httpPort, hostname, useTokens, l) + s, err := influxdb.NewService(configs, httpPort, hostname, useTokens, l) + if err != nil { + panic(err) + } s.LogService = ls s.HTTPDService = httpdService{} as := &authService{} @@ -1096,42 +1198,42 @@ func (a *authService) RevokeSubscriptionAccess(token string) error { type clientCreator struct { // Index for the order the client was created, matches the order clusters are created. 
- CreateFunc func(influxcli.HTTPConfig) (influxcli.Client, error) + CreateFunc func(influxcli.Config) (influxcli.ClientUpdater, error) // Cient functions passed down to any created client - PingFunc func(timeout time.Duration) (time.Duration, string, error) - WriteFunc func(bp influxcli.BatchPoints) error - QueryFunc func(clusterName string, q influxcli.Query) (*influxcli.Response, error) - CloseFunc func() error + PingFunc func(ctx context.Context) (time.Duration, string, error) + WriteFunc func(bp influxcli.BatchPoints) error + QueryFunc func(clusterName string, q influxcli.Query) (*influxcli.Response, error) + UpdateFunc func(influxcli.Config) error } -func (c *clientCreator) Create(config influxcli.HTTPConfig) (influxcli.Client, error) { +func (c *clientCreator) Create(config influxcli.Config) (influxcli.ClientUpdater, error) { if c.CreateFunc != nil { return c.CreateFunc(config) } // Retrieve cluster name from URL - u, _ := url.Parse(config.URL) + u, _ := url.Parse(config.URLs[0]) cli := influxDBClient{ clusterName: u.Host, PingFunc: c.PingFunc, WriteFunc: c.WriteFunc, QueryFunc: c.QueryFunc, - CloseFunc: c.CloseFunc, + UpdateFunc: c.UpdateFunc, } return cli, nil } type influxDBClient struct { clusterName string - PingFunc func(timeout time.Duration) (time.Duration, string, error) + PingFunc func(ctx context.Context) (time.Duration, string, error) WriteFunc func(bp influxcli.BatchPoints) error QueryFunc func(clusterName string, q influxcli.Query) (*influxcli.Response, error) - CloseFunc func() error + UpdateFunc func(influxcli.Config) error } -func (c influxDBClient) Ping(timeout time.Duration) (time.Duration, string, error) { +func (c influxDBClient) Ping(ctx context.Context) (time.Duration, string, error) { if c.PingFunc != nil { - return c.PingFunc(timeout) + return c.PingFunc(ctx) } return 0, "testversion", nil } @@ -1147,9 +1249,9 @@ func (c influxDBClient) Query(q influxcli.Query) (*influxcli.Response, error) { } return &influxcli.Response{}, nil } -func (c influxDBClient) Close() error { - if c.CloseFunc != nil { - return c.CloseFunc() +func (c influxDBClient) Update(config influxcli.Config) error { + if c.UpdateFunc != nil { + return c.UpdateFunc(config) } return nil } diff --git a/services/k8s/client/client.go b/services/k8s/client/client.go index d57d2b790..6910187d0 100644 --- a/services/k8s/client/client.go +++ b/services/k8s/client/client.go @@ -12,6 +12,7 @@ import ( "path" "path/filepath" "sync" + "sync/atomic" "github.com/pkg/errors" ) @@ -49,6 +50,7 @@ type Client interface { // Scales returns an interface for interactive with Scale resources. // If namespace is empty the default client namespace will be used. Scales(namespace string) ScalesInterface + Update(c Config) error } // httpClient is a lightweight HTTP client for k8s resources. 
@@ -56,12 +58,11 @@ type Client interface { // so as to make replacing this client with an official client simpler // once https://github.com/kubernetes/kubernetes/issues/5660 is fixed type httpClient struct { - mu sync.Mutex - urls []*url.URL - namespace string - client *http.Client - current int - authHeader string + mu sync.RWMutex + config Config + urls []url.URL + client *http.Client + index int32 } func NewConfigInCluster() (Config, error) { @@ -97,53 +98,81 @@ func NewConfigInCluster() (Config, error) { return config, nil } -func NewInCluster() (Client, error) { - config, err := NewConfigInCluster() +func New(c Config) (Client, error) { + if c.Namespace == "" { + c.Namespace = NamespaceDefault + } + urls, err := parseURLs(c.URLs) if err != nil { return nil, err } - return New(config) + return &httpClient{ + config: c, + urls: urls, + client: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: c.TLSConfig, + }, + }, + }, nil } -func New(c Config) (Client, error) { - if len(c.URLs) == 0 { - return nil, fmt.Errorf("must provide at least one URL") - } - urls := make([]*url.URL, len(c.URLs)) - for i := range c.URLs { - u, err := url.Parse(c.URLs[i]) +func (c *httpClient) pickURL(urls []url.URL) url.URL { + i := atomic.LoadInt32(&c.index) + i = (i + 1) % int32(len(urls)) + atomic.StoreInt32(&c.index, i) + return urls[i] +} + +func parseURLs(urlStrs []string) ([]url.URL, error) { + urls := make([]url.URL, len(urlStrs)) + for i, urlStr := range urlStrs { + u, err := url.Parse(urlStr) if err != nil { - return nil, errors.Wrapf(err, "invalid url %q", c.URLs[i]) + return nil, errors.Wrapf(err, "invalid url %q", urlStr) } - urls[i] = u + urls[i] = *u } - tr := &http.Transport{ - TLSClientConfig: c.TLSConfig, - } - return &httpClient{ - urls: urls, - client: &http.Client{ - Transport: tr, - }, - namespace: c.Namespace, - authHeader: fmt.Sprintf("Bearer %s", c.Token), - }, nil + return urls, nil } -func (c *httpClient) nextURL() *url.URL { +func (c *httpClient) Update(new Config) error { c.mu.Lock() - u := c.urls[c.current] - c.current = (c.current + 1) % len(c.urls) - c.mu.Unlock() - return u + defer c.mu.Unlock() + + old := c.config + c.config = new + if c.config.Namespace == "" { + c.config.Namespace = NamespaceDefault + } + // Replace urls + urls, err := parseURLs(new.URLs) + if err != nil { + return err + } + c.urls = urls + + if old.TLSConfig != new.TLSConfig { + c.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: new.TLSConfig, + }, + } + } + return nil } func (c *httpClient) Do(r http.Request) (*http.Response, error) { - u := c.nextURL() + c.mu.RLock() + config := c.config + u := c.pickURL(c.urls) + client := c.client + c.mu.RUnlock() + r.URL.Host = u.Host r.URL.Scheme = u.Scheme - r.Header.Set("Authorization", c.authHeader) - resp, err := c.client.Do(&r) + r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", config.Token)) + resp, err := client.Do(&r) return resp, errors.Wrap(err, "k8s client request failed") } @@ -219,11 +248,10 @@ type Scales struct { func (c *httpClient) Scales(namespace string) ScalesInterface { if namespace == "" { - if c.namespace != "" { - namespace = c.namespace - } else { - namespace = NamespaceDefault - } + c.mu.RLock() + config := c.config + c.mu.RUnlock() + namespace = config.Namespace } return Scales{c: c, namespace: namespace} } diff --git a/services/k8s/config.go b/services/k8s/config.go index faa5ff366..854d70cf7 100644 --- a/services/k8s/config.go +++ b/services/k8s/config.go @@ -10,12 +10,12 @@ import ( ) type Config 
struct { - Enabled bool `toml:"enabled"` - InCluster bool `toml:"in-cluster"` - APIServers []string `toml:"api-servers"` - Token string `toml:"token"` - CAPath string `toml:"ca-path"` - Namespace string `toml:"namespace"` + Enabled bool `toml:"enabled" override:"enabled"` + InCluster bool `toml:"in-cluster" override:"in-cluster"` + APIServers []string `toml:"api-servers" override:"api-servers"` + Token string `toml:"token" override:"token,redact"` + CAPath string `toml:"ca-path" override:"ca-path"` + Namespace string `toml:"namespace" override:"namespace"` } func NewConfig() Config { diff --git a/services/k8s/service.go b/services/k8s/service.go index 56a5f1330..25e57c8fa 100644 --- a/services/k8s/service.go +++ b/services/k8s/service.go @@ -1,15 +1,18 @@ package k8s import ( + "fmt" "log" + "sync/atomic" "github.com/influxdata/kapacitor/services/k8s/client" "github.com/pkg/errors" ) type Service struct { - client client.Client - logger *log.Logger + configValue atomic.Value // Config + client client.Client + logger *log.Logger } func NewService(c Config, l *log.Logger) (*Service, error) { @@ -22,10 +25,12 @@ func NewService(c Config, l *log.Logger) (*Service, error) { return nil, errors.Wrap(err, "failed to create k8s client") } - return &Service{ + s := &Service{ client: cli, logger: l, - }, nil + } + s.configValue.Store(c) + return s, nil } func (s *Service) Open() error { @@ -35,6 +40,27 @@ func (s *Service) Close() error { return nil } -func (s *Service) Client() client.Client { - return s.client +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + c, ok := newConfig[0].(Config) + if !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } + + s.configValue.Store(c) + clientConfig, err := c.ClientConfig() + if err != nil { + return errors.Wrap(err, "failed to create k8s client config") + } + return s.client.Update(clientConfig) +} + +func (s *Service) Client() (client.Client, error) { + config := s.configValue.Load().(Config) + if !config.Enabled { + return nil, errors.New("service not enabled") + } + return s.client, nil } diff --git a/services/logging/loggingtest/logging.go b/services/logging/loggingtest/logging.go new file mode 100644 index 000000000..d42adc8e2 --- /dev/null +++ b/services/logging/loggingtest/logging.go @@ -0,0 +1,31 @@ +package loggingtest + +import ( + "io" + "log" + "os" + + "github.com/influxdata/kapacitor/services/logging" + "github.com/influxdata/wlog" +) + +type TestLogService struct{} + +func New() TestLogService { + return TestLogService{} +} + +func (l TestLogService) NewLogger(prefix string, flag int) *log.Logger { + return wlog.New(os.Stderr, prefix, flag) +} +func (l TestLogService) NewRawLogger(prefix string, flag int) *log.Logger { + return log.New(os.Stderr, prefix, flag) +} + +func (l TestLogService) NewStaticLevelLogger(prefix string, flag int, level logging.Level) *log.Logger { + return log.New(wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)), prefix, flag) +} + +func (l TestLogService) NewStaticLevelWriter(level logging.Level) io.Writer { + return wlog.NewStaticLevelWriter(os.Stderr, wlog.Level(level)) +} diff --git a/services/opsgenie/config.go b/services/opsgenie/config.go index e692cc0cf..2d7368d97 100644 --- a/services/opsgenie/config.go +++ b/services/opsgenie/config.go @@ -1,23 +1,29 @@ package opsgenie +import ( + "net/url" + + "github.com/pkg/errors" +) + const 
DefaultOpsGenieAPIURL = "https://api.opsgenie.com/v1/json/alert" const DefaultOpsGenieRecoveryURL = "https://api.opsgenie.com/v1/json/alert/note" type Config struct { // Whether to enable OpsGenie integration. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The OpsGenie API key. - APIKey string `toml:"api-key"` + APIKey string `toml:"api-key" override:"api-key,redact"` // The default Teams, can be overridden per alert. - Teams []string `toml:"teams"` + Teams []string `toml:"teams" override:"teams"` // The default Teams, can be overridden per alert. - Recipients []string `toml:"recipients"` + Recipients []string `toml:"recipients" override:"recipients"` // The OpsGenie API URL, should not need to be changed. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // The OpsGenie Recovery URL, you can change this based on which behavior you want a recovery to trigger (Add Notes, Close Alert, etc.) - RecoveryURL string `toml:"recovery_url"` + RecoveryURL string `toml:"recovery_url" override:"recovery_url"` // Whether every alert should automatically go to OpsGenie. - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` } func NewConfig() Config { @@ -26,3 +32,22 @@ func NewConfig() Config { RecoveryURL: DefaultOpsGenieRecoveryURL, } } + +func (c Config) Validate() error { + if c.URL == "" { + return errors.New("url cannot be empty") + } + if c.RecoveryURL == "" { + return errors.New("recovery_url cannot be empty") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid URL %q", c.URL) + } + if _, err := url.Parse(c.RecoveryURL); err != nil { + return errors.Wrapf(err, "invalid recovery_url %q", c.URL) + } + if c.Enabled && c.APIKey == "" { + return errors.New("api-key cannot be empty") + } + return nil +} diff --git a/services/opsgenie/service.go b/services/opsgenie/service.go index 9b6f67cbb..0b6eb08d3 100644 --- a/services/opsgenie/service.go +++ b/services/opsgenie/service.go @@ -5,34 +5,27 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" + "sync/atomic" "time" "github.com/influxdata/kapacitor" ) type Service struct { - apikey string - teams []string - recipients []string - url string - recovery_url string - global bool - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - teams: c.Teams, - recipients: c.Recipients, - apikey: c.APIKey, - url: c.URL + "/", - recovery_url: c.RecoveryURL + "/", - global: c.Global, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -43,15 +36,65 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Global() bool { - return s.global + c := s.config() + return c.Global } func (s *Service) Alert(teams []string, recipients []string, messageType, message, entityID string, t time.Time, details interface{}) error { + url, post, err := s.preparePost(teams, recipients, messageType, message, entityID, t, details) + if err != nil { + return 
err + } + + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Message string `json:"message"` + } + r := &response{Message: fmt.Sprintf("failed to understand OpsGenie response. code: %d content: %s", resp.StatusCode, string(body))} + b := bytes.NewReader(body) + dec := json.NewDecoder(b) + dec.Decode(r) + return errors.New(r.Message) + } + return nil +} + +func (s *Service) preparePost(teams []string, recipients []string, messageType, message, entityID string, t time.Time, details interface{}) (string, io.Reader, error) { + c := s.config() + if !c.Enabled { + return "", nil, errors.New("service not enabled") + } + ogData := make(map[string]interface{}) - url := s.url + url := c.URL + "/" - ogData["apiKey"] = s.apikey + ogData["apiKey"] = c.APIKey ogData["entity"] = entityID ogData["alias"] = entityID ogData["message"] = message @@ -67,20 +110,20 @@ func (s *Service) Alert(teams []string, recipients []string, messageType, messag switch messageType { case "RECOVERY": - url = s.recovery_url + url = c.RecoveryURL + "/" ogData["note"] = message } if details != nil { b, err := json.Marshal(details) if err != nil { - return err + return "", nil, err } ogData["description"] = string(b) } if len(teams) == 0 { - teams = s.teams + teams = c.Teams } if len(teams) > 0 { @@ -88,7 +131,7 @@ func (s *Service) Alert(teams []string, recipients []string, messageType, messag } if len(recipients) == 0 { - recipients = s.recipients + recipients = c.Recipients } if len(recipients) > 0 { @@ -100,27 +143,8 @@ func (s *Service) Alert(teams []string, recipients []string, messageType, messag enc := json.NewEncoder(&post) err := enc.Encode(ogData) if err != nil { - return err + return "", nil, err } - resp, err := http.Post(url, "application/json", &post) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Message string `json:"message"` - } - r := &response{Message: fmt.Sprintf("failed to understand OpsGenie response. code: %d content: %s", resp.StatusCode, string(body))} - b := bytes.NewReader(body) - dec := json.NewDecoder(b) - dec.Decode(r) - return errors.New(r.Message) - } - return nil + return url, &post, nil } diff --git a/services/pagerduty/config.go b/services/pagerduty/config.go index 546bea56e..958cff8bd 100644 --- a/services/pagerduty/config.go +++ b/services/pagerduty/config.go @@ -1,16 +1,22 @@ package pagerduty +import ( + "net/url" + + "github.com/pkg/errors" +) + const DefaultPagerDutyAPIURL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" type Config struct { // Whether PagerDuty integration is enabled. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The PagerDuty API URL, should not need to be changed. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // The PagerDuty service key. 
- ServiceKey string `toml:"service-key"` + ServiceKey string `toml:"service-key" override:"service-key,redact"` // Whether every alert should automatically go to PagerDuty - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` } func NewConfig() Config { @@ -18,3 +24,13 @@ func NewConfig() Config { URL: DefaultPagerDutyAPIURL, } } + +func (c Config) Validate() error { + if c.URL == "" { + return errors.New("url cannot be empty") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid URL %q", c.URL) + } + return nil +} diff --git a/services/pagerduty/service.go b/services/pagerduty/service.go index e6272a431..a0d921b83 100644 --- a/services/pagerduty/service.go +++ b/services/pagerduty/service.go @@ -5,30 +5,30 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" + "sync/atomic" "github.com/influxdata/kapacitor" ) type Service struct { + configValue atomic.Value + HTTPDService interface { URL() string } - serviceKey string - url string - global bool - logger *log.Logger + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - serviceKey: c.ServiceKey, - url: c.URL, - global: c.Global, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -39,24 +39,74 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Global() bool { - return s.global + c := s.config() + return c.Global } func (s *Service) Alert(serviceKey, incidentKey, desc string, level kapacitor.AlertLevel, details interface{}) error { + url, post, err := s.preparePost(serviceKey, incidentKey, desc, level, details) + if err != nil { + return err + } + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Message string `json:"message"` + } + r := &response{Message: fmt.Sprintf("failed to understand PagerDuty response. 
code: %d content: %s", resp.StatusCode, string(body))} + b := bytes.NewReader(body) + dec := json.NewDecoder(b) + dec.Decode(r) + return errors.New(r.Message) + } + return nil +} + +func (s *Service) preparePost(serviceKey, incidentKey, desc string, level kapacitor.AlertLevel, details interface{}) (string, io.Reader, error) { + + c := s.config() + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } + var eventType string switch level { case kapacitor.WarnAlert, kapacitor.CritAlert: eventType = "trigger" case kapacitor.InfoAlert: - return fmt.Errorf("AlertLevel 'info' is currently ignored by the PagerDuty service") + return "", nil, fmt.Errorf("AlertLevel 'info' is currently ignored by the PagerDuty service") default: eventType = "resolve" } pData := make(map[string]string) if serviceKey == "" { - pData["service_key"] = s.serviceKey + pData["service_key"] = c.ServiceKey } else { pData["service_key"] = serviceKey } @@ -68,7 +118,7 @@ func (s *Service) Alert(serviceKey, incidentKey, desc string, level kapacitor.Al if details != nil { b, err := json.Marshal(details) if err != nil { - return err + return "", nil, err } pData["details"] = string(b) } @@ -78,27 +128,8 @@ func (s *Service) Alert(serviceKey, incidentKey, desc string, level kapacitor.Al enc := json.NewEncoder(&post) err := enc.Encode(pData) if err != nil { - return err + return "", nil, err } - resp, err := http.Post(s.url, "application/json", &post) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Message string `json:"message"` - } - r := &response{Message: fmt.Sprintf("failed to understand PagerDuty response. code: %d content: %s", resp.StatusCode, string(body))} - b := bytes.NewReader(body) - dec := json.NewDecoder(b) - dec.Decode(r) - return errors.New(r.Message) - } - return nil + return c.URL, &post, nil } diff --git a/services/replay/service.go b/services/replay/service.go index 9a32c9837..15a28f4f6 100644 --- a/services/replay/service.go +++ b/services/replay/service.go @@ -70,7 +70,6 @@ type Service struct { DelRoutes([]httpd.Route) } InfluxDBService interface { - NewDefaultClient() (influxdb.Client, error) NewNamedClient(name string) (influxdb.Client, error) } TaskMasterLookup interface { @@ -1335,13 +1334,7 @@ func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([] defer close(source) // Connect to the cluster - var con influxdb.Client - var err error - if cluster != "" { - con, err = s.InfluxDBService.NewNamedClient(cluster) - } else { - con, err = s.InfluxDBService.NewDefaultClient() - } + cli, err := s.InfluxDBService.NewNamedClient(cluster) if err != nil { errors <- err return @@ -1351,15 +1344,11 @@ func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([] query := influxdb.Query{ Command: q.String(), } - resp, err := con.Query(query) + resp, err := cli.Query(query) if err != nil { errors <- err return } - if err := resp.Error(); err != nil { - errors <- err - return - } for _, res := range resp.Results { batches, err := models.ResultToBatches(res, groupByName) if err != nil { @@ -1576,24 +1565,16 @@ func (s *Service) execQuery(q, cluster string) (kapacitor.DBRP, *influxdb.Respon return dbrp, nil, errors.New("InfluxDB not configured, cannot record query") } // Query InfluxDB - var con influxdb.Client - if cluster != "" { - con, err = s.InfluxDBService.NewNamedClient(cluster) - } else { - con, 
err = s.InfluxDBService.NewDefaultClient() - } + con, err := s.InfluxDBService.NewNamedClient(cluster) if err != nil { - return dbrp, nil, err + return dbrp, nil, errors.Wrap(err, "failed to get InfluxDB client") } query := influxdb.Query{ Command: q, } resp, err := con.Query(query) if err != nil { - return dbrp, nil, err - } - if err := resp.Error(); err != nil { - return dbrp, nil, err + return dbrp, nil, errors.Wrap(err, "InfluxDB query failed") } return dbrp, resp, nil } diff --git a/services/sensu/config.go b/services/sensu/config.go index 0481f1bb6..eab47c8a2 100644 --- a/services/sensu/config.go +++ b/services/sensu/config.go @@ -6,11 +6,11 @@ const DefaultSource = "Kapacitor" type Config struct { // Whether Sensu integration is enabled. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Sensu client host:port address. - Addr string `toml:"addr"` + Addr string `toml:"addr" override:"addr"` // The JIT sensu source name of the alert. - Source string `toml:"source"` + Source string `toml:"source" override:"source"` } func NewConfig() Config { @@ -21,7 +21,7 @@ func NewConfig() Config { func (c Config) Validate() error { if c.Enabled && c.Addr == "" { - return errors.New("must specify sensu client address") + return errors.New("must specify client address") } return nil } diff --git a/services/sensu/service.go b/services/sensu/service.go index dda9b1cc9..5b8d1f215 100644 --- a/services/sensu/service.go +++ b/services/sensu/service.go @@ -8,24 +8,24 @@ import ( "log" "net" "regexp" + "sync/atomic" "github.com/influxdata/kapacitor" ) type Service struct { - addr string - source string - logger *log.Logger + configValue atomic.Value + logger *log.Logger } var validNamePattern = regexp.MustCompile(`^[\w\.-]+$`) func NewService(c Config, l *log.Logger) *Service { - return &Service{ - addr: c.Addr, - source: c.Source, + s := &Service{ logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -36,11 +36,58 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Alert(name, output string, level kapacitor.AlertLevel) error { if !validNamePattern.MatchString(name) { return fmt.Errorf("invalid name %q for sensu alert. 
Must match %v", name, validNamePattern) } + addr, postData, err := s.prepareData(name, output, level) + if err != nil { + return err + } + + conn, err := net.DialTCP("tcp", nil, addr) + if err != nil { + return err + } + defer conn.Close() + + enc := json.NewEncoder(conn) + err = enc.Encode(postData) + if err != nil { + return err + } + resp, err := ioutil.ReadAll(conn) + if string(resp) != "ok" { + return errors.New("sensu socket error: " + string(resp)) + } + return nil +} + +func (s *Service) prepareData(name, output string, level kapacitor.AlertLevel) (*net.TCPAddr, map[string]interface{}, error) { + + c := s.config() + + if !c.Enabled { + return nil, nil, errors.New("service is not enabled") + } + var status int switch level { case kapacitor.OKAlert: @@ -57,28 +104,14 @@ func (s *Service) Alert(name, output string, level kapacitor.AlertLevel) error { postData := make(map[string]interface{}) postData["name"] = name - postData["source"] = s.source + postData["source"] = c.Source postData["output"] = output postData["status"] = status - addr, err := net.ResolveTCPAddr("tcp", s.addr) - if err != nil { - return err - } - conn, err := net.DialTCP("tcp", nil, addr) + addr, err := net.ResolveTCPAddr("tcp", c.Addr) if err != nil { - return err + return nil, nil, err } - defer conn.Close() - enc := json.NewEncoder(conn) - err = enc.Encode(postData) - if err != nil { - return err - } - resp, err := ioutil.ReadAll(conn) - if string(resp) != "ok" { - return errors.New("sensu socket error: " + string(resp)) - } - return nil + return addr, postData, nil } diff --git a/services/slack/config.go b/services/slack/config.go index 515b10949..73011b70a 100644 --- a/services/slack/config.go +++ b/services/slack/config.go @@ -1,19 +1,35 @@ package slack +import ( + "net/url" + + "github.com/pkg/errors" +) + type Config struct { // Whether Slack integration is enabled. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Slack webhook URL, can be obtained by adding Incoming Webhook integration. - URL string `toml:"url"` + URL string `toml:"url" override:"url,redact"` // The default channel, can be overridden per alert. - Channel string `toml:"channel"` + Channel string `toml:"channel" override:"channel"` // Whether all alerts should automatically post to slack - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` // Whether all alerts should automatically use stateChangesOnly mode. // Only applies if global is also set. 
- StateChangesOnly bool `toml:"state-changes-only"` + StateChangesOnly bool `toml:"state-changes-only" override:"state-changes-only"` } func NewConfig() Config { return Config{} } + +func (c Config) Validate() error { + if c.Enabled && c.URL == "" { + return errors.New("must specify url") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid url %q", c.URL) + } + return nil +} diff --git a/services/slack/service.go b/services/slack/service.go index 70cbc78f5..0e70349be 100644 --- a/services/slack/service.go +++ b/services/slack/service.go @@ -5,29 +5,26 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" + "sync/atomic" "github.com/influxdata/kapacitor" ) type Service struct { - channel string - url string - global bool - stateChangesOnly bool - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - channel: c.Channel, - url: c.URL, - global: c.Global, - stateChangesOnly: c.StateChangesOnly, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -38,11 +35,30 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) Global() bool { - return s.global + c := s.config() + return c.Global } + func (s *Service) StateChangesOnly() bool { - return s.stateChangesOnly + c := s.config() + return c.StateChangesOnly } // slack attachment info @@ -53,8 +69,40 @@ type attachment struct { } func (s *Service) Alert(channel, message string, level kapacitor.AlertLevel) error { + url, post, err := s.preparePost(channel, message, level) + if err != nil { + return err + } + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Error string `json:"error"` + } + r := &response{Error: fmt.Sprintf("failed to understand Slack response. 
code: %d content: %s", resp.StatusCode, string(body))} + b := bytes.NewReader(body) + dec := json.NewDecoder(b) + dec.Decode(r) + return errors.New(r.Error) + } + return nil +} + +func (s *Service) preparePost(channel, message string, level kapacitor.AlertLevel) (string, io.Reader, error) { + c := s.config() + + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } if channel == "" { - channel = s.channel + channel = c.Channel } var color string switch level { @@ -80,27 +128,8 @@ func (s *Service) Alert(channel, message string, level kapacitor.AlertLevel) err enc := json.NewEncoder(&post) err := enc.Encode(postData) if err != nil { - return err + return "", nil, err } - resp, err := http.Post(s.url, "application/json", &post) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Error string `json:"error"` - } - r := &response{Error: fmt.Sprintf("failed to understand Slack response. code: %d content: %s", resp.StatusCode, string(body))} - b := bytes.NewReader(body) - dec := json.NewDecoder(b) - dec.Decode(r) - return errors.New(r.Error) - } - return nil + return c.URL, &post, nil } diff --git a/services/smtp/config.go b/services/smtp/config.go index 06225b2c6..7541098cf 100644 --- a/services/smtp/config.go +++ b/services/smtp/config.go @@ -1,38 +1,65 @@ package smtp import ( + "errors" + "fmt" + "strings" "time" "github.com/influxdata/influxdb/toml" ) type Config struct { - Enabled bool `toml:"enabled"` - Host string `toml:"host"` - Port int `toml:"port"` - Username string `toml:"username"` - Password string `toml:"password"` + Enabled bool `toml:"enabled" override:"enabled"` + Host string `toml:"host" override:"host"` + Port int `toml:"port" override:"port"` + Username string `toml:"username" override:"username"` + Password string `toml:"password" override:"password,redact"` // Whether to skip TLS verify. - NoVerify bool `toml:"no-verify"` + NoVerify bool `toml:"no-verify" override:"no-verify"` // Whether all alerts should trigger an email. - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` // Whether all alerts should automatically use stateChangesOnly mode. // Only applies if global is also set. - StateChangesOnly bool `toml:"state-changes-only"` + StateChangesOnly bool `toml:"state-changes-only" override:"state-changes-only"` // From address - From string `toml:"from"` + From string `toml:"from" override:"from"` // Default To addresses - To []string `toml:"to"` + To []string `toml:"to" override:"to"` // Close connection to SMTP server after idle timeout has elapsed - IdleTimeout toml.Duration `toml:"idle-timeout"` + IdleTimeout toml.Duration `toml:"idle-timeout" override:"idle-timeout"` } func NewConfig() Config { return Config{ Host: "localhost", Port: 25, - Username: "", - Password: "", IdleTimeout: toml.Duration(time.Second * 30), } } + +func (c Config) Validate() error { + if c.Host == "" { + return errors.New("host cannot be empty") + } + if c.Port <= 0 { + return fmt.Errorf("invalid port %d", c.Port) + } + if c.IdleTimeout < 0 { + return errors.New("idle timeout must be positive") + } + if c.Enabled && c.From == "" { + return errors.New("must provide a 'from' address") + } + // Poor mans email validation, but since emails have a very large domain this is probably good enough + // to catch user error. 
+ if c.From != "" && !strings.ContainsRune(c.From, '@') { + return fmt.Errorf("invalid from email address: %q", c.From) + } + for _, t := range c.To { + if !strings.ContainsRune(t, '@') { + return fmt.Errorf("invalid to email address: %q", t) + } + } + return nil +} diff --git a/services/smtp/service.go b/services/smtp/service.go index fced31ddf..4111c8c5d 100644 --- a/services/smtp/service.go +++ b/services/smtp/service.go @@ -3,8 +3,10 @@ package smtp import ( "crypto/tls" "errors" + "fmt" "log" "sync" + "sync/atomic" "time" "gopkg.in/gomail.v2" @@ -13,70 +15,140 @@ import ( var ErrNoRecipients = errors.New("not sending email, no recipients defined") type Service struct { - c Config - mail chan *gomail.Message - logger *log.Logger - wg sync.WaitGroup + mu sync.Mutex + config atomic.Value + mail chan *gomail.Message + updates chan bool + logger *log.Logger + wg sync.WaitGroup + opened bool } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - c: c, - mail: make(chan *gomail.Message), - logger: l, + s := &Service{ + updates: make(chan bool), + logger: l, } + s.config.Store(c) + return s } func (s *Service) Open() error { - s.logger.Println("I! Starting SMTP service") - if s.c.From == "" { - return errors.New("cannot open smtp service: missing from address in configuration") + s.mu.Lock() + defer s.mu.Unlock() + if s.opened { + return nil } + s.opened = true + + s.logger.Println("I! Starting SMTP service") + + s.mail = make(chan *gomail.Message) + s.wg.Add(1) - go s.runMailer() + go func() { + defer s.wg.Done() + s.runMailer() + }() + return nil } func (s *Service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + if !s.opened { + return nil + } + s.opened = false + s.logger.Println("I! Closing SMTP service") + close(s.mail) s.wg.Wait() + + return nil +} + +func (s *Service) loadConfig() Config { + return s.config.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.config.Store(c) + s.mu.Lock() + opened := s.opened + s.mu.Unlock() + if opened { + // Signal to create new dialer + s.updates <- true + } + } return nil } func (s *Service) Global() bool { - return s.c.Global + c := s.loadConfig() + return c.Global } func (s *Service) StateChangesOnly() bool { - return s.c.StateChangesOnly + c := s.loadConfig() + return c.StateChangesOnly } -func (s *Service) runMailer() { - defer s.wg.Done() - var d *gomail.Dialer - if s.c.Username == "" { - d = &gomail.Dialer{Host: s.c.Host, Port: s.c.Port} +func (s *Service) dialer() (d *gomail.Dialer, idleTimeout time.Duration) { + c := s.loadConfig() + if c.Username == "" { + d = &gomail.Dialer{Host: c.Host, Port: c.Port} } else { - d = gomail.NewPlainDialer(s.c.Host, s.c.Port, s.c.Username, s.c.Password) + d = gomail.NewPlainDialer(c.Host, c.Port, c.Username, c.Password) } - if s.c.NoVerify { + if c.NoVerify { d.TLSConfig = &tls.Config{InsecureSkipVerify: true} } + idleTimeout = time.Duration(c.IdleTimeout) + return +} + +func (s *Service) runMailer() { + var idleTimeout time.Duration + var d *gomail.Dialer + d, idleTimeout = s.dialer() var conn gomail.SendCloser var err error open := false - for { + done := false + for !done { + timer := time.NewTimer(idleTimeout) select { + case <-s.updates: + // Close old connection + if conn != nil { + if err := 
conn.Close(); err != nil { + s.logger.Println("E! error closing connection to old SMTP server:", err) + } + conn = nil + } + // Create new dialer + d, idleTimeout = s.dialer() + open = false case m, ok := <-s.mail: if !ok { - return + done = true + break } if !open { if conn, err = d.Dial(); err != nil { s.logger.Println("E! error connecting to SMTP server", err) - continue + break } open = true } @@ -85,7 +157,7 @@ func (s *Service) runMailer() { } // Close the connection to the SMTP server if no email was sent in // the last IdleTimeout duration. - case <-time.After(time.Duration(s.c.IdleTimeout)): + case <-timer.C: if open { if err := conn.Close(); err != nil { s.logger.Println("E! error closing connection to SMTP server:", err) @@ -93,21 +165,34 @@ func (s *Service) runMailer() { open = false } } + timer.Stop() } } func (s *Service) SendMail(to []string, subject, body string) error { + m, err := s.prepareMessge(to, subject, body) + if err != nil { + return err + } + s.mail <- m + return nil +} + +func (s *Service) prepareMessge(to []string, subject, body string) (*gomail.Message, error) { + c := s.loadConfig() + if !c.Enabled { + return nil, errors.New("service not enabled") + } if len(to) == 0 { - to = s.c.To + to = c.To } if len(to) == 0 { - return ErrNoRecipients + return nil, ErrNoRecipients } m := gomail.NewMessage() - m.SetHeader("From", s.c.From) + m.SetHeader("From", c.From) m.SetHeader("To", to...) m.SetHeader("Subject", subject) m.SetBody("text/html", body) - s.mail <- m - return nil + return m, nil } diff --git a/services/storage/storagetest/storage.go b/services/storage/storagetest/storage.go new file mode 100644 index 000000000..fe42fab67 --- /dev/null +++ b/services/storage/storagetest/storage.go @@ -0,0 +1,78 @@ +package storagetest + +import ( + "strings" + "sync" + + "github.com/influxdata/kapacitor/services/storage" +) + +type TestStore struct{} + +func New() TestStore { + return TestStore{} +} + +func (s TestStore) Store(name string) storage.Interface { + return NewMemStore(name) +} + +// Common interface for interacting with a simple Key/Value storage +type MemStore struct { + sync.Mutex + Name string + store map[string][]byte +} + +func NewMemStore(name string) *MemStore { + return &MemStore{ + Name: name, + store: make(map[string][]byte), + } +} + +func (s *MemStore) Put(key string, value []byte) error { + s.Lock() + s.store[key] = value + s.Unlock() + return nil +} + +func (s *MemStore) Get(key string) (*storage.KeyValue, error) { + s.Lock() + value, ok := s.store[key] + s.Unlock() + if !ok { + return nil, storage.ErrNoKeyExists + } + return &storage.KeyValue{ + Key: key, + Value: value, + }, nil +} + +func (s *MemStore) Delete(key string) error { + s.Lock() + delete(s.store, key) + s.Unlock() + return nil +} + +func (s *MemStore) Exists(key string) (bool, error) { + s.Lock() + _, ok := s.store[key] + s.Unlock() + return ok, nil +} + +func (s *MemStore) List(prefix string) ([]*storage.KeyValue, error) { + s.Lock() + kvs := make([]*storage.KeyValue, 0, len(s.store)) + for k, v := range s.store { + if strings.HasPrefix(k, prefix) { + kvs = append(kvs, &storage.KeyValue{Key: k, Value: v}) + } + } + s.Unlock() + return kvs, nil +} diff --git a/services/talk/config.go b/services/talk/config.go index 672fef7b1..acab8c4e6 100644 --- a/services/talk/config.go +++ b/services/talk/config.go @@ -2,15 +2,17 @@ package talk import ( "net/url" + + "github.com/pkg/errors" ) type Config struct { // Whether Talk integration is enabled. 
- Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Talk webhook URL, can be obtained by adding Incoming Webhook integration. - URL string `toml:"url"` + URL string `toml:"url" override:"url,redact"` // The default authorName, can be overridden per alert. - AuthorName string `toml:"author_name"` + AuthorName string `toml:"author_name" override:"author_name"` } func NewConfig() Config { @@ -18,9 +20,11 @@ func NewConfig() Config { } func (c Config) Validate() error { - _, err := url.Parse(c.URL) - if err != nil { - return err + if c.Enabled && c.URL == "" { + return errors.New("must specify url") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid url %q", c.URL) } return nil } diff --git a/services/talk/service.go b/services/talk/service.go index 06013a285..6773efa93 100644 --- a/services/talk/service.go +++ b/services/talk/service.go @@ -5,23 +5,24 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "log" "net/http" + "sync/atomic" ) type Service struct { - url string - authorName string - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - url: c.URL, - authorName: c.AuthorName, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -32,20 +33,28 @@ func (s *Service) Close() error { return nil } -func (s *Service) Alert(title, text string) error { - postData := make(map[string]interface{}) - postData["title"] = title - postData["text"] = text - postData["authorName"] = s.authorName +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} - var post bytes.Buffer - enc := json.NewEncoder(&post) - err := enc.Encode(postData) +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + +func (s *Service) Alert(title, text string) error { + url, post, err := s.preparePost(title, text) if err != nil { return err } - - resp, err := http.Post(s.url, "application/json", &post) + resp, err := http.Post(url, "application/json", post) if err != nil { return err } @@ -65,3 +74,24 @@ func (s *Service) Alert(title, text string) error { } return nil } + +func (s *Service) preparePost(title, text string) (string, io.Reader, error) { + c := s.config() + + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } + postData := make(map[string]interface{}) + postData["title"] = title + postData["text"] = text + postData["authorName"] = c.AuthorName + + var post bytes.Buffer + enc := json.NewEncoder(&post) + err := enc.Encode(postData) + if err != nil { + return "", nil, err + } + + return c.URL, &post, nil +} diff --git a/services/telegram/config.go b/services/telegram/config.go index 691db833f..c4eaa8e75 100644 --- a/services/telegram/config.go +++ b/services/telegram/config.go @@ -1,29 +1,35 @@ package telegram +import ( + "net/url" + + "github.com/pkg/errors" +) + const DefaultTelegramURL = "https://api.telegram.org/bot" const DefaultTelegramLinksPreviewDisable = false const DefaultTelegramNotificationDisable = false type Config struct { // Whether Telegram integration is enabled. 
- Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Telegram Bot URL, should not need to be changed. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // The Telegram Bot Token, can be obtained From @BotFather. - Token string `toml:"token"` + Token string `toml:"token" override:"token,redact"` // The default channel, can be overridden per alert. - ChatId string `toml:"chat-id"` + ChatId string `toml:"chat-id" override:"chat-id"` // Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. - ParseMode string `toml:"parse-mode"` + ParseMode string `toml:"parse-mode" override:"parse-mode"` // Disables link previews for links in this message - DisableWebPagePreview bool `toml:"disable-web-page-preview"` + DisableWebPagePreview bool `toml:"disable-web-page-preview" override:"disable-web-page-preview"` // Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. - DisableNotification bool `toml:"disable-notification"` + DisableNotification bool `toml:"disable-notification" override:"disable-notification"` // Whether all alerts should automatically post to Telegram - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` // Whether all alerts should automatically use stateChangesOnly mode. // Only applies if global is also set. - StateChangesOnly bool `toml:"state-changes-only"` + StateChangesOnly bool `toml:"state-changes-only" override:"state-changes-only"` } func NewConfig() Config { @@ -33,3 +39,18 @@ func NewConfig() Config { DisableNotification: DefaultTelegramNotificationDisable, } } + +func (c Config) Validate() error { + if c.Enabled { + if c.URL == "" { + return errors.New("must specify url") + } + if c.Token == "" { + return errors.New("must specify token") + } + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid url %q", c.URL) + } + return nil +} diff --git a/services/telegram/service.go b/services/telegram/service.go index 5e2926500..98bb64977 100644 --- a/services/telegram/service.go +++ b/services/telegram/service.go @@ -4,33 +4,28 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "log" "net/http" + "net/url" + "path" + "sync/atomic" + + "github.com/pkg/errors" ) type Service struct { - chatId string - parseMode string - disableWebPagePreview bool - disableNotification bool - url string - global bool - stateChangesOnly bool - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - chatId: c.ChatId, - parseMode: c.ParseMode, - disableWebPagePreview: c.DisableWebPagePreview, - disableNotification: c.DisableNotification, - url: c.URL + c.Token + "/sendMessage", - global: c.Global, - stateChangesOnly: c.StateChangesOnly, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -41,24 +36,80 @@ func (s *Service) Close() error { return nil } +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + func (s *Service) 
Global() bool { - return s.global + c := s.config() + return c.Global } func (s *Service) StateChangesOnly() bool { - return s.stateChangesOnly + c := s.config() + return c.StateChangesOnly } func (s *Service) Alert(chatId, parseMode, message string, disableWebPagePreview, disableNotification bool) error { + url, post, err := s.preparePost(chatId, parseMode, message, disableWebPagePreview, disableNotification) + if err != nil { + return err + } + + resp, err := http.Post(url, "application/json", post) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + type response struct { + Description string `json:"description"` + ErrorCode int `json:"error_code"` + Ok bool `json:"ok"` + } + res := &response{} + + err = json.Unmarshal(body, res) + + if err != nil { + return fmt.Errorf("failed to understand Telegram response (err: %s). code: %d content: %s", err.Error(), resp.StatusCode, string(body)) + } + return fmt.Errorf("sendMessage error (%d) description: %s", res.ErrorCode, res.Description) + + } + return nil +} +func (s *Service) preparePost(chatId, parseMode, message string, disableWebPagePreview, disableNotification bool) (string, io.Reader, error) { + c := s.config() + + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } if chatId == "" { - chatId = s.chatId + chatId = c.ChatId } if parseMode == "" { - parseMode = s.parseMode + parseMode = c.ParseMode } if parseMode != "" && parseMode != "Markdown" && parseMode != "HTML" { - return fmt.Errorf("parseMode %s is not valid, please use 'Markdown' or 'HTML'", parseMode) + return "", nil, fmt.Errorf("parseMode %s is not valid, please use 'Markdown' or 'HTML'", parseMode) } postData := make(map[string]interface{}) @@ -69,11 +120,11 @@ func (s *Service) Alert(chatId, parseMode, message string, disableWebPagePreview postData["parse_mode"] = parseMode } - if disableWebPagePreview || s.disableWebPagePreview { + if disableWebPagePreview || c.DisableWebPagePreview { postData["disable_web_page_preview"] = true } - if disableNotification || s.disableNotification { + if disableNotification || c.DisableNotification { postData["disable_notification"] = true } @@ -81,33 +132,13 @@ func (s *Service) Alert(chatId, parseMode, message string, disableWebPagePreview enc := json.NewEncoder(&post) err := enc.Encode(postData) if err != nil { - return err + return "", nil, err } - resp, err := http.Post(s.url, "application/json", &post) + u, err := url.Parse(c.URL) if err != nil { - return err + return "", nil, errors.Wrap(err, "invalid URL") } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - type response struct { - Description string `json:"description"` - ErrorCode int `json:"error_code"` - Ok bool `json:"ok"` - } - res := &response{} - - err = json.Unmarshal(body, res) - - if err != nil { - return fmt.Errorf("failed to understand Telegram response (err: %s). 
url: %s data: %v code: %d content: %s", err.Error(), s.url, &postData, resp.StatusCode, string(body)) - } - return fmt.Errorf("sendMessage error (%d) description: %s", res.ErrorCode, res.Description) - - } - return nil + u.Path = path.Join(u.Path+c.Token, "sendMessage") + return u.String(), &post, nil } diff --git a/services/victorops/config.go b/services/victorops/config.go index 81968056a..7a51ca7c6 100644 --- a/services/victorops/config.go +++ b/services/victorops/config.go @@ -1,18 +1,24 @@ package victorops +import ( + "net/url" + + "github.com/pkg/errors" +) + const DefaultVictorOpsAPIURL = "https://alert.victorops.com/integrations/generic/20131114/alert" type Config struct { // Whether to enable Victor Ops integration. - Enabled bool `toml:"enabled"` + Enabled bool `toml:"enabled" override:"enabled"` // The Victor Ops API key. - APIKey string `toml:"api-key"` + APIKey string `toml:"api-key" override:"api-key,redact"` // The default Routing Key, can be overridden per alert. - RoutingKey string `toml:"routing-key"` + RoutingKey string `toml:"routing-key" override:"routing-key"` // The Victor Ops API URL, should not need to be changed. - URL string `toml:"url"` + URL string `toml:"url" override:"url"` // Whether every alert should automatically go to VictorOps. - Global bool `toml:"global"` + Global bool `toml:"global" override:"global"` } func NewConfig() Config { @@ -20,3 +26,13 @@ func NewConfig() Config { URL: DefaultVictorOpsAPIURL, } } + +func (c Config) Validate() error { + if c.URL == "" { + return errors.New("url cannot be empty") + } + if _, err := url.Parse(c.URL); err != nil { + return errors.Wrapf(err, "invalid URL %q", c.URL) + } + return nil +} diff --git a/services/victorops/service.go b/services/victorops/service.go index 6171b9a38..8931a7cd9 100644 --- a/services/victorops/service.go +++ b/services/victorops/service.go @@ -3,30 +3,31 @@ package victorops import ( "bytes" "encoding/json" - "errors" "fmt" + "io" "io/ioutil" "log" "net/http" + "net/url" + "path" + "sync/atomic" "time" "github.com/influxdata/kapacitor" + "github.com/pkg/errors" ) type Service struct { - routingKey string - url string - global bool - logger *log.Logger + configValue atomic.Value + logger *log.Logger } func NewService(c Config, l *log.Logger) *Service { - return &Service{ - routingKey: c.RoutingKey, - url: c.URL + "/" + c.APIKey + "/", - global: c.Global, - logger: l, + s := &Service{ + logger: l, } + s.configValue.Store(c) + return s } func (s *Service) Open() error { @@ -37,38 +38,34 @@ func (s *Service) Close() error { return nil } -func (s *Service) Global() bool { - return s.global +func (s *Service) config() Config { + return s.configValue.Load().(Config) } -func (s *Service) Alert(routingKey, messageType, message, entityID string, t time.Time, details interface{}) error { - voData := make(map[string]interface{}) - voData["message_type"] = messageType - voData["entity_id"] = entityID - voData["state_message"] = message - voData["timestamp"] = t.Unix() - voData["monitoring_tool"] = kapacitor.Product - if details != nil { - b, err := json.Marshal(details) - if err != nil { - return err - } - voData["data"] = string(b) +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) } - - if routingKey == "" { - routingKey = s.routingKey + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + 
s.configValue.Store(c) } + return nil +} - // Post data to VO - var post bytes.Buffer - enc := json.NewEncoder(&post) - err := enc.Encode(voData) +func (s *Service) Global() bool { + c := s.config() + return c.Global +} + +func (s *Service) Alert(routingKey, messageType, message, entityID string, t time.Time, details interface{}) error { + url, post, err := s.preparePost(routingKey, messageType, message, entityID, t, details) if err != nil { return err } - resp, err := http.Post(s.url+routingKey, "application/json", &post) + resp, err := http.Post(url, "application/json", post) if err != nil { return err } @@ -92,3 +89,42 @@ func (s *Service) Alert(routingKey, messageType, message, entityID string, t tim } return nil } + +func (s *Service) preparePost(routingKey, messageType, message, entityID string, t time.Time, details interface{}) (string, io.Reader, error) { + c := s.config() + if !c.Enabled { + return "", nil, errors.New("service is not enabled") + } + + voData := make(map[string]interface{}) + voData["message_type"] = messageType + voData["entity_id"] = entityID + voData["state_message"] = message + voData["timestamp"] = t.Unix() + voData["monitoring_tool"] = kapacitor.Product + if details != nil { + b, err := json.Marshal(details) + if err != nil { + return "", nil, err + } + voData["data"] = string(b) + } + + if routingKey == "" { + routingKey = c.RoutingKey + } + + // Post data to VO + var post bytes.Buffer + enc := json.NewEncoder(&post) + err := enc.Encode(voData) + if err != nil { + return "", nil, err + } + u, err := url.Parse(c.URL) + if err != nil { + return "", nil, errors.Wrap(err, "invalid URL") + } + u.Path = path.Join(u.Path, c.APIKey, routingKey) + return u.String(), &post, nil +} diff --git a/task_master.go b/task_master.go index 749a29a75..31835c8b6 100644 --- a/task_master.go +++ b/task_master.go @@ -58,7 +58,6 @@ type TaskMaster struct { UDFService UDFService InfluxDBService interface { - NewDefaultClient() (influxdb.Client, error) NewNamedClient(name string) (influxdb.Client, error) } SMTPService interface { @@ -116,7 +115,7 @@ type TaskMaster struct { NewTimer(timer.Setter) timer.Timer } K8sService interface { - Client() k8s.Client + Client() (k8s.Client, error) } LogService LogService diff --git a/vendor.list b/vendor.list index 9ec6556d2..0e5d97508 100644 --- a/vendor.list +++ b/vendor.list @@ -1,5 +1,5 @@ -github.com/boltdb/bolt github.com/BurntSushi/toml +github.com/boltdb/bolt github.com/cenkalti/backoff github.com/dgrijalva/jwt-go v3.0.0 github.com/dustin/go-humanize @@ -10,6 +10,8 @@ github.com/influxdata/influxdb 1.0 github.com/influxdata/usage-client github.com/kimor79/gollectd github.com/mattn/go-runewidth +github.com/mitchellh/copystructure +github.com/mitchellh/reflectwalk github.com/pkg/errors github.com/russross/blackfriday github.com/serenize/snaker diff --git a/vendor/github.com/BurntSushi/toml/.gitrepo b/vendor/github.com/BurntSushi/toml/.gitrepo index 53c39317b..b4250295a 100644 --- a/vendor/github.com/BurntSushi/toml/.gitrepo +++ b/vendor/github.com/BurntSushi/toml/.gitrepo @@ -4,8 +4,8 @@ ; git-subrepo command. 
See https://github.com/git-commands/git-subrepo#readme ; [subrepo] - remote = http://github.com/BurntSushi/toml.git + remote = https://github.com/BurntSushi/toml.git branch = datetimes - commit = f0aeabca5a127c4078abb8c8d64298b147264b55 - parent = db57a2908e5de95a495f97d92e09d628d33d2071 + commit = 99064174e013895bbd9b025c31100bd1d9b590ca + parent = 7ec19ba5a215ea26dbb5717e7543acf1815ffbfa cmdver = 0.3.0 diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml index 43caf6d02..8b8afc4f0 100644 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -2,6 +2,10 @@ language: go go: - 1.1 - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 - tip install: - go install ./... @@ -9,4 +13,3 @@ install: script: - export PATH="$PATH:$HOME/gopath/bin" - make test - diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 98c8aa667..b0fd51d5b 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -10,7 +10,9 @@ import ( "time" ) -var e = fmt.Errorf +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. @@ -105,7 +107,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { func Decode(data string, v interface{}) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer type %s", reflect.TypeOf(v)) + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) } if rv.IsNil() { return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) @@ -218,7 +220,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Interface: // we only support empty interfaces. if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) + return e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) case reflect.Float32: @@ -226,7 +228,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Float64: return md.unifyFloat64(data, rv) } - return e("Unsupported type '%s'.", rv.Kind()) + return e("unsupported type %s", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { @@ -235,7 +237,8 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if mapping == nil { return nil } - return mismatch(rv, "map", mapping) + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) } for key, datum := range tmap { @@ -260,14 +263,13 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { md.decoded[md.context.add(key).String()] = true md.context = append(md.context, key) if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) + return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { // Bad user! No soup for you! - return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) + return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) } } } @@ -385,15 +387,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { // No bounds checking necessary. 
case reflect.Int8: if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) + return e("value %d is out of range for int8", num) } case reflect.Int16: if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) + return e("value %d is out of range for int16", num) } case reflect.Int32: if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) + return e("value %d is out of range for int32", num) } } rv.SetInt(num) @@ -404,15 +406,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { // No bounds checking necessary. case reflect.Uint8: if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) + return e("value %d is out of range for uint8", num) } case reflect.Uint16: if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) + return e("value %d is out of range for uint16", num) } case reflect.Uint32: if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) + return e("value %d is out of range for uint32", num) } } rv.SetUint(unum) @@ -478,7 +480,7 @@ func rvalue(v interface{}) reflect.Value { // interest to us (like encoding.TextUnmarshaler). func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { - if v.CanAddr() { + if v.CanSet() { pv := v.Addr() if _, ok := pv.Interface().(TextUnmarshaler); ok { return pv @@ -503,10 +505,5 @@ func isUnifiable(rv reflect.Value) bool { } func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. 
Expected %s but found '%T'.", - user.Type().String(), expected, data) + return e("cannot load TOML value of type %T into a Go %s", data, expected) } diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go index ef6f545fa..b9914a679 100644 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -77,9 +77,8 @@ func (k Key) maybeQuoted(i int) string { } if quote { return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } else { - return k[i] } + return k[i] } func (k Key) add(piece string) Key { diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go index d746527a1..62553ebc4 100644 --- a/vendor/github.com/BurntSushi/toml/decode_test.go +++ b/vendor/github.com/BurntSushi/toml/decode_test.go @@ -77,42 +77,52 @@ cauchy = "cat 2" func TestDecodeEmbedded(t *testing.T) { type Dog struct{ Name string } type Age int + type cat struct{ Name string } - tests := map[string]struct { + for _, test := range []struct { + label string input string decodeInto interface{} wantDecoded interface{} }{ - "embedded struct": { + { + label: "embedded struct", input: `Name = "milton"`, decodeInto: &struct{ Dog }{}, wantDecoded: &struct{ Dog }{Dog{"milton"}}, }, - "embedded non-nil pointer to struct": { + { + label: "embedded non-nil pointer to struct", input: `Name = "milton"`, decodeInto: &struct{ *Dog }{}, wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, }, - "embedded nil pointer to struct": { + { + label: "embedded nil pointer to struct", input: ``, decodeInto: &struct{ *Dog }{}, wantDecoded: &struct{ *Dog }{nil}, }, - "embedded int": { + { + label: "unexported embedded struct", + input: `Name = "socks"`, + decodeInto: &struct{ cat }{}, + wantDecoded: &struct{ cat }{cat{"socks"}}, + }, + { + label: "embedded int", input: `Age = -5`, decodeInto: &struct{ Age }{}, wantDecoded: &struct{ Age }{-5}, }, - } - - for label, test := range tests { + } { _, err := Decode(test.input, test.decodeInto) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) + test.label, test.wantDecoded, test.decodeInto) } } } @@ -531,7 +541,7 @@ func TestDecodeFloats(t *testing.T) { continue } if x.N != tt.want { - t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want) + t.Errorf("Decode(%q): got %f; want %f", input, x.N, tt.want) } } } @@ -573,7 +583,7 @@ func TestDecodeBadValues(t *testing.T) { v interface{} want string }{ - {3, "non-pointer type"}, + {3, "non-pointer int"}, {(*int)(nil), "nil"}, } { _, err := Decode(`x = 3`, tt.v) @@ -1043,7 +1053,7 @@ rating = 3.1 // // NOTE the example below contains detailed type casting to show how // // the 'data' is retrieved. In operational use, a type cast wrapper - // // may be prefered e.g. + // // may be preferred e.g. // // // // func AsMap(v interface{}) (map[string]interface{}, error) { // // return v.(map[string]interface{}) @@ -1170,7 +1180,7 @@ func (o *order) UnmarshalTOML(data interface{}) error { // NOTE the example below contains detailed type casting to show how // the 'data' is retrieved. In operational use, a type cast wrapper - // may be prefered e.g. + // may be preferred e.g. 
// // func AsMap(v interface{}) (map[string]interface{}, error) { // return v.(map[string]interface{}) diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index f538261ab..0f2558b2e 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -16,17 +16,17 @@ type tomlEncodeError struct{ error } var ( errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") + "toml: cannot encode array with mixed element types") errArrayNilElement = errors.New( - "can't encode array with nil element") + "toml: cannot encode array with nil element") errNonString = errors.New( - "can't encode a map with non-string key type") + "toml: cannot encode a map with non-string key type") errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") + "toml: cannot encode an anonymous field that is not a struct") errArrayNoTable = errors.New( - "TOML array element can't contain a table") + "toml: TOML array element cannot contain a table") errNoKey = errors.New( - "top-level values must be a Go map or struct") + "toml: top-level values must be Go maps or structs") errAnything = errors.New("") // used in testing ) @@ -148,7 +148,7 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) { case reflect.Struct: enc.eTable(key, rv) default: - panic(e("Unsupported type for key '%s': %s", key, k)) + panic(e("unsupported type for key '%s': %s", key, k)) } } @@ -160,7 +160,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { // Special case time.Time as a primitive. Has to come before // TextMarshaler below because time.Time implements // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) return case TextMarshaler: // Special case. Use text marshaler if it's available for this value. @@ -191,7 +191,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.String: enc.writeQuoted(rv.String()) default: - panic(e("Unexpected primitive type: %s", rv.Kind())) + panic(e("unexpected primitive type: %s", rv.Kind())) } } @@ -399,9 +399,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Array, reflect.Slice: if typeEqual(tomlHash, tomlArrayType(rv)) { return tomlArrayHash - } else { - return tomlArray } + return tomlArray case reflect.Ptr, reflect.Interface: return tomlTypeOfGo(rv.Elem()) case reflect.String: diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index a016dc230..104ebda21 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -577,11 +577,10 @@ func lexMultilineStringEscape(lx *lexer) stateFn { // Handle the special case first: if isNL(lx.next()) { return lexMultilineString - } else { - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) } func lexStringEscape(lx *lexer) stateFn { @@ -699,9 +698,8 @@ func lexNumberStart(lx *lexer) stateFn { if !isDigit(r) { if r == '.' 
{ return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) } + return lx.errorf("Expected a digit but got %q.", r) } return lexNumber } @@ -829,13 +827,7 @@ func (itype itemType) String() string { return "EOF" case itemText: return "Text" - case itemString: - return "String" - case itemRawString: - return "String" - case itemMultilineString: - return "String" - case itemRawMultilineString: + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: return "String" case itemBool: return "Bool" diff --git a/vendor/github.com/mitchellh/copystructure/.gitrepo b/vendor/github.com/mitchellh/copystructure/.gitrepo new file mode 100644 index 000000000..6098622e8 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/mitchellh/copystructure.git + branch = master + commit = ad4c8fe111e90a651ae529a0eb45a12385e2a8eb + parent = c03843776a074db57a4c98f20bf79328a47e5a8c + cmdver = 0.3.0 diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 000000000..d7b9589ab --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000..bcb8c8d2c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. 
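For context, a minimal sketch of how this vendored package is used by callers (it mirrors the ExampleCopy test added later in this diff; the variable names and the `main` wrapper are illustrative, not part of the upstream package):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	original := map[string]interface{}{
		"bob": map[string]interface{}{
			"emails": []string{"a", "b"},
		},
	}

	// Copy returns a deep copy: the nested map and slice are duplicated,
	// not shared, so mutating the copy leaves the original untouched.
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}

	emails := dup.(map[string]interface{})["bob"].(map[string]interface{})["emails"].([]string)
	emails[0] = "changed"

	fmt.Println(original["bob"].(map[string]interface{})["emails"]) // [a b] -- unchanged
	fmt.Println(emails)                                             // [changed b]
}
```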
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 000000000..db6a6aa1a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copier_time_test.go b/vendor/github.com/mitchellh/copystructure/copier_time_test.go new file mode 100644 index 000000000..5506a0ff1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time_test.go @@ -0,0 +1,17 @@ +package copystructure + +import ( + "testing" + "time" +) + +func TestTimeCopier(t *testing.T) { + v := time.Now().UTC() + result, err := timeCopier(v) + if err != nil { + t.Fatalf("err: %s", err) + } + if result.(time.Time) != v { + t.Fatalf("bad: %#v\n\n%#v", v, result) + } +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 000000000..349d38d61 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,410 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. 
+ result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + ps []bool + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Type().Elem()) + } + m.SetMapIndex(mk, mv) + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + if v.IsValid() { + i := w.valPop().Interface().(int) + s := w.cs[len(w.cs)-1] + se := s.Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.Indirect(reflect.New(m.Type())) + } else { + newMap = reflect.MakeMap(m.Type()) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if w.ignoring() { + return nil + } + + w.ps = append(w.ps, v) + return nil +} + +func (w *walker) PointerExit(bool) error { + if w.ignoring() { + return nil + } + + w.ps = w.ps[:len(w.ps)-1] + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). 
+ var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + reflect.Indirect(newV).Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.Indirect(reflect.New(s.Type())) + } else { + newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap()) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + v = reflect.ValueOf(dup) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + w.ignore() + return nil + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[len(w.ps)-1] +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. 
+ if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + } +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go b/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go new file mode 100644 index 000000000..e094b8626 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure_examples_test.go @@ -0,0 +1,22 @@ +package copystructure + +import ( + "fmt" +) + +func ExampleCopy() { + input := map[string]interface{}{ + "bob": map[string]interface{}{ + "emails": []string{"a", "b"}, + }, + } + + dup, err := Copy(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", dup) + // Output: + // map[string]interface {}{"bob":map[string]interface {}{"emails":[]string{"a", "b"}}} +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure_test.go b/vendor/github.com/mitchellh/copystructure/copystructure_test.go new file mode 100644 index 000000000..780b0da1d --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure_test.go @@ -0,0 +1,675 @@ +package copystructure + +import ( + "reflect" + "sync" + "testing" + "time" +) + +func TestCopy_complex(t *testing.T) { + v := map[string]interface{}{ + "foo": []string{"a", "b"}, + "bar": "baz", + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_primitive(t *testing.T) { + cases := []interface{}{ + 42, + "foo", + 1.2, + } + + for _, tc := range cases { + result, err := Copy(tc) + if err != nil { + t.Fatalf("err: %s", err) + } + if result != tc { + t.Fatalf("bad: %#v", result) + } + } +} + +func TestCopy_primitivePtr(t *testing.T) { + cases := []interface{}{ + 42, + "foo", + 1.2, + } + + for _, tc := range cases { + result, err := Copy(&tc) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, &tc) { + t.Fatalf("bad: %#v", result) + } + } +} + +func TestCopy_map(t *testing.T) { + v := map[string]interface{}{ + "bar": "baz", + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_slice(t *testing.T) { + v := []string{"bar", "baz"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + 
} +} + +func TestCopy_struct(t *testing.T) { + type test struct { + Value string + } + + v := test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structPtr(t *testing.T) { + type test struct { + Value string + } + + v := &test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structNil(t *testing.T) { + type test struct { + Value string + } + + var v *test + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + if v, ok := result.(*test); !ok { + t.Fatalf("bad: %#v", result) + } else if v != nil { + t.Fatalf("bad: %#v", v) + } +} + +func TestCopy_structNested(t *testing.T) { + type TestInner struct{} + + type Test struct { + Test *TestInner + } + + v := Test{} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structUnexported(t *testing.T) { + type test struct { + Value string + + private string + } + + v := test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_structUnexportedMap(t *testing.T) { + type Sub struct { + Foo map[string]interface{} + } + + type test struct { + Value string + + private Sub + } + + v := test{ + Value: "foo", + private: Sub{ + Foo: map[string]interface{}{ + "yo": 42, + }, + }, + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + // private should not be copied + v.private = Sub{} + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad:\n\n%#v\n\n%#v", result, v) + } +} + +// This is testing an unexported field containing a slice of pointers, which +// was a crashing case found in Terraform. 
+func TestCopy_structUnexportedPtrMap(t *testing.T) { + type Foo interface{} + + type Sub struct { + List []Foo + } + + type test struct { + Value string + + private *Sub + } + + v := test{ + Value: "foo", + private: &Sub{ + List: []Foo{&Sub{}}, + }, + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + // private should not be copied + v.private = nil + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad:\n\n%#v\n\n%#v", result, v) + } +} + +func TestCopy_nestedStructUnexported(t *testing.T) { + type subTest struct { + mine string + } + + type test struct { + Value string + private subTest + } + + v := test{Value: "foo"} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_time(t *testing.T) { + type test struct { + Value time.Time + } + + v := test{Value: time.Now().UTC()} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +func TestCopy_aliased(t *testing.T) { + type ( + Int int + Str string + Map map[Int]interface{} + Slice []Str + ) + + v := Map{ + 1: Map{10: 20}, + 2: Map(nil), + 3: Slice{"a", "b"}, + } + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +type EmbeddedLocker struct { + sync.Mutex + Map map[int]int +} + +func TestCopy_embeddedLocker(t *testing.T) { + v := &EmbeddedLocker{ + Map: map[int]int{42: 111}, + } + // start locked to prevent copying + v.Lock() + + var result interface{} + var err error + + copied := make(chan bool) + + go func() { + result, err = Config{Lock: true}.Copy(v) + close(copied) + }() + + // pause slightly to make sure copying is blocked + select { + case <-copied: + t.Fatal("copy completed while locked!") + case <-time.After(100 * time.Millisecond): + v.Unlock() + } + + <-copied + + // test that the mutex is in the correct state + result.(*EmbeddedLocker).Lock() + result.(*EmbeddedLocker).Unlock() + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +// this will trigger the race detector, and usually panic if the original +// struct isn't properly locked during Copy +func TestCopy_lockRace(t *testing.T) { + v := &EmbeddedLocker{ + Map: map[int]int{}, + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + v.Lock() + v.Map[i] = i + v.Unlock() + } + }() + wg.Add(1) + go func() { + defer wg.Done() + Config{Lock: true}.Copy(v) + }() + } + + wg.Wait() + result, err := Config{Lock: true}.Copy(v) + + // test that the mutex is in the correct state + result.(*EmbeddedLocker).Lock() + result.(*EmbeddedLocker).Unlock() + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +type LockedField struct { + String string + Locker *EmbeddedLocker + // this should not get locked or have its state copied + Mutex sync.Mutex + nilMutex *sync.Mutex +} + +func TestCopy_lockedField(t *testing.T) { + v := &LockedField{ + String: "orig", + Locker: &EmbeddedLocker{ + Map: map[int]int{42: 111}, + }, + } + + // start locked to prevent copying + v.Locker.Lock() + v.Mutex.Lock() + + var result interface{} + var err error + + copied := make(chan bool) + + go func() { + result, err = Config{Lock: true}.Copy(v) 
+ close(copied) + }() + + // pause slightly to make sure copying is blocked + select { + case <-copied: + t.Fatal("copy completed while locked!") + case <-time.After(100 * time.Millisecond): + v.Locker.Unlock() + } + + <-copied + + // test that the mutexes are in the correct state + result.(*LockedField).Locker.Lock() + result.(*LockedField).Locker.Unlock() + result.(*LockedField).Mutex.Lock() + result.(*LockedField).Mutex.Unlock() + + // this wasn't blocking, but should be unlocked for DeepEqual + v.Mutex.Unlock() + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("expected:\n%#v\nbad:\n%#v\n", v, result) + } +} + +// test something that doesn't contain a lock internally +type lockedMap map[int]int + +var mapLock sync.Mutex + +func (m lockedMap) Lock() { mapLock.Lock() } +func (m lockedMap) Unlock() { mapLock.Unlock() } + +func TestCopy_lockedMap(t *testing.T) { + v := lockedMap{1: 2} + v.Lock() + + var result interface{} + var err error + + copied := make(chan bool) + + go func() { + result, err = Config{Lock: true}.Copy(&v) + close(copied) + }() + + // pause slightly to make sure copying is blocked + select { + case <-copied: + t.Fatal("copy completed while locked!") + case <-time.After(100 * time.Millisecond): + v.Unlock() + } + + <-copied + + // test that the mutex is in the correct state + result.(lockedMap).Lock() + result.(lockedMap).Unlock() + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +// Use an RLock if available +type RLocker struct { + sync.RWMutex + Map map[int]int +} + +func TestCopy_rLocker(t *testing.T) { + v := &RLocker{ + Map: map[int]int{1: 2}, + } + v.Lock() + + var result interface{} + var err error + + copied := make(chan bool) + + go func() { + result, err = Config{Lock: true}.Copy(v) + close(copied) + }() + + // pause slightly to make sure copying is blocked + select { + case <-copied: + t.Fatal("copy completed while locked!") + case <-time.After(100 * time.Millisecond): + v.Unlock() + } + + <-copied + + // test that the mutex is in the correct state + vCopy := result.(*RLocker) + vCopy.Lock() + vCopy.Unlock() + vCopy.RLock() + vCopy.RUnlock() + + // now make sure we can copy during an RLock + v.RLock() + result, err = Config{Lock: true}.Copy(v) + if err != nil { + t.Fatal(err) + } + v.RUnlock() + + vCopy = result.(*RLocker) + vCopy.Lock() + vCopy.Unlock() + vCopy.RLock() + vCopy.RUnlock() + + if !reflect.DeepEqual(result, v) { + t.Fatalf("bad: %#v", result) + } +} + +// Test that we don't panic when encountering nil Lockers +func TestCopy_missingLockedField(t *testing.T) { + v := &LockedField{ + String: "orig", + } + + result, err := Config{Lock: true}.Copy(v) + + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("expected:\n%#v\nbad:\n%#v\n", v, result) + } +} + +type PointerLocker struct { + Mu sync.Mutex +} + +func (p *PointerLocker) Lock() { p.Mu.Lock() } +func (p *PointerLocker) Unlock() { p.Mu.Unlock() } + +func TestCopy_pointerLockerNil(t *testing.T) { + v := struct { + P *PointerLocker + }{} + + _, err := Config{Lock: true}.Copy(&v) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestCopy_sliceWithNil(t *testing.T) { + v := [](*int){nil} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("expected:\n%#v\ngot:\n%#v", v, result) + } +} + +func TestCopy_mapWithNil(t *testing.T) { + v := 
map[int](*int){0: nil} + + result, err := Copy(v) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(result, v) { + t.Fatalf("expected:\n%#v\ngot:\n%#v", v, result) + } +} + +// While this is safe to lock and copy directly, copystructure requires a +// pointer to reflect the value safely. +func TestCopy_valueWithLockPointer(t *testing.T) { + v := struct { + *sync.Mutex + X int + }{ + Mutex: &sync.Mutex{}, + X: 3, + } + + _, err := Config{Lock: true}.Copy(v) + + if err != errPointerRequired { + t.Fatalf("expected errPointerRequired, got: %v", err) + } +} + +func TestCopy_mapWithPointers(t *testing.T) { + type T struct { + S string + } + v := map[string]interface{}{ + "a": &T{S: "hello"}, + } + + result, err := Copy(v) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(v, result) { + t.Fatalf("%#v", result) + } +} + +func TestCopy_structWithMapWithPointers(t *testing.T) { + type T struct { + S string + M map[string]interface{} + } + v := &T{ + S: "a", + M: map[string]interface{}{ + "b": &T{ + S: "b", + }, + }, + } + + result, err := Copy(v) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(v, result) { + t.Fatal(result) + } +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.gitrepo b/vendor/github.com/mitchellh/reflectwalk/.gitrepo new file mode 100644 index 000000000..eb3f00797 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/mitchellh/reflectwalk.git + branch = master + commit = 0c9480f65513be815a88d6076a3d8d95d4274236 + parent = 0e69be47a532a3bf9e43ddc6a14a879735d02d5f + cmdver = 0.3.0 diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
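The copystructure tests above exercise `Config{Lock: true}`, which makes `Copy` acquire any `sync.Locker` it encounters while walking the value, so a concurrently held lock blocks the copy instead of racing it; per `errPointerRequired`, the argument must then be a pointer. A minimal sketch of that pattern outside a test, assuming a hypothetical `Settings` type:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/mitchellh/copystructure"
)

// Settings is an illustrative value guarded by an embedded mutex;
// it is not part of the copystructure package.
type Settings struct {
	sync.Mutex
	Tags map[string]string
}

func main() {
	s := &Settings{Tags: map[string]string{"env": "prod"}}

	// With Lock set, Copy locks s while it walks the struct, and the
	// argument must be a pointer or errPointerRequired is returned.
	dup, err := copystructure.Config{Lock: true}.Copy(s)
	if err != nil {
		panic(err)
	}

	fmt.Println(dup.(*Settings).Tags["env"]) // prod
}
```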
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000..ac82cd2e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000..7c59d764c --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,17 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000..d3cfe8545 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Location location.go; DO NOT EDIT + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} + +func (i Location) String() string { + if i+1 >= Location(len(_Location_index)) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000..ecce023e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,293 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import "reflect" + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) 
+type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer (pointers + // to interfaces are not allowed). + // + // Check whether the value is then an interface. If so, then set pointer + // to true to notify the user. + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + if pointerV.Kind() == reflect.Interface { + pointerV = pointerV.Elem() + } + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func() { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }() + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + if sw, ok := w.(StructWalker); ok { + if err = sw.Struct(v); err != nil { + return + } + } + + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go new file mode 100644 index 000000000..e52546d20 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk_test.go @@ -0,0 +1,486 @@ +package reflectwalk + +import ( + "reflect" + "testing" +) + +type TestEnterExitWalker struct { + Locs []Location +} + +func (t *TestEnterExitWalker) Enter(l Location) error { + if t.Locs == nil { + t.Locs = make([]Location, 0, 5) + } + + t.Locs = append(t.Locs, l) + return nil +} + +func (t *TestEnterExitWalker) Exit(l Location) error { + t.Locs = append(t.Locs, l) + return nil +} + +type TestPointerWalker struct { + Ps []bool +} + +func (t *TestPointerWalker) PointerEnter(v bool) error { + t.Ps = append(t.Ps, v) + return nil +} + +func (t 
*TestPointerWalker) PointerExit(v bool) error { + return nil +} + +type TestPrimitiveWalker struct { + Value reflect.Value +} + +func (t *TestPrimitiveWalker) Primitive(v reflect.Value) error { + t.Value = v + return nil +} + +type TestPrimitiveCountWalker struct { + Count int +} + +func (t *TestPrimitiveCountWalker) Primitive(v reflect.Value) error { + t.Count += 1 + return nil +} + +type TestPrimitiveReplaceWalker struct { + Value reflect.Value +} + +func (t *TestPrimitiveReplaceWalker) Primitive(v reflect.Value) error { + v.Set(reflect.ValueOf("bar")) + return nil +} + +type TestMapWalker struct { + MapVal reflect.Value + Keys map[string]bool + Values map[string]bool +} + +func (t *TestMapWalker) Map(m reflect.Value) error { + t.MapVal = m + return nil +} + +func (t *TestMapWalker) MapElem(m, k, v reflect.Value) error { + if t.Keys == nil { + t.Keys = make(map[string]bool) + t.Values = make(map[string]bool) + } + + t.Keys[k.Interface().(string)] = true + t.Values[v.Interface().(string)] = true + return nil +} + +type TestSliceWalker struct { + Count int + SliceVal reflect.Value +} + +func (t *TestSliceWalker) Slice(v reflect.Value) error { + t.SliceVal = v + return nil +} + +func (t *TestSliceWalker) SliceElem(int, reflect.Value) error { + t.Count++ + return nil +} + +type TestStructWalker struct { + Fields []string +} + +func (t *TestStructWalker) Struct(v reflect.Value) error { + return nil +} + +func (t *TestStructWalker) StructField(sf reflect.StructField, v reflect.Value) error { + if t.Fields == nil { + t.Fields = make([]string, 0, 1) + } + + t.Fields = append(t.Fields, sf.Name) + return nil +} + +func TestTestStructs(t *testing.T) { + var raw interface{} + raw = new(TestEnterExitWalker) + if _, ok := raw.(EnterExitWalker); !ok { + t.Fatal("EnterExitWalker is bad") + } + + raw = new(TestPrimitiveWalker) + if _, ok := raw.(PrimitiveWalker); !ok { + t.Fatal("PrimitiveWalker is bad") + } + + raw = new(TestMapWalker) + if _, ok := raw.(MapWalker); !ok { + t.Fatal("MapWalker is bad") + } + + raw = new(TestSliceWalker) + if _, ok := raw.(SliceWalker); !ok { + t.Fatal("SliceWalker is bad") + } + + raw = new(TestStructWalker) + if _, ok := raw.(StructWalker); !ok { + t.Fatal("StructWalker is bad") + } +} + +func TestWalk_Basic(t *testing.T) { + w := new(TestPrimitiveWalker) + + type S struct { + Foo string + } + + data := &S{ + Foo: "foo", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if w.Value.Kind() != reflect.String { + t.Fatalf("bad: %#v", w.Value) + } +} + +func TestWalk_Basic_Replace(t *testing.T) { + w := new(TestPrimitiveReplaceWalker) + + type S struct { + Foo string + Bar []interface{} + } + + data := &S{ + Foo: "foo", + Bar: []interface{}{[]string{"what"}}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if data.Foo != "bar" { + t.Fatalf("bad: %#v", data.Foo) + } + if data.Bar[0].([]string)[0] != "bar" { + t.Fatalf("bad: %#v", data.Bar) + } +} + +func TestWalk_Basic_ReplaceInterface(t *testing.T) { + w := new(TestPrimitiveReplaceWalker) + + type S struct { + Foo []interface{} + } + + data := &S{ + Foo: []interface{}{"foo"}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestWalk_EnterExit(t *testing.T) { + w := new(TestEnterExitWalker) + + type S struct { + A string + M map[string]string + } + + data := &S{ + A: "foo", + M: map[string]string{ + "a": "b", + }, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []Location{ + 
WalkLoc, + Struct, + StructField, + StructField, + StructField, + Map, + MapKey, + MapKey, + MapValue, + MapValue, + Map, + StructField, + Struct, + WalkLoc, + } + if !reflect.DeepEqual(w.Locs, expected) { + t.Fatalf("Bad: %#v", w.Locs) + } +} + +func TestWalk_Interface(t *testing.T) { + w := new(TestPrimitiveCountWalker) + + type S struct { + Foo string + Bar []interface{} + } + + var data interface{} = &S{ + Foo: "foo", + Bar: []interface{}{[]string{"bar", "what"}, "baz"}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if w.Count != 4 { + t.Fatalf("bad: %#v", w.Count) + } +} + +func TestWalk_Interface_nil(t *testing.T) { + w := new(TestPrimitiveCountWalker) + + type S struct { + Bar interface{} + } + + var data interface{} = &S{} + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestWalk_Map(t *testing.T) { + w := new(TestMapWalker) + + type S struct { + Foo map[string]string + } + + data := &S{ + Foo: map[string]string{ + "foo": "foov", + "bar": "barv", + }, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.MapVal.Interface(), data.Foo) { + t.Fatalf("Bad: %#v", w.MapVal.Interface()) + } + + expectedK := map[string]bool{"foo": true, "bar": true} + if !reflect.DeepEqual(w.Keys, expectedK) { + t.Fatalf("Bad keys: %#v", w.Keys) + } + + expectedV := map[string]bool{"foov": true, "barv": true} + if !reflect.DeepEqual(w.Values, expectedV) { + t.Fatalf("Bad values: %#v", w.Values) + } +} + +func TestWalk_Pointer(t *testing.T) { + w := new(TestPointerWalker) + + type S struct { + Foo string + } + + data := &S{ + Foo: "foo", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []bool{true, false} + if !reflect.DeepEqual(w.Ps, expected) { + t.Fatalf("bad: %#v", w.Ps) + } +} + +func TestWalk_Slice(t *testing.T) { + w := new(TestSliceWalker) + + type S struct { + Foo []string + } + + data := &S{ + Foo: []string{"a", "b", "c"}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.SliceVal.Interface(), data.Foo) { + t.Fatalf("bad: %#v", w.SliceVal.Interface()) + } + + if w.Count != 3 { + t.Fatalf("Bad count: %d", w.Count) + } +} + +func TestWalk_SliceWithPtr(t *testing.T) { + w := new(TestSliceWalker) + + // This is key, the panic only happened when the slice field was + // an interface! 
+ type I interface{} + + type S struct { + Foo []I + } + + type Empty struct{} + + data := &S{ + Foo: []I{&Empty{}}, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.SliceVal.Interface(), data.Foo) { + t.Fatalf("bad: %#v", w.SliceVal.Interface()) + } + + if w.Count != 1 { + t.Fatalf("Bad count: %d", w.Count) + } +} + +func TestWalk_Struct(t *testing.T) { + w := new(TestStructWalker) + + type S struct { + Foo string + Bar string + } + + data := &S{ + Foo: "foo", + Bar: "bar", + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"Foo", "Bar"} + if !reflect.DeepEqual(w.Fields, expected) { + t.Fatalf("bad: %#v", w.Fields) + } +} + +type TestInterfaceMapWalker struct { + MapVal reflect.Value + Keys map[string]bool + Values map[interface{}]bool +} + +func (t *TestInterfaceMapWalker) Map(m reflect.Value) error { + t.MapVal = m + return nil +} + +func (t *TestInterfaceMapWalker) MapElem(m, k, v reflect.Value) error { + if t.Keys == nil { + t.Keys = make(map[string]bool) + t.Values = make(map[interface{}]bool) + } + + t.Keys[k.Interface().(string)] = true + t.Values[v.Interface()] = true + return nil +} + +func TestWalk_MapWithPointers(t *testing.T) { + w := new(TestInterfaceMapWalker) + + type S struct { + Foo map[string]interface{} + } + + a := "a" + b := "b" + + data := &S{ + Foo: map[string]interface{}{ + "foo": &a, + "bar": &b, + "baz": 11, + "zab": (*int)(nil), + }, + } + + err := Walk(data, w) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(w.MapVal.Interface(), data.Foo) { + t.Fatalf("Bad: %#v", w.MapVal.Interface()) + } + + expectedK := map[string]bool{"foo": true, "bar": true, "baz": true, "zab": true} + if !reflect.DeepEqual(w.Keys, expectedK) { + t.Fatalf("Bad keys: %#v", w.Keys) + } + + expectedV := map[interface{}]bool{&a: true, &b: true, 11: true, (*int)(nil): true} + if !reflect.DeepEqual(w.Values, expectedV) { + t.Fatalf("Bad values: %#v", w.Values) + } +}
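The reflectwalk tests end here; to round out the picture, a minimal sketch (illustrative types and names, not from the upstream repository) of how `Walk` drives a custom walker — a `StructWalker` that records every field name it visits, in the same style as `TestStructWalker` above:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// fieldLister implements reflectwalk.StructWalker and collects the
// name of every struct field encountered during the walk.
type fieldLister struct {
	Fields []string
}

func (f *fieldLister) Struct(reflect.Value) error { return nil }

func (f *fieldLister) StructField(sf reflect.StructField, _ reflect.Value) error {
	f.Fields = append(f.Fields, sf.Name)
	return nil
}

func main() {
	type Endpoint struct {
		Host string
		Port int
	}
	type Config struct {
		Name     string
		Upstream Endpoint
	}

	w := &fieldLister{}
	if err := reflectwalk.Walk(&Config{}, w); err != nil {
		panic(err)
	}
	// Walk recurses into the nested struct, so its fields show up too.
	fmt.Println(w.Fields) // [Name Upstream Host Port]
}
```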