From 0da572289eded9589bff3b0585f2277b7b0387a8 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Fri, 11 May 2018 20:04:48 -0300 Subject: [PATCH 01/10] tests(healthchecks) harden a flaky healthcheck test case The regression test for issue #3304 was flaky because it launches two Kong nodes and it waited for the second one to be ready by reading the logs. This is not a reliable way of determining if a node is immediately ready to proxy a configured route. Reversing the order of proxy calls in the test made it fail more consistently, which helped debugging the issue. This changes the check to verify if the router has been rebuilt, using a dummy route for triggering the routing rebuild before the proper test starts. (Thanks @thibaultcha for the idea!) The changes are also backported to `spec-old-api/`. From #3454 --- .../05-proxy/09-balancer_spec.lua | 65 ++++++++++++------- .../05-proxy/09-balancer_spec.lua | 51 +++++++++------ spec/helpers.lua | 4 +- 3 files changed, 75 insertions(+), 45 deletions(-) diff --git a/spec-old-api/02-integration/05-proxy/09-balancer_spec.lua b/spec-old-api/02-integration/05-proxy/09-balancer_spec.lua index 5153430f7a6..7b56c4b0dc1 100644 --- a/spec-old-api/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec-old-api/02-integration/05-proxy/09-balancer_spec.lua @@ -302,6 +302,7 @@ end local add_upstream local patch_upstream local get_upstream_health +local get_router_version local add_target local add_api local patch_api @@ -318,8 +319,8 @@ do end end - local function api_send(method, path, body) - local api_client = helpers.admin_client() + local function api_send(method, path, body, forced_port) + local api_client = helpers.admin_client(nil, forced_port) local res, err = api_client:send({ method = method, path = path, @@ -353,8 +354,17 @@ do get_upstream_health = function(upstream_name) local path = "/upstreams/" .. 
upstream_name .."/health" local status, body = api_send("GET", path) - assert.same(200, status) - return body + if status == 200 then + return body + end + end + + get_router_version = function(forced_port) + local path = "/cache/api_router:version" + local status, body = api_send("GET", path, nil, forced_port) + if status == 200 then + return body.message + end end do @@ -419,9 +429,11 @@ local function poll_wait_health(upstream_name, localhost, port, value) local expire = ngx.now() + hard_timeout while ngx.now() < expire do local health = get_upstream_health(upstream_name) - for _, d in ipairs(health.data) do - if d.target == localhost .. ":" .. port and d.health == value then - return + if health then + for _, d in ipairs(health.data) do + if d.target == localhost .. ":" .. port and d.health == value then + return + end end end ngx.sleep(0.01) -- poll-wait @@ -429,16 +441,20 @@ local function poll_wait_health(upstream_name, localhost, port, value) end -local function file_contains(filename, searched) - local fd = assert(io.open(filename, "r")) - for line in fd:lines() do - if line:find(searched, 1, true) then - fd:close() - return true - end - end - fd:close() - return false +local function wait_for_router_update(old_rv, localhost, proxy_port, admin_port) + -- add dummy upstream just to rebuild router + local dummy_upstream_name = add_upstream() + local dummy_port = add_target(dummy_upstream_name, localhost) + local dummy_api_host = add_api(dummy_upstream_name) + local dummy_server = http_server(localhost, dummy_port, { math.huge }) + + helpers.wait_until(function() + client_requests(1, dummy_api_host, "127.0.0.1", proxy_port) + local rv = get_router_version(admin_port) + return rv ~= old_rv + end, 10) + + dummy_server:done() end @@ -448,6 +464,7 @@ local localhosts = { hostname = "localhost", } + for _, strategy in helpers.each_strategy() do describe("Ring-balancer #" .. 
strategy, function() @@ -493,24 +510,24 @@ for _, strategy in helpers.each_strategy() do it("does not perform health checks when disabled (#3304)", function() - local upstream_name = add_upstream({}) + local old_rv = get_router_version(9011) + + local upstream_name = add_upstream() local port = add_target(upstream_name, localhost) local api_host = add_api(upstream_name) - helpers.wait_until(function() - return file_contains("servroot2/logs/error.log", "balancer:targets") - end, 10) + wait_for_router_update(old_rv, localhost, 9010, 9011) -- server responds, then fails, then responds again local server = http_server(localhost, port, { 20, 20, 20 }) local seq = { - { port = 9000, oks = 10, fails = 0, last_status = 200 }, { port = 9010, oks = 10, fails = 0, last_status = 200 }, - { port = 9000, oks = 0, fails = 10, last_status = 500 }, - { port = 9010, oks = 0, fails = 10, last_status = 500 }, { port = 9000, oks = 10, fails = 0, last_status = 200 }, + { port = 9010, oks = 0, fails = 10, last_status = 500 }, + { port = 9000, oks = 0, fails = 10, last_status = 500 }, { port = 9010, oks = 10, fails = 0, last_status = 200 }, + { port = 9000, oks = 10, fails = 0, last_status = 200 }, } for i, test in ipairs(seq) do local oks, fails, last_status = client_requests(10, api_host, "127.0.0.1", test.port) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index 31414b3d536..35ac9970b9e 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -302,6 +302,7 @@ end local add_upstream local patch_upstream local get_upstream_health +local get_router_version local add_target local add_api local patch_api @@ -318,8 +319,8 @@ do end end - local function api_send(method, path, body) - local api_client = helpers.admin_client() + local function api_send(method, path, body, forced_port) + local api_client = helpers.admin_client(nil, forced_port) local res, err = 
api_client:send({ method = method, path = path, @@ -358,6 +359,14 @@ do end end + get_router_version = function(forced_port) + local path = "/cache/router:version" + local status, body = api_send("GET", path, nil, forced_port) + if status == 200 then + return body.message + end + end + do local port = FIRST_PORT gen_port = function() @@ -436,16 +445,20 @@ local function poll_wait_health(upstream_name, localhost, port, value) end -local function file_contains(filename, searched) - local fd = assert(io.open(filename, "r")) - for line in fd:lines() do - if line:find(searched, 1, true) then - fd:close() - return true - end - end - fd:close() - return false +local function wait_for_router_update(old_rv, localhost, proxy_port, admin_port) + -- add dummy upstream just to rebuild router + local dummy_upstream_name = add_upstream() + local dummy_port = add_target(dummy_upstream_name, localhost) + local dummy_api_host = add_api(dummy_upstream_name) + local dummy_server = http_server(localhost, dummy_port, { math.huge }) + + helpers.wait_until(function() + client_requests(1, dummy_api_host, "127.0.0.1", proxy_port) + local rv = get_router_version(admin_port) + return rv ~= old_rv + end, 10) + + dummy_server:done() end @@ -501,24 +514,24 @@ for _, strategy in helpers.each_strategy() do it("does not perform health checks when disabled (#3304)", function() - local upstream_name = add_upstream({}) + local old_rv = get_router_version(9011) + + local upstream_name = add_upstream() local port = add_target(upstream_name, localhost) local api_host = add_api(upstream_name) - helpers.wait_until(function() - return file_contains("servroot2/logs/error.log", "balancer:targets") - end, 10) + wait_for_router_update(old_rv, localhost, 9010, 9011) -- server responds, then fails, then responds again local server = http_server(localhost, port, { 20, 20, 20 }) local seq = { - { port = 9000, oks = 10, fails = 0, last_status = 200 }, { port = 9010, oks = 10, fails = 0, last_status = 200 }, - { port 
= 9000, oks = 0, fails = 10, last_status = 500 }, - { port = 9010, oks = 0, fails = 10, last_status = 500 }, { port = 9000, oks = 10, fails = 0, last_status = 200 }, + { port = 9010, oks = 0, fails = 10, last_status = 500 }, + { port = 9000, oks = 0, fails = 10, last_status = 500 }, { port = 9010, oks = 10, fails = 0, last_status = 200 }, + { port = 9000, oks = 10, fails = 0, last_status = 200 }, } for i, test in ipairs(seq) do local oks, fails, last_status = client_requests(10, api_host, "127.0.0.1", test.port) diff --git a/spec/helpers.lua b/spec/helpers.lua index f3299047298..54afe067bd0 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -395,7 +395,7 @@ end --- returns a pre-configured `http_client` for the Kong admin port. -- @name admin_client -local function admin_client(timeout) +local function admin_client(timeout, forced_port) local admin_ip, admin_port for _, entry in ipairs(conf.admin_listeners) do if entry.ssl == false then @@ -404,7 +404,7 @@ local function admin_client(timeout) end end assert(admin_ip, "No http-admin found in the configuration") - return http_client(admin_ip, admin_port, timeout) + return http_client(admin_ip, forced_port or admin_port, timeout) end --- returns a pre-configured `http_client` for the Kong admin SSL port. From b79bc15c73a5a9c661b341508bef9293b11f3ddf Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Sun, 13 May 2018 13:44:47 -0700 Subject: [PATCH 02/10] docs(*) reflect org renaming in Gitter and Vagrant links --- CONTRIBUTING.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5ed01b09a98..7e3cb37df46 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,7 +56,7 @@ or the maintainers of this project: asynchronous and lengthy chatter and staying up-to-date with the latest announcements or usage tips - Gitter, for faster, but more ephemeral conversations. 
The room is - hosted at https://gitter.im/Mashape/kong + hosted at https://gitter.im/Kong/kong - The IRC channel, registered on freenode as [#kong ](https://webchat.freenode.net/?channels=kong) diff --git a/README.md b/README.md index a432d351c7f..4745a363cb1 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ Reference](https://getkong.org/docs/latest/lua-reference/). #### Vagrant You can use a Vagrant box running Kong and Postgres that you can find at -[Mashape/kong-vagrant](https://github.com/Kong/kong-vagrant). +[Kong/kong-vagrant](https://github.com/Kong/kong-vagrant). #### Source Install From 607bd47ecdd56f41fdb66b11b320cb604074f115 Mon Sep 17 00:00:00 2001 From: Pas Date: Tue, 15 May 2018 20:18:34 +0200 Subject: [PATCH 03/10] fix(dao) ensure ScyllaDB compatibility in C* core migrations Explicitly drop index before dropping table. Note: ScyllaDB is not an officially supported target for Kong as of today. See #754 From #3457 --- kong/dao/migrations/cassandra.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/kong/dao/migrations/cassandra.lua b/kong/dao/migrations/cassandra.lua index 6c86592e6e3..7f276ca9294 100644 --- a/kong/dao/migrations/cassandra.lua +++ b/kong/dao/migrations/cassandra.lua @@ -487,6 +487,7 @@ return { { name = "2017-05-19-173100_remove_nodes_table", up = [[ + DROP INDEX IF EXISTS nodes_cluster_listening_address_idx; DROP TABLE nodes; ]], }, From b29710db10410263dfb57ef90abab66dd66b23ba Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Wed, 16 May 2018 22:04:53 -0300 Subject: [PATCH 04/10] fix(healthchecks) ensure health posted via cluster_events can be read The problem was that when an upstream health update is propagated via cluster events, only the upstream name (and not the id) was being forwarded. This resulted in `post_health` having the correct behaviour in the Kong node where the `/healthy` or `/unhealthy` endpoint was used, but the information wasn't propagated to the cluster as advertised. 
This also includes a regression test (also, replaced hard-coded port numbers with variables in the multi-node tests). From #3464 --- kong/api/routes/upstreams.lua | 4 +- kong/runloop/balancer.lua | 2 +- kong/runloop/handler.lua | 5 +- .../05-proxy/09-balancer_spec.lua | 67 ++++++++++++++----- 4 files changed, 56 insertions(+), 22 deletions(-) diff --git a/kong/api/routes/upstreams.lua b/kong/api/routes/upstreams.lua index 644ace34a62..cc10456d40b 100644 --- a/kong/api/routes/upstreams.lua +++ b/kong/api/routes/upstreams.lua @@ -84,7 +84,9 @@ local function post_health(is_healthy) end local health = is_healthy and 1 or 0 - local packet = ("%s|%d|%d|%s"):format(ip, port, health, self.upstream.name) + local packet = ("%s|%d|%d|%s|%s"):format(ip, port, health, + self.upstream.id, + self.upstream.name) cluster_events:broadcast("balancer:post_health", packet) return responses.send_HTTP_NO_CONTENT() diff --git a/kong/runloop/balancer.lua b/kong/runloop/balancer.lua index 83ccd7b9dba..e38565e4bd4 100644 --- a/kong/runloop/balancer.lua +++ b/kong/runloop/balancer.lua @@ -799,7 +799,7 @@ end -------------------------------------------------------------------------------- -- Update health status and broadcast to workers --- @param upstream a table with upstream data +-- @param upstream a table with upstream data: must have `name` and `id` -- @param hostname target hostname -- @param port target port -- @param is_healthy boolean: true if healthy, false if unhealthy diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 162477493e5..9d3f6acaa94 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -336,9 +336,10 @@ return { -- manual health updates cluster_events:subscribe("balancer:post_health", function(data) - local ip, port, health, name = data:match("([^|]+)|([^|]+)|([^|]+)|(.*)") + local pattern = "([^|]+)|([^|]+)|([^|]+)|([^|]+)|(.*)" + local ip, port, health, id, name = data:match(pattern) port = tonumber(port) - local upstream = { name 
= name } + local upstream = { id = id, name = name } local ok, err = balancer.post_health(upstream, ip, port, health == "1") if not ok then log(ERR, "failed posting health of ", name, " to workers: ", err) diff --git a/spec/02-integration/05-proxy/09-balancer_spec.lua b/spec/02-integration/05-proxy/09-balancer_spec.lua index 35ac9970b9e..2f85e8d3f3b 100644 --- a/spec/02-integration/05-proxy/09-balancer_spec.lua +++ b/spec/02-integration/05-proxy/09-balancer_spec.lua @@ -351,9 +351,9 @@ do assert.same(200, api_send("PATCH", "/upstreams/" .. upstream_name, data)) end - get_upstream_health = function(upstream_name) + get_upstream_health = function(upstream_name, forced_port) local path = "/upstreams/" .. upstream_name .."/health" - local status, body = api_send("GET", path) + local status, body = api_send("GET", path, nil, forced_port) if status == 200 then return body end @@ -428,20 +428,22 @@ local function truncate_relevant_tables(db, dao) end -local function poll_wait_health(upstream_name, localhost, port, value) +local function poll_wait_health(upstream_name, host, port, value, admin_port) local hard_timeout = 300 local expire = ngx.now() + hard_timeout while ngx.now() < expire do - local health = get_upstream_health(upstream_name) + local health = get_upstream_health(upstream_name, admin_port) if health then for _, d in ipairs(health.data) do - if d.target == localhost .. ":" .. port and d.health == value then + if d.target == host .. ":" .. port and d.health == value then return end end end ngx.sleep(0.01) -- poll-wait end + assert(false, "timed out waiting for " .. host .. ":" .. port .. " in " .. + upstream_name .. " to become " .. 
value) end @@ -490,14 +492,18 @@ for _, strategy in helpers.each_strategy() do describe("#healthchecks (#cluster)", function() + -- second node ports are Kong test ports + 10 + local proxy_port_1 = 9000 + local admin_port_1 = 9001 + local proxy_port_2 = 9010 + local admin_port_2 = 9011 + setup(function() - -- start a second Kong instance (ports are Kong test ports + 10) + -- start a second Kong instance helpers.start_kong({ database = strategy, - admin_listen = "127.0.0.1:9011", - proxy_listen = "127.0.0.1:9010", - proxy_listen_ssl = "127.0.0.1:9453", - admin_listen_ssl = "127.0.0.1:9454", + admin_listen = "127.0.0.1:" .. admin_port_2, + proxy_listen = "127.0.0.1:" .. proxy_port_2, prefix = "servroot2", log_level = "debug", db_update_frequency = 0.1, @@ -514,24 +520,24 @@ for _, strategy in helpers.each_strategy() do it("does not perform health checks when disabled (#3304)", function() - local old_rv = get_router_version(9011) + local old_rv = get_router_version(admin_port_2) local upstream_name = add_upstream() local port = add_target(upstream_name, localhost) local api_host = add_api(upstream_name) - wait_for_router_update(old_rv, localhost, 9010, 9011) + wait_for_router_update(old_rv, localhost, proxy_port_2, admin_port_2) -- server responds, then fails, then responds again local server = http_server(localhost, port, { 20, 20, 20 }) local seq = { - { port = 9010, oks = 10, fails = 0, last_status = 200 }, - { port = 9000, oks = 10, fails = 0, last_status = 200 }, - { port = 9010, oks = 0, fails = 10, last_status = 500 }, - { port = 9000, oks = 0, fails = 10, last_status = 500 }, - { port = 9010, oks = 10, fails = 0, last_status = 200 }, - { port = 9000, oks = 10, fails = 0, last_status = 200 }, + { port = proxy_port_2, oks = 10, fails = 0, last_status = 200 }, + { port = proxy_port_1, oks = 10, fails = 0, last_status = 200 }, + { port = proxy_port_2, oks = 0, fails = 10, last_status = 500 }, + { port = proxy_port_1, oks = 0, fails = 10, last_status = 500 }, + { 
port = proxy_port_2, oks = 10, fails = 0, last_status = 200 }, + { port = proxy_port_1, oks = 10, fails = 0, last_status = 200 }, } for i, test in ipairs(seq) do local oks, fails, last_status = client_requests(10, api_host, "127.0.0.1", test.port) @@ -546,6 +552,31 @@ for _, strategy in helpers.each_strategy() do assert.same(20, server_fails) end) + + it("propagates posted health info", function() + + local old_rv = get_router_version(admin_port_2) + + local upstream_name = add_upstream({ + healthchecks = healthchecks_config {} + }) + local port = add_target(upstream_name, localhost) + + wait_for_router_update(old_rv, localhost, proxy_port_2, admin_port_2) + + local health1 = get_upstream_health(upstream_name, admin_port_1) + local health2 = get_upstream_health(upstream_name, admin_port_2) + + assert.same("HEALTHY", health1.data[1].health) + assert.same("HEALTHY", health2.data[1].health) + + post_target_endpoint(upstream_name, localhost, port, "unhealthy") + + poll_wait_health(upstream_name, localhost, port, "UNHEALTHY", admin_port_1) + poll_wait_health(upstream_name, localhost, port, "UNHEALTHY", admin_port_2) + + end) + end) end end) From a959ed45890b2528a1f69ed003403b40e5bfa565 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Thu, 17 May 2018 14:11:05 +0300 Subject: [PATCH 05/10] docs(readme) move images to konghq.com Signed-off-by: Thibault Charbonnier --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4745a363cb1..9f225cc3e12 100644 --- a/README.md +++ b/README.md @@ -246,9 +246,8 @@ limitations under the License. 
``` [kong-url]: https://konghq.com/ -[kong-logo]: https://d2ffutrenqvap3.cloudfront.net/items/1946191x3s1H0M2u3J18/slack-imgs.png -[kong-benefits]: https://cl.ly/002i2Z432A1s/Image%202017-10-16%20at%2012.30.08%20AM.png -[google-groups-url]: https://groups.google.com/forum/#!forum/konglayer +[kong-logo]: https://konghq.com/wp-content/uploads/2018/05/kong-logo-github-readme.png +[kong-benefits]: https://konghq.com/wp-content/uploads/2018/05/kong-benefits-github-readme.png [badge-travis-url]: https://travis-ci.org/Kong/kong/branches [badge-travis-image]: https://travis-ci.org/Kong/kong.svg?branch=master From 63624f9943f3cd046d184ac0d6b7c460cb013c55 Mon Sep 17 00:00:00 2001 From: Darren Jennings Date: Thu, 17 May 2018 10:14:04 -0700 Subject: [PATCH 06/10] docs(config) add lua_package_path default value From #3470 Signed-off-by: Thibault Charbonnier --- kong.conf.default | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kong.conf.default b/kong.conf.default index 96ba0f16f99..db9793020b0 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -485,10 +485,10 @@ # This includes the certificates configured # for Kong's database connections. -#lua_package_path = # Sets the Lua module search path (LUA_PATH). - # Useful when developing or using custom - # plugins not stored in the default search - # path. +#lua_package_path = ./?.lua;./?/init.lua; # Sets the Lua module search path + # (LUA_PATH). Useful when developing + # or using custom plugins not stored + # in the default search path. #lua_package_cpath = # Sets the Lua C module search path # (LUA_CPATH). From 96071b4f91a75eb384cfe53c9e1a6d2ba1ab6d0c Mon Sep 17 00:00:00 2001 From: Mark van Holsteijn Date: Sat, 19 May 2018 02:33:37 +0200 Subject: [PATCH 07/10] fix(hmac) ensure empty body validation Changes the hmac-auth request validation logic to pass if: - There is no `Digest` and no body. - There is `Digest` for an empty body and no body. - There is a `Digest` for the body and a valid body. 
This is to make sure that we can put `digest` in the `enforce_headers` for all requests. Fix #3345 Fix #3346 From #3347 --- kong/plugins/hmac-auth/access.lua | 18 ++--- .../20-hmac-auth/03-access_spec.lua | 67 +++++++++++++++++++ 2 files changed, 74 insertions(+), 11 deletions(-) diff --git a/kong/plugins/hmac-auth/access.lua b/kong/plugins/hmac-auth/access.lua index c6ceaca34c4..532a1620eb1 100644 --- a/kong/plugins/hmac-auth/access.lua +++ b/kong/plugins/hmac-auth/access.lua @@ -206,24 +206,20 @@ local function validate_clock_skew(headers, date_header_name, allowed_clock_skew return true end -local function validate_body(digest_recieved) - -- client doesnt want body validation - if not digest_recieved then - return true - end - +local function validate_body(digest_received) req_read_body() local body = req_get_body_data() - -- request must have body as client sent a digest header - if not body then - return false + + if not digest_received then + -- if there is no digest and no body, it is ok + return not body end local sha256 = resty_sha256:new() - sha256:update(body) + sha256:update(body or '') local digest_created = "SHA-256=" .. 
ngx_encode_base64(sha256:final()) - return digest_created == digest_recieved + return digest_created == digest_received end local function load_consumer_into_memory(consumer_id, anonymous) diff --git a/spec/03-plugins/20-hmac-auth/03-access_spec.lua b/spec/03-plugins/20-hmac-auth/03-access_spec.lua index 95a6d0ff53b..27f20118d7a 100644 --- a/spec/03-plugins/20-hmac-auth/03-access_spec.lua +++ b/spec/03-plugins/20-hmac-auth/03-access_spec.lua @@ -881,6 +881,73 @@ for _, strategy in helpers.each_strategy() do assert.is_nil(body.headers["x-anonymous-consumer"]) end) + it("should return 403 when body validation enabled and no digest header is present", function() + local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") + local postBody = '{"a":"apple","b":"ball"}' + local sha256 = resty_sha256:new() + sha256:update(postBody) + + local encodedSignature = ngx.encode_base64( + hmac_sha1_binary("secret", "date: "..date)) + local hmacAuth = [["hmac username="bob",algorithm="hmac-sha1",]] + ..[[headers="date",signature="]]..encodedSignature..[["]] + local res = assert(proxy_client:send { + method = "POST", + path = "/request", + body = postBody, + headers = { + ["HOST"] = "hmacauth4.com", + date = date, + authorization = hmacAuth, + } + }) + local body = assert.res_status(403, res) + body = cjson.decode(body) + assert.equal("HMAC signature does not match", body.message) + end) + + it("should return 200 when body validation enabled and no body and no digest header is present", function() + local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") + + local encodedSignature = ngx.encode_base64( + hmac_sha1_binary("secret", "date: "..date)) + local hmacAuth = [["hmac username="bob",algorithm="hmac-sha1",]] + ..[[headers="date",signature="]]..encodedSignature..[["]] + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["HOST"] = "hmacauth4.com", + date = date, + authorization = hmacAuth, + } + }) + assert.res_status(200, res) + end) + + it("should 
return 200 when body validation enabled and no body and an digest header is present", function() + local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") + local sha256 = resty_sha256:new() + sha256:update('') + local digest = "SHA-256=" .. ngx.encode_base64(sha256:final()) + + local encodedSignature = ngx.encode_base64( + hmac_sha1_binary("secret", "date: "..date.."\n".."digest: "..digest)) + local hmacAuth = [["hmac username="bob",algorithm="hmac-sha1",]] + ..[[headers="date digest",signature="]]..encodedSignature..[["]] + local res = assert(proxy_client:send { + method = "GET", + path = "/request", + headers = { + ["HOST"] = "hmacauth4.com", + date = date, + digest = digest, + authorization = hmacAuth, + } + }) + assert.res_status(200, res) + end) + it("should pass with invalid credentials and anonymous", function() local res = assert(proxy_client:send { method = "GET", From dd97bcdf9fa2a5fde21b84b0a35fb1fec702b575 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Tue, 22 May 2018 01:18:15 +0200 Subject: [PATCH 08/10] chore(deps) bump lua-resty-dns-client to 2.1.0 See https://github.com/Kong/lua-resty-dns-client#history From #3478 --- kong-0.13.1-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-0.13.1-0.rockspec b/kong-0.13.1-0.rockspec index c1ee34667aa..d6e690a0eb4 100644 --- a/kong-0.13.1-0.rockspec +++ b/kong-0.13.1-0.rockspec @@ -27,7 +27,7 @@ dependencies = { "luaossl == 20171028", "luasyslog == 1.0.0", "lua_pack == 1.0.5", - "lua-resty-dns-client == 2.0.0", + "lua-resty-dns-client == 2.1.0", "lua-resty-worker-events == 0.3.3", "lua-resty-mediador == 0.1.2", "lua-resty-healthcheck == 0.4.0", From 6a0779c9d549192240ccafc2ea25b2175d326aa9 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Mon, 21 May 2018 14:10:28 -0300 Subject: [PATCH 09/10] fix(balancer) avoid needless rebuild of balancer when adding target This bug was introduced when the loop to find `last_equal_index` was changed from looping the new targets list into 
looping the old targets list. Looping the old targets list is more efficient, but the logic for checking the index needed to be updated. Includes a regression test that verifies that adding a target works and does not rebuild the balancer. --- kong/runloop/balancer.lua | 5 +++-- spec/01-unit/011-balancer_spec.lua | 36 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/kong/runloop/balancer.lua b/kong/runloop/balancer.lua index e38565e4bd4..c7ba48cab9b 100644 --- a/kong/runloop/balancer.lua +++ b/kong/runloop/balancer.lua @@ -416,8 +416,9 @@ local function check_target_history(upstream, balancer) -- compare balancer history with db-loaded history local last_equal_index = 0 -- last index where history is the same for i, entry in ipairs(old_history) do - if entry.order ~= (new_history[i] or EMPTY_T).order then - last_equal_index = i - 1 + if new_history[i] and entry.order == new_history[i].order then + last_equal_index = i + else break end end diff --git a/spec/01-unit/011-balancer_spec.lua b/spec/01-unit/011-balancer_spec.lua index 6df27fb49dc..726fc58085e 100644 --- a/spec/01-unit/011-balancer_spec.lua +++ b/spec/01-unit/011-balancer_spec.lua @@ -8,6 +8,7 @@ describe("Balancer", function() local uuid = require("kong.tools.utils").uuid local upstream_hc local upstream_ph + local upstream_ote teardown(function() ngx.log:revert() @@ -78,9 +79,11 @@ describe("Balancer", function() [6] = { id = "f", name = "upstream_f", slots = 10, healthchecks = hc_defaults }, [7] = { id = "hc", name = "upstream_hc", slots = 10, healthchecks = passive_hc }, [8] = { id = "ph", name = "upstream_ph", slots = 10, healthchecks = passive_hc }, + [9] = { id = "ote", name = "upstream_ote", slots = 10, healthchecks = hc_defaults }, } upstream_hc = UPSTREAMS_FIXTURES[7] upstream_ph = UPSTREAMS_FIXTURES[8] + upstream_ote = UPSTREAMS_FIXTURES[9] TARGETS_FIXTURES = { -- 1st upstream; a @@ -187,6 +190,14 @@ describe("Balancer", function() target = 
"127.0.0.1:2222", weight = 10, }, + -- upstream_ote + { + id = "ote1", + created_at = "001", + upstream_id = "ote", + target = "localhost:1111", + weight = 10, + }, } local function find_all_in_fixture_fn(fixture) @@ -374,6 +385,31 @@ describe("Balancer", function() end) end) + describe("on_target_event()", function() + setup(function() + balancer._load_targets_into_memory("ote") + end) + + it("adding a target does not recreate a balancer", function() + local b1 = balancer._create_balancer(upstream_ote) + assert.same(1, #(balancer._get_target_history(b1))) + + table.insert(TARGETS_FIXTURES, { + id = "ote2", + created_at = "002", + upstream_id = "ote", + target = "localhost:1112", + weight = 10, + }) + balancer.on_target_event("create", { upstream_id = "ote" }) + + local b2 = balancer._create_balancer(upstream_ote) + assert.same(2, #(balancer._get_target_history(b2))) + + assert(b1 == b2) + end) + end) + describe("post_health()", function() local hc, my_balancer From c8e4942c7e4b25428d29585835e577d6d71a8080 Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Wed, 30 May 2018 16:58:16 -0300 Subject: [PATCH 10/10] chore(deps) bump lua-resty-healthcheck to 0.4.2 * Send `Host` header in probe requests during active health checking. * Refactor health check counters management. See: https://github.com/Kong/lua-resty-healthcheck#042-23-may-2018 From #3496 Signed-off-by: Thibault Charbonnier --- kong-0.13.1-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-0.13.1-0.rockspec b/kong-0.13.1-0.rockspec index d6e690a0eb4..116fb57058b 100644 --- a/kong-0.13.1-0.rockspec +++ b/kong-0.13.1-0.rockspec @@ -30,7 +30,7 @@ dependencies = { "lua-resty-dns-client == 2.1.0", "lua-resty-worker-events == 0.3.3", "lua-resty-mediador == 0.1.2", - "lua-resty-healthcheck == 0.4.0", + "lua-resty-healthcheck == 0.4.2", "lua-resty-mlcache == 2.0.2", } build = {