diff --git a/README.md b/README.md index 3c09bb8..d0dcab3 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,6 @@ -[![Build Status][badge-travis-image]][badge-travis-url] +# Kong proxy-cache-redis plugin -# Kong proxy-cache plugin - -HTTP Proxy Caching for Kong +HTTP Proxy Redis Caching for Kong ## Synopsis @@ -14,9 +12,38 @@ to the same resource will re-fetch and re-store the resource. Cache entities can also be forcefully purged via the Admin API prior to their expiration time. +It stores the cached responses in a Redis server. + +## Cache TTL + +TTL for serving the cached data. Kong sends an `X-Cache-Status` header with the value `Refresh` if the resource was found in the cache but could not satisfy the request, due to Cache-Control behaviors or because the configured `cache_ttl` threshold was reached. + +## Storage TTL +Kong can store resource entities in the storage engine longer than the prescribed `cache_ttl` or Cache-Control values indicate, which lets it maintain a cached copy of a resource past its expiration. This allows clients capable of using `max-age` and `max-stale` headers to request stale copies of data if necessary. + ## Documentation -* [Documentation for the Proxy Cache plugin](https://docs.konghq.com/hub/kong-inc/proxy-cache/) +The plugin works the same way as the official `proxy-cache` plugin in terms of how it generates the cache key and how it is assigned to a service or route. [Documentation for the Proxy Cache plugin](https://docs.konghq.com/hub/kong-inc/proxy-cache/) + +## Configuration -[badge-travis-url]: https://travis-ci.com/Kong/kong-plugin-proxy-cache/branches -[badge-travis-image]: https://travis-ci.com/Kong/kong-plugin-proxy-cache.svg?token=BfzyBZDa3icGPsKGmBHb&branch=master +|Parameter|Type|Required|Default|Description| +|---|---|---|---|---| +`name`|string|*required*| |The name of the plugin to use, in this case: `proxy-cache-redis` +`service.id`|string|*optional*| |The ID of the Service the plugin targets. +`route.id`|string|*optional*| |The ID of the Route the plugin targets. +`consumer.id`|string|*optional*| |The ID of the Consumer the plugin targets. +`enabled`|boolean|*optional*|true|Whether this plugin will be applied. +`config.response_code`|array of integers|*required*|[200, 301, 404]|Upstream response status codes considered cacheable. +`config.request_method`|array of strings|*required*|["GET","HEAD"]|Downstream request methods considered cacheable. +`config.content_type`|array of strings|*required*|["text/plain", "application/json"]|Upstream response content types considered cacheable. The plugin performs an exact match against each specified value; for example, if the upstream is expected to respond with an `application/json; charset=utf-8` content type, the plugin configuration must contain said value or a `Bypass` cache status is returned. +`config.vary_headers`|array of strings|*optional*| |Relevant headers considered for the cache key. If undefined, none of the headers are taken into consideration. +`config.vary_query_params`|array of strings|*optional*| |Relevant query parameters considered for the cache key. If undefined, all query parameters are taken into consideration. +`config.cache_ttl`|integer|*required*|300|TTL, in seconds, of cache resources. +`config.cache_control`|boolean|*required*|false|When enabled, respect the Cache-Control behaviors defined in RFC7234. +`config.storage_ttl`|integer|*required*| |Number of seconds to keep resources in the storage backend. This value is independent of `cache_ttl` or resource TTLs defined by Cache-Control behaviors. 
The resources may be stored for up to `storage_ttl` seconds but served only for `cache_ttl`. +`config.redis_host`|string|*required*| |The hostname or IP address of the Redis server. +`config.redis_port`|integer|*optional*|6379|The port of the Redis server. +`config.redis_timeout`|integer|*optional*|2000|The timeout in milliseconds for the Redis connection. +`config.redis_password`|string|*optional*| |The password (if required) to authenticate to the Redis server. +`config.redis_database`|integer|*optional*|0|The Redis database to use for caching the resources. diff --git a/kong-proxy-cache-plugin-1.3.1-1.rockspec b/kong-proxy-cache-redis-plugin-1.0.0-0.rockspec similarity index 64% rename from kong-proxy-cache-plugin-1.3.1-1.rockspec rename to kong-proxy-cache-redis-plugin-1.0.0-0.rockspec index cae626d..f132aee 100644 --- a/kong-proxy-cache-plugin-1.3.1-1.rockspec +++ b/kong-proxy-cache-redis-plugin-1.0.0-0.rockspec @@ -1,8 +1,8 @@ -package = "kong-proxy-cache-redis-redis-plugin" -version = "1.3.1-1" +package = "kong-proxy-cache-redis-plugin" +version = "1.0.0-0" source = { - url = "git://github.com/ligreman/kong-proxy-cache-redis-redis-plugin" + url = "git://github.com/ligreman/kong-proxy-cache-redis-plugin" } supported_platforms = {"linux", "macosx"} @@ -23,7 +23,6 @@ build = { ["kong.plugins.proxy-cache-redis.cache_key"] = "kong/plugins/proxy-cache-redis/cache_key.lua", ["kong.plugins.proxy-cache-redis.schema"] = "kong/plugins/proxy-cache-redis/schema.lua", ["kong.plugins.proxy-cache-redis.api"] = "kong/plugins/proxy-cache-redis/api.lua", - ["kong.plugins.proxy-cache-redis.strategies"] = "kong/plugins/proxy-cache-redis/strategies/init.lua", - ["kong.plugins.proxy-cache-redis.strategies.memory"] = "kong/plugins/proxy-cache-redis/strategies/memory.lua", + ["kong.plugins.proxy-cache-redis.redis"] = "kong/plugins/proxy-cache-redis/redis.lua", } } diff --git a/kong/plugins/proxy-cache-redis/api.lua b/kong/plugins/proxy-cache-redis/api.lua index bb00dde..c9a5456 100644 --- a/kong/plugins/proxy-cache-redis/api.lua +++ b/kong/plugins/proxy-cache-redis/api.lua @@ -1,198 +1,90 @@ -local STRATEGY_PATH = "kong.plugins.proxy-cache.strategies" - - -local require = require +local redis = require "kong.plugins.proxy-cache-redis.redis" local kong = kong -local fmt = string.format - - -local function broadcast_purge(plugin_id, cache_key) - local data = fmt("%s:%s", plugin_id, cache_key or "nil") - kong.log.debug("broadcasting purge '", data, "'") - return kong.cluster_events:broadcast("proxy-cache:purge", data) -end - - -local function each_proxy_cache() - local iter = kong.db.plugins:each() - - return function() - while true do - local plugin, err = iter() - if err then - return kong.response.exit(500, { message = err }) - end - if not plugin then - return - end - if plugin.name == "proxy-cache" then - return plugin - end - end - end -end - return { - ["/proxy-cache-redis"] = { - resource = "proxy-cache-redis", + ["/plugins/:plugin_id/proxy-cache-redis"] = { - DELETE = function() - for plugin in each_proxy_cache() do + DELETE = function(self) + -- Busco el plugin + local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) - local strategy = require(STRATEGY_PATH)({ - strategy_name = plugin.config.strategy, - strategy_opts = plugin.config[plugin.config.strategy], - }) - - local ok, err = strategy:flush(true) - if not ok then - return kong.response.exit(500, { message = err }) - end - - if require(STRATEGY_PATH).LOCAL_DATA_STRATEGIES[plugin.config.strategy] - then - local ok, err =
broadcast_purge(plugin.id, nil) - if not ok then - kong.log.err("failed broadcasting proxy cache purge to cluster: ", err) - end - end + if errp then + kong.log.err("Error retrieving the plugin: " .. errp) + return nil + end - end + if not plugin then + kong.log.err("Could not find plugin.") + return nil + end - return kong.response.exit(204) - end - }, - ["/proxy-cache-redis/:cache_key"] = { - resource = "proxy-cache-redis", + local ok, err = redis:flush(plugin.config) + if not ok then + return kong.response.exit(500, { message = err }) + end - GET = function(self) - for plugin in each_proxy_cache() do + return kong.response.exit(204) + end + }, + ["/plugins/:plugin_id/proxy-cache-redis/:cache_key"] = { - local strategy = require(STRATEGY_PATH)({ - strategy_name = plugin.config.strategy, - strategy_opts = plugin.config[plugin.config.strategy], - }) + GET = function(self) + -- Busco el plugin + local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) - local cache_val, err = strategy:fetch(self.params.cache_key) - if err and err ~= "request object not in cache" then - return kong.response.exit(500, err) - end + if errp then + kong.log.err("Error retrieving the plugin: " .. errp) + return nil + end - if cache_val then - return kong.response.exit(200, cache_val) - end + if not plugin then + kong.log.err("Could not find plugin.") + return nil + end - end + local cache_val, err = redis:fetch(plugin.config, self.params.cache_key) + if err and err ~= "request object not in cache" then + return kong.response.exit(500, err) + end - -- fell through, not found - return kong.response.exit(404) - end, + if cache_val then + return kong.response.exit(200, cache_val) + end - DELETE = function(self) - for plugin in each_proxy_cache() do + -- fell through, not found + return kong.response.exit(404) + end, - local strategy = require(STRATEGY_PATH)({ - strategy_name = plugin.config.strategy, - strategy_opts = plugin.config[plugin.config.strategy], - }) + DELETE = function(self) + -- Busco el plugin + local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) - local cache_val, err = strategy:fetch(self.params.cache_key) - if err and err ~= "request object not in cache" then - return kong.response.exit(500, err) - end + if errp then + kong.log.err("Error retrieving the plugin: " .. 
errp) + return nil + end - if cache_val then - local _, err = strategy:purge(self.params.cache_key) - if err then - return kong.response.exit(500, err) - end + if not plugin then + kong.log.err("Could not find plugin.") + return nil + end - if require(STRATEGY_PATH).LOCAL_DATA_STRATEGIES[plugin.config.strategy] - then - local ok, err = broadcast_purge(plugin.id, self.params.cache_key) - if not ok then - kong.log.err("failed broadcasting proxy cache purge to cluster: ", err) + local cache_val, err = redis:fetch(plugin.config, self.params.cache_key) + if err and err ~= "request object not in cache" then + return kong.response.exit(500, err) end - end - return kong.response.exit(204) - end + if cache_val then + local _, err2 = redis:purge(plugin.config, self.params.cache_key) + if err2 then + return kong.response.exit(500, err2) + end - end - - -- fell through, not found - return kong.response.exit(404) - end, - }, - ["/proxy-cache-redis/:plugin_id/caches/:cache_key"] = { - resource = "proxy-cache-redis", - - GET = function(self) - local plugin, err = kong.db.plugins:select { - id = self.params.plugin_id, - } - if err then - return kong.response.exit(500, err) - end - - if not plugin then - return kong.response.exit(404) - end - - local conf = plugin.config - local strategy = require(STRATEGY_PATH)({ - strategy_name = conf.strategy, - strategy_opts = conf[conf.strategy], - }) - - local cache_val, err = strategy:fetch(self.params.cache_key) - if err == "request object not in cache" then - return kong.response.exit(404) - elseif err then - return kong.response.exit(500, err) - end - - return kong.response.exit(200, cache_val) - end, - DELETE = function(self) - local plugin, err = kong.db.plugins:select { - id = self.params.plugin_id, - } - if err then - return kong.response.exit(500, err) - end - - if not plugin then - return kong.response.exit(404) - end - - local conf = plugin.config - local strategy = require(STRATEGY_PATH)({ - strategy_name = conf.strategy, - strategy_opts = conf[conf.strategy], - }) - - local _, err = strategy:fetch(self.params.cache_key) - if err == "request object not in cache" then - return kong.response.exit(404) - elseif err then - return kong.response.exit(500, err) - end - - local _, err = strategy:purge(self.params.cache_key) - if err then - return kong.response.exit(500, err) - end - - if require(STRATEGY_PATH).LOCAL_DATA_STRATEGIES[conf.strategy] then - local ok, err = broadcast_purge(plugin.id, self.params.cache_key) - if not ok then - kong.log.err("failed broadcasting proxy cache purge to cluster: ", err) - end - end + return kong.response.exit(204) + end - return kong.response.exit(204) - end - }, + -- fell through, not found + return kong.response.exit(404) + end, + } } diff --git a/kong/plugins/proxy-cache-redis/handler.lua b/kong/plugins/proxy-cache-redis/handler.lua index 30722a3..2cb9b40 100644 --- a/kong/plugins/proxy-cache-redis/handler.lua +++ b/kong/plugins/proxy-cache-redis/handler.lua @@ -1,30 +1,25 @@ -local require = require -local cache_key = require "kong.plugins.proxy-cache-redis.cache_key" -local utils = require "kong.tools.utils" - - -local ngx = ngx -local kong = kong -local type = type -local pairs = pairs -local tostring = tostring -local tonumber = tonumber -local max = math.max -local floor = math.floor -local lower = string.lower -local concat = table.concat -local time = ngx.time -local resp_get_headers = ngx.resp and ngx.resp.get_headers -local ngx_re_gmatch = ngx.re.gmatch -local ngx_re_sub = ngx.re.gsub -local ngx_re_match = 
ngx.re.match -local parse_http_time = ngx.parse_http_time - - +local require = require +local cache_key = require "kong.plugins.proxy-cache-redis.cache_key" +local redis = require "kong.plugins.proxy-cache-redis.redis" local tab_new = require("table.new") +local ngx = ngx +local kong = kong +local type = type +local pairs = pairs +local tostring = tostring +local tonumber = tonumber +local max = math.max +local floor = math.floor +local lower = string.lower +local concat = table.concat +local time = ngx.time +local resp_get_headers = ngx.resp and ngx.resp.get_headers +local ngx_re_gmatch = ngx.re.gmatch +local ngx_re_sub = ngx.re.gsub +local ngx_re_match = ngx.re.match +local parse_http_time = ngx.parse_http_time -local STRATEGY_PATH = "kong.plugins.proxy-cache.strategies" local CACHE_VERSION = 1 local EMPTY = {} @@ -33,412 +28,363 @@ local EMPTY = {} -- note content-length is not strictly hop-by-hop but we will be -- adjusting it here anyhow local hop_by_hop_headers = { - ["connection"] = true, - ["keep-alive"] = true, - ["proxy-authenticate"] = true, - ["proxy-authorization"] = true, - ["te"] = true, - ["trailers"] = true, - ["transfer-encoding"] = true, - ["upgrade"] = true, - ["content-length"] = true, + ["connection"] = true, + ["keep-alive"] = true, + ["proxy-authenticate"] = true, + ["proxy-authorization"] = true, + ["te"] = true, + ["trailers"] = true, + ["transfer-encoding"] = true, + ["upgrade"] = true, + ["content-length"] = true, } - local function overwritable_header(header) - local n_header = lower(header) + local n_header = lower(header) - return not hop_by_hop_headers[n_header] - and not ngx_re_match(n_header, "ratelimit-remaining") + return not hop_by_hop_headers[n_header] + and not ngx_re_match(n_header, "ratelimit-remaining") end - local function parse_directive_header(h) - if not h then - return EMPTY - end - - if type(h) == "table" then - h = concat(h, ", ") - end - - local t = {} - local res = tab_new(3, 0) - local iter = ngx_re_gmatch(h, "([^,]+)", "oj") - - local m = iter() - while m do - local _, err = ngx_re_match(m[0], [[^\s*([^=]+)(?:=(.+))?]], - "oj", nil, res) - if err then - kong.log.err(err) + if not h then + return EMPTY end - -- store the directive token as a numeric value if it looks like a number; - -- otherwise, store the string value. for directives without token, we just - -- set the key to true - t[lower(res[1])] = tonumber(res[2]) or res[2] or true + if type(h) == "table" then + h = concat(h, ", ") + end - m = iter() - end + local t = {} + local res = tab_new(3, 0) + local iter = ngx_re_gmatch(h, "([^,]+)", "oj") - return t -end + local m = iter() + while m do + local _, err = ngx_re_match(m[0], [[^\s*([^=]+)(?:=(.+))?]], + "oj", nil, res) + if err then + kong.log.err(err) + end + -- store the directive token as a numeric value if it looks like a number; + -- otherwise, store the string value. 
for directives without token, we just + -- set the key to true + t[lower(res[1])] = tonumber(res[2]) or res[2] or true -local function req_cc() - return parse_directive_header(ngx.var.http_cache_control) + m = iter() + end + + return t end +local function req_cc() + return parse_directive_header(ngx.var.http_cache_control) +end local function res_cc() - return parse_directive_header(ngx.var.sent_http_cache_control) + return parse_directive_header(ngx.var.sent_http_cache_control) end - local function resource_ttl(res_cc) - local max_age = res_cc["s-maxage"] or res_cc["max-age"] + local max_age = res_cc["s-maxage"] or res_cc["max-age"] - if not max_age then - local expires = ngx.var.sent_http_expires + if not max_age then + local expires = ngx.var.sent_http_expires - -- if multiple Expires headers are present, last one wins - if type(expires) == "table" then - expires = expires[#expires] - end + -- if multiple Expires headers are present, last one wins + if type(expires) == "table" then + expires = expires[#expires] + end - local exp_time = parse_http_time(tostring(expires)) - if exp_time then - max_age = exp_time - time() + local exp_time = parse_http_time(tostring(expires)) + if exp_time then + max_age = exp_time - time() + end end - end - return max_age and max(max_age, 0) or 0 + return max_age and max(max_age, 0) or 0 end - +-- Comprueba si la petición es cacheble local function cacheable_request(conf, cc) - -- TODO refactor these searches to O(1) - do - local method = kong.request.get_method() - local method_match = false - for i = 1, #conf.request_method do - if conf.request_method[i] == method then - method_match = true - break - end + do + local method = kong.request.get_method() + local method_match = false + for i = 1, #conf.request_method do + if conf.request_method[i] == method then + method_match = true + break + end + end + + if not method_match then + return false + end end - if not method_match then - return false + -- check for explicit disallow directives + if conf.cache_control and (cc["no-store"] or cc["no-cache"] or + ngx.var.authorization) then + return false end - end - - -- check for explicit disallow directives - -- TODO note that no-cache isnt quite accurate here - if conf.cache_control and (cc["no-store"] or cc["no-cache"] or - ngx.var.authorization) then - return false - end - return true + return true end - +-- Comprueba si la respuesta es cacheable local function cacheable_response(conf, cc) - -- TODO refactor these searches to O(1) - do - local status = kong.response.get_status() - local status_match = false - for i = 1, #conf.response_code do - if conf.response_code[i] == status then - status_match = true - break - end + do + local status = kong.response.get_status() + local status_match = false + + for i = 1, #conf.response_code do + if conf.response_code[i] == status then + status_match = true + break + end + end + + if not status_match then + return false + end end - if not status_match then - return false + do + local content_type = ngx.var.sent_http_content_type + + -- bail if we cannot examine this content type + if not content_type or type(content_type) == "table" or content_type == "" then + return false + end + + local content_match = false + for i = 1, #conf.content_type do + if conf.content_type[i] == content_type then + content_match = true + break + end + end + + if not content_match then + return false + end end - end - - do - local content_type = ngx.var.sent_http_content_type - -- bail if we cannot examine this content type - if not 
content_type or type(content_type) == "table" or - content_type == "" then - - return false - end - - local content_match = false - for i = 1, #conf.content_type do - if conf.content_type[i] == content_type then - content_match = true - break - end + if conf.cache_control and (cc["private"] or cc["no-store"] or cc["no-cache"]) + then + return false end - if not content_match then - return false + if conf.cache_control and resource_ttl(cc) <= 0 then + return false end - end - - if conf.cache_control and (cc["private"] or cc["no-store"] or cc["no-cache"]) - then - return false - end - if conf.cache_control and resource_ttl(cc) <= 0 then - return false - end - - return true + return true end -- indicate that we should attempt to cache the response to this request -- intentar guardar esta respuesta en caché -local function signal_cache_req(ctx, cache_key, cache_status) - ctx.proxy_cache_redis = { - cache_key = cache_key, - } +local function signal_cache_req(ctx, this_cache_key, cache_status) + ctx.proxy_cache_redis = { + cache_key = this_cache_key, + } - kong.response.set_header("X-Cache-Status", cache_status or "Miss") + kong.response.set_header("X-Cache-Status", cache_status or "Miss") end +-- Guardar un valor en el Store +local function store_cache_value(premature, conf, req_body, status, proxy_cache) + --kong.log.err("%% mira que ttl me viene de conf ", conf.cache_ttl) -local ProxyCacheHandler = { - VERSION = "1.3.1-1", - PRIORITY = 101, -} + local res = { + status = status, + headers = proxy_cache.res_headers, + body = proxy_cache.res_body, + body_len = #proxy_cache.res_body, + timestamp = time(), + ttl = proxy_cache.res_ttl, + version = CACHE_VERSION, + req_body = req_body, + } + local ttl = conf.storage_ttl or conf.cache_control and proxy_cache.res_ttl or conf.cache_ttl --- Executed upon every Nginx worker process’s startup. -function ProxyCacheHandler:init_worker() - -- catch notifications from other nodes that we purged a cache entry - -- only need one worker to handle purges like this - -- if/when we introduce inline LRU caching this needs to involve - -- worker events as well - local unpack = unpack - - kong.cluster_events:subscribe("proxy-cache:purge", function(data) - kong.log.err("handling purge of '", data, "'") - - local plugin_id, cache_key = unpack(utils.split(data, ":")) - local plugin, err = kong.db.plugins:select({ - id = plugin_id, - }) - if err then - kong.log.err("error in retrieving plugins: ", err) - return + -- Almaceno la respuesta y sus datos en caché + local ok, err = redis:store(conf, proxy_cache.cache_key, res, ttl) + if not ok then + kong.log.err(err) end +end - local strategy = require(STRATEGY_PATH)({ - strategy_name = plugin.config.strategy, - strategy_opts = plugin.config[plugin.config.strategy], - }) +local ProxyCacheHandler = { + VERSION = "1.0.0-0", + PRIORITY = 101, +} - if cache_key ~= "nil" then - local ok, err = strategy:purge(cache_key) - if not ok then - kong.log.err("failed to purge cache key '", cache_key, "': ", err) - return - end - else - local ok, err = strategy:flush(true) - if not ok then - kong.log.err("error in flushing cache data: ", err) - end - end - end) +-- Executed upon every Nginx worker process’s startup. +function ProxyCacheHandler:init_worker() end -- Executed for every request from a client and before it is being proxied to the upstream service. 
function ProxyCacheHandler:access(conf) - local cc = req_cc() - - -- if we know this request isnt cacheable, bail out - if not cacheable_request(conf, cc) then - kong.response.set_header("X-Cache-Status", "Bypass") - return - end - - -- construye la clave o hash de esta petición - local consumer = kong.client.get_consumer() - local route = kong.router.get_route() - local uri = ngx_re_sub(ngx.var.request, "\\?.*", "", "oj") - local cache_key = cache_key.build_cache_key(consumer and consumer.id, - route and route.id, - kong.request.get_method(), - uri, - kong.request.get_query(), - kong.request.get_headers(), - conf) - - kong.response.set_header("X-Cache-Key", cache_key) - - -- try to fetch the cached object from the computed cache key - local strategy = require(STRATEGY_PATH)({ - strategy_name = conf.strategy, - strategy_opts = conf[conf.strategy], - }) - - local ctx = kong.ctx.plugin - -- Intenta recoger la caché correspondiente a esta key - local res, err = strategy:fetch(cache_key) - -- Si obtengo un error de que no consigo obtener la cache - if err == "request object not in cache" then -- TODO make this a utils enum err - - -- this request wasn't found in the data store, but the client only wanted - -- cache data. see https://tools.ietf.org/html/rfc7234#section-5.2.1.7 - if conf.cache_control and cc["only-if-cached"] then - return kong.response.exit(ngx.HTTP_GATEWAY_TIMEOUT) - end + kong.ctx.shared.plugin_configuration = conf + + local cc = req_cc() - ctx.req_body = kong.request.get_raw_body() - - -- this request is cacheable but wasn't found in the data store - -- make a note that we should store it in cache later, - -- and pass the request upstream - return signal_cache_req(ctx, cache_key) - - elseif err then - kong.log.err(err) - return - end - - -- Si la versión de los datos cacheados no es la misma que la actual, purgo (para evitar errores) - if res.version ~= CACHE_VERSION then - kong.log.notice("cache format mismatch, purging ", cache_key) - strategy:purge(cache_key) - return signal_cache_req(ctx, cache_key, "Bypass") - end - - -- figure out if the client will accept our cache value - if conf.cache_control then - if cc["max-age"] and time() - res.timestamp > cc["max-age"] then - return signal_cache_req(ctx, cache_key, "Refresh") + -- if we know this request isnt cacheable, bail out + if not cacheable_request(conf, cc) then + kong.response.set_header("X-Cache-Status", "Bypass") + return end - if cc["max-stale"] and time() - res.timestamp - res.ttl > cc["max-stale"] - then - return signal_cache_req(ctx, cache_key, "Refresh") + -- construye la clave o hash de esta petición + local consumer = kong.client.get_consumer() + local route = kong.router.get_route() + local uri = ngx_re_sub(ngx.var.request, "\\?.*", "", "oj") + local cache_key = cache_key.build_cache_key(consumer and consumer.id, + route and route.id, + kong.request.get_method(), + uri, + kong.request.get_query(), + kong.request.get_headers(), + conf) + + kong.response.set_header("X-Cache-Key", cache_key) + + -- try to fetch the cached object from the computed cache key + local ctx = kong.ctx.plugin + -- Intenta recoger la caché correspondiente a esta key + local res, err = redis:fetch(conf, cache_key) + -- Si obtengo un error de que no consigo obtener la cache + if err == "request object not in cache" then + + -- this request wasn't found in the data store, but the client only wanted + -- cache data. 
see https://tools.ietf.org/html/rfc7234#section-5.2.1.7 + if conf.cache_control and cc["only-if-cached"] then + return kong.response.exit(ngx.HTTP_GATEWAY_TIMEOUT) + end + + ctx.req_body = kong.request.get_raw_body() + + -- this request is cacheable but wasn't found in the data store + -- make a note that we should store it in cache later, + -- and pass the request upstream + return signal_cache_req(ctx, cache_key) + + elseif err then + kong.log.err(err) + return end - if cc["min-fresh"] and res.ttl - (time() - res.timestamp) < cc["min-fresh"] - then - return signal_cache_req(ctx, cache_key, "Refresh") + -- Si la versión de los datos cacheados no es la misma que la actual, purgo (para evitar errores) + if res.version ~= CACHE_VERSION then + kong.log.notice("cache format mismatch, purging ", cache_key) + redis:purge(conf, cache_key) + return signal_cache_req(ctx, cache_key, "Bypass") end - else - -- don't serve stale data; res may be stored for up to `conf.storage_ttl` secs - -- no servir datos obsoletos; se guardará res el número de segundos indicados en el ttl - if time() - res.timestamp > conf.cache_ttl then - return signal_cache_req(ctx, cache_key, "Refresh") + -- figure out if the client will accept our cache value + if conf.cache_control then + if cc["max-age"] and time() - res.timestamp > cc["max-age"] then + return signal_cache_req(ctx, cache_key, "Refresh") + end + + if cc["max-stale"] and time() - res.timestamp - res.ttl > cc["max-stale"] + then + return signal_cache_req(ctx, cache_key, "Refresh") + end + + if cc["min-fresh"] and res.ttl - (time() - res.timestamp) < cc["min-fresh"] + then + return signal_cache_req(ctx, cache_key, "Refresh") + end + + else + -- don't serve stale data; res may be stored for up to `conf.storage_ttl` secs but served only for conf.cache_ttl + -- no servir datos obsoletos; se guardará res los segundos indicados en conf.storage_ttl + -- pero sólo se sirven durante conf.cache_ttl + if time() - res.timestamp > conf.cache_ttl then + return signal_cache_req(ctx, cache_key, "Refresh") + end end - end - - -- we have cache data yo! - -- expose response data for logging plugins - local response_data = { - res = res, - req = { - body = res.req_body, - }, - server_addr = ngx.var.server_addr, - } - - kong.ctx.shared.proxy_cache_hit = response_data - - local nctx = ngx.ctx - nctx.proxy_cache_hit = response_data -- TODO: deprecated - nctx.KONG_PROXIED = true - - for k in pairs(res.headers) do - if not overwritable_header(k) then - res.headers[k] = nil + + -- we have cache data yo! + -- expose response data for logging plugins + local response_data = { + res = res, + req = { + body = res.req_body, + }, + server_addr = ngx.var.server_addr, + } + + kong.ctx.shared.proxy_cache_hit = response_data + + local nctx = ngx.ctx + nctx.proxy_cache_hit = response_data + nctx.KONG_PROXIED = true + + for k in pairs(res.headers) do + if not overwritable_header(k) then + res.headers[k] = nil + end end - end - res.headers["Age"] = floor(time() - res.timestamp) - res.headers["X-Cache-Status"] = "Hit" + res.headers["Age"] = floor(time() - res.timestamp) + res.headers["X-Cache-Status"] = "Hit" - return kong.response.exit(res.status, res.body, res.headers) + return kong.response.exit(res.status, res.body, res.headers) end -- Executed when all response headers bytes have been received from the upstream service. 
function ProxyCacheHandler:header_filter(conf) - local ctx = kong.ctx.plugin - local proxy_cache = ctx.proxy_cache_redis - -- don't look at our headers if - -- a) the request wasn't cacheable, or - -- b) the request was served from cache - if not proxy_cache then - return - end - - local cc = res_cc() - - -- if this is a cacheable request, gather the headers and mark it so - if cacheable_response(conf, cc) then - proxy_cache.res_headers = resp_get_headers(0, true) - proxy_cache.res_ttl = conf.cache_control and resource_ttl(cc) or conf.cache_ttl - - else - kong.response.set_header("X-Cache-Status", "Bypass") - ctx.proxy_cache_redis = nil - end - - -- TODO handle Vary header + local ctx = kong.ctx.plugin + local proxy_cache = ctx.proxy_cache_redis + -- don't look at our headers if + -- a) the request wasn't cacheable, or + -- b) the request was served from cache + if not proxy_cache then + return + end + + local cc = res_cc() + + -- if this is a cacheable request, gather the headers and mark it so + if cacheable_response(conf, cc) then + proxy_cache.res_headers = resp_get_headers(0, true) + proxy_cache.res_ttl = conf.cache_control and resource_ttl(cc) or conf.cache_ttl + else + kong.response.set_header("X-Cache-Status", "Bypass") + ctx.proxy_cache_redis = nil + end + + -- TODO handle Vary header end -- Executed for each chunk of the response body received from the upstream service. Since the response is streamed back to the client, -- it can exceed the buffer size and be streamed chunk by chunk. hence this method can be called multiple times if the response is large. function ProxyCacheHandler:body_filter(conf) - local ctx = kong.ctx.plugin - local proxy_cache = ctx.proxy_cache_redis - if not proxy_cache then - return - end - - local chunk = ngx.arg[1] - local eof = ngx.arg[2] - - proxy_cache.res_body = (proxy_cache.res_body or "") .. (chunk or "") + local ctx = kong.ctx.plugin + local proxy_cache = ctx.proxy_cache_redis + if not proxy_cache then + return + end - if eof then - local strategy = require(STRATEGY_PATH)({ - strategy_name = conf.strategy, - strategy_opts = conf[conf.strategy], - }) + local chunk = ngx.arg[1] + local eof = ngx.arg[2] - local res = { - status = kong.response.get_status(), - headers = proxy_cache.res_headers, - body = proxy_cache.res_body, - body_len = #proxy_cache.res_body, - timestamp = time(), - ttl = proxy_cache.res_ttl, - version = CACHE_VERSION, - req_body = ctx.req_body, - } + proxy_cache.res_body = (proxy_cache.res_body or "") .. 
(chunk or "") - local ttl = conf.storage_ttl or conf.cache_control and proxy_cache.res_ttl or - conf.cache_ttl - - -- Almaceno la respuesta y sus datos en caché - local ok, err = strategy:store(proxy_cache.cache_key, res, ttl) - if not ok then - kong.log(err) + if eof then + -- Retardo el guardado ya que en body_filter no puedo hacer conexiones cosocket que son las necesarias para conectar a redis + ngx.timer.at(0, store_cache_value, conf, ctx.req_body, kong.response.get_status(), proxy_cache) end - end end - return ProxyCacheHandler diff --git a/kong/plugins/proxy-cache-redis/redis.lua b/kong/plugins/proxy-cache-redis/redis.lua new file mode 100644 index 0000000..ae86c23 --- /dev/null +++ b/kong/plugins/proxy-cache-redis/redis.lua @@ -0,0 +1,211 @@ +local cjson = require "cjson.safe" +local redis = require "resty.redis" + +local ngx = ngx +local type = type + +local function is_present(str) + return str and str ~= "" and str ~= null +end + +local _M = {} + +-- Conecta a redis +local function red_connect(opts) + local red, err_redis = redis:new() + + if err_redis then + kong.log.err("error connecting to Redis: ", err_redis); + return nil, err_redis + end + + local redis_opts = {} + -- use a special pool name only if database is set to non-zero + -- otherwise use the default pool name host:port + redis_opts.pool = opts.redis_database and opts.redis_host .. ":" .. opts.redis_port .. ":" .. opts.redis_database + + red:set_timeout(opts.redis_timeout) + + -- conecto + local ok, err = red:connect(opts.redis_host, opts.redis_port, redis_opts) + if not ok then + kong.log.err("failed to connect to Redis: ", err) + return nil, err + end + + local times, err2 = red:get_reused_times() + if err2 then + kong.log.err("failed to get connect reused times: ", err2) + return nil, err + end + + if times == 0 then + if is_present(opts.redis_password) then + local ok3, err3 = red:auth(opts.redis_password) + if not ok3 then + kong.log.err("failed to auth Redis: ", err3) + return nil, err + end + end + + if opts.redis_database ~= 0 then + -- Only call select first time, since we know the connection is shared + -- between instances that use the same redis database + local ok4, err4 = red:select(opts.redis_database) + if not ok4 then + kong.log.err("failed to change Redis database: ", err4) + return nil, err + end + end + end + + return red +end + +-- Obtiene un dato de Redis +function _M:fetch(conf, key) + local red, err_redis = red_connect(conf) + + -- Compruebo si he conectado a Redis bien + if not red then + kong.log.err("failed to get the Redis connection: ", err_redis) + return nil, "there is no Redis connection established" + end + + if type(key) ~= "string" then + return nil, "key must be a string" + end + + -- retrieve object from shared dict + local req_json, err = red:get(key) + if req_json == ngx.null then + if not err then + -- devuelvo nulo pero diciendo que no está en la caché, no que haya habido error realmente + -- habrá que guardar la respuesta entonces + return nil, "request object not in cache" + else + return nil, err + end + end + + local ok, err2 = red:set_keepalive(10000, 100) + if not ok then + kong.log.err("failed to set Redis keepalive: ", err2) + return nil, err2 + end + + -- decode object from JSON to table + local req_obj = cjson.decode(req_json) + + if not req_obj then + return nil, "could not decode request object" + end + + return req_obj +end + +-- Guarda un dato en Redis +function _M:store(conf, key, req_obj, req_ttl) + local red, err_redis = red_connect(conf) + + -- Compruebo si 
he conectado a Redis bien + if not red then + kong.log.err("failed to get the Redis connection: ", err_redis) + return nil, "there is no Redis connection established" + end + + local ttl = req_ttl or conf.cache_ttl + + if type(key) ~= "string" then + return nil, "key must be a string" + end + + -- encode request table representation as JSON + local req_json = cjson.encode(req_obj) + if not req_json then + return nil, "could not encode request object" + end + + -- Hago efectivo el guardado + -- inicio la transacción + red:init_pipeline() + -- guardo + red:set(key, req_json) + -- TTL + red:expire(key, ttl) + + -- ejecuto la transacción + local _, err = red:commit_pipeline() + if err then + kong.log.err("failed to commit the cache value to Redis: ", err) + return nil, err + end + + -- keepalive de la conexión: max_timeout, connection pool + local ok, err2 = red:set_keepalive(10000, 100) + if not ok then + kong.log.err("failed to set Redis keepalive: ", err2) + return nil, err2 + end + + return true and req_json or nil, err +end + + +-- Elimina una clave +function _M:purge(conf, key) + local red, err_redis = red_connect(conf) + + -- Compruebo si he conectado a Redis bien + if not red then + kong.log.err("failed to get the Redis connection: ", err_redis) + return nil, "there is no Redis connection established" + end + + if type(key) ~= "string" then + return nil, "key must be a string" + end + + -- borro entrada de redis + local deleted, err = red:del(key) + if err then + kong.log.err("failed to delete the key from Redis: ", err) + return nil, err + end + + local ok, err2 = red:set_keepalive(10000, 100) + if not ok then + kong.log.err("failed to set Redis keepalive: ", err2) + return nil, err2 + end + + return true +end + +-- Elimina todas las entradas de la base de datos +function _M:flush(conf) + local red, err_redis = red_connect(conf) + + -- Compruebo si he conectado a Redis bien + if not red then + kong.log.err("failed to get the Redis connection: ", err_redis) + return nil, "there is no Redis connection established" + end + + -- aquí borro toda la cache de redis de forma asíncrona + local flushed, err = red:flushdb("async") + if err then + kong.log.err("failed to flush the database from Redis: ", err) + return nil, err + end + + local ok, err2 = red:set_keepalive(10000, 100) + if not ok then + kong.log.err("failed to set Redis keepalive: ", err2) + return nil, err2 + end + + return true +end + +return _M diff --git a/kong/plugins/proxy-cache-redis/schema.lua b/kong/plugins/proxy-cache-redis/schema.lua index 25d53d4..1313858 100644 --- a/kong/plugins/proxy-cache-redis/schema.lua +++ b/kong/plugins/proxy-cache-redis/schema.lua @@ -1,124 +1,80 @@ -local typedefs = require "kong.db.schema.typedefs" - - -local strategies = require "kong.plugins.proxy-cache-redis.strategies" - - -local ngx = ngx - - -local function check_shdict(name) - if not ngx.shared[name] then - return false, "missing shared dict '" .. name .. 
"'" - end - - return true -end - - return { - name = "proxy-cache", - fields = { - { config = { - type = "record", - fields = { - { response_code = { - type = "array", - default = { 200, 301, 404 }, - elements = { type = "integer", between = {100, 900} }, - len_min = 1, - required = true, - }}, - { request_method = { - type = "array", - default = { "GET", "HEAD" }, - elements = { - type = "string", - one_of = { "HEAD", "GET", "POST", "PATCH", "PUT" }, - }, - required = true - }}, - { content_type = { - type = "array", - default = { "text/plain","application/json" }, - elements = { type = "string" }, - required = true, - }}, - { cache_ttl = { - type = "integer", - default = 300, - gt = 0, - }}, - { strategy = { - type = "string", - one_of = strategies.STRATEGY_TYPES, - required = true, - }}, - { cache_control = { - type = "boolean", - default = false, - required = true, - }}, - { storage_ttl = { - type = "integer", - }}, - { memory = { - type = "record", - fields = { - { dictionary_name = { - type = "string", - required = true, - default = "kong_db_cache", - }}, - }, - }}, - { vary_query_params = { - type = "array", - elements = { type = "string" }, - }}, - { vary_headers = { - type = "array", - elements = { type = "string" }, - }}, - { redis = { + name = "proxy-cache-redis", + fields = { + { config = { type = "record", fields = { - { host = typedefs.host }, - { port = typedefs.port({ default = 6379 }), }, - { password = { type = "string", len_min = 0 }, }, - { timeout = { type = "number", default = 2000, }, }, - { database = { type = "integer", default = 0 }, }, + { response_code = { + type = "array", + default = { 200, 301, 404 }, + elements = { type = "integer", between = { 100, 900 } }, + len_min = 1, + required = true, + } }, + { request_method = { + type = "array", + default = { "GET", "HEAD" }, + elements = { + type = "string", + one_of = { "HEAD", "GET", "POST", "PATCH", "PUT" }, + }, + required = true + } }, + { content_type = { + type = "array", + default = { "text/plain", "application/json" }, + elements = { type = "string" }, + required = true, + } }, + { cache_ttl = { + type = "integer", + default = 300, + required = true, + gt = 0, + } }, + { cache_control = { + type = "boolean", + default = false, + required = true, + } }, + { storage_ttl = { + type = "integer", + gt = 0, + } }, + { vary_query_params = { + type = "array", + elements = { type = "string" }, + } }, + { vary_headers = { + type = "array", + elements = { type = "string" }, + } }, + { redis_host = { + type = "string", + required = true, + } }, + { redis_port = { + between = { 0, 65535 }, + type = "integer", + default = 6379, + } }, + { redis_password = { + type = "string", + len_min = 0, + } }, + { redis_timeout = { + type = "number", + default = 2000, + } }, + { redis_database = { + type = "integer", + default = 0, + } }, }, - }}, + } }, - } }, - }, - entity_checks = { - { custom_entity_check = { - field_sources = { "config" }, - fn = function(entity) - local config = entity.config - - if config.strategy == "memory" then - local ok, err = check_shdict(config.memory.dictionary_name) - if not ok then - return nil, err - end - - end - - return true - end - }}, - { conditional = { - if_field = "config.strategy", if_match = { eq = "redis" }, - then_field = "config.redis.host", then_match = { required = true }, - } }, - { conditional = { - if_field = "config.strategy", if_match = { eq = "redis" }, - then_field = "config.redis.port", then_match = { required = true }, - } }, - }, + entity_checks = { + }, } diff --git 
a/kong/plugins/proxy-cache-redis/strategies/init.lua b/kong/plugins/proxy-cache-redis/strategies/init.lua deleted file mode 100644 index a9453ed..0000000 --- a/kong/plugins/proxy-cache-redis/strategies/init.lua +++ /dev/null @@ -1,27 +0,0 @@ -local require = require -local setmetatable = setmetatable - - -local _M = {} - -_M.STRATEGY_TYPES = { - "memory", - "redis", -} - --- strategies that store cache data only on the node, instead of --- cluster-wide. this is typically used to handle purge notifications -_M.LOCAL_DATA_STRATEGIES = { - memory = true, - [1] = "memory", -} - -local function require_strategy(name) - return require("kong.plugins.proxy-cache-redis.strategies." .. name) -end - -return setmetatable(_M, { - __call = function(_, opts) - return require_strategy(opts.strategy_name).new(opts.strategy_opts) - end -}) diff --git a/kong/plugins/proxy-cache-redis/strategies/memory.lua b/kong/plugins/proxy-cache-redis/strategies/memory.lua deleted file mode 100644 index 0182727..0000000 --- a/kong/plugins/proxy-cache-redis/strategies/memory.lua +++ /dev/null @@ -1,141 +0,0 @@ -local cjson = require "cjson.safe" - - -local ngx = ngx -local type = type -local time = ngx.time -local shared = ngx.shared -local setmetatable = setmetatable - - -local _M = {} - - ---- Create new memory strategy object --- @table opts Strategy options: contains 'dictionary_name' and 'ttl' fields -function _M.new(opts) - local dict = shared[opts.dictionary_name] - - local self = { - dict = dict, - opts = opts, - } - - return setmetatable(self, { - __index = _M, - }) -end - - ---- Store a new request entity in the shared memory --- @string key The request key --- @table req_obj The request object, represented as a table containing --- everything that needs to be cached --- @int[opt] ttl The TTL for the request; if nil, use default TTL specified --- at strategy instantiation time -function _M:store(key, req_obj, req_ttl) - local ttl = req_ttl or self.opts.ttl - - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- encode request table representation as JSON - local req_json = cjson.encode(req_obj) - if not req_json then - return nil, "could not encode request object" - end - - local succ, err = self.dict:set(key, req_json, ttl) - return succ and req_json or nil, err -end - - ---- Fetch a cached request --- @string key The request key --- @return Table representing the request -function _M:fetch(key) - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- retrieve object from shared dict - local req_json, err = self.dict:get(key) - if not req_json then - if not err then - return nil, "request object not in cache" - - else - return nil, err - end - end - - -- decode object from JSON to table - local req_obj = cjson.decode(req_json) - if not req_obj then - return nil, "could not decode request object" - end - - return req_obj -end - - ---- Purge an entry from the request cache --- @return true on success, nil plus error message otherwise -function _M:purge(key) - if type(key) ~= "string" then - return nil, "key must be a string" - end - - self.dict:delete(key) - return true -end - - ---- Reset TTL for a cached request -function _M:touch(key, req_ttl, timestamp) - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- check if entry actually exists - local req_json, err = self.dict:get(key) - if not req_json then - if not err then - return nil, "request object not in cache" - - else - return nil, err - end - end - - -- decode object from JSON 
to table - local req_obj = cjson.decode(req_json) - if not req_json then - return nil, "could not decode request object" - end - - -- refresh timestamp field - req_obj.timestamp = timestamp or time() - - -- store it again to reset the TTL - return _M:store(key, req_obj, req_ttl) -end - - ---- Marks all entries as expired and remove them from the memory --- @param free_mem Boolean indicating whether to free the memory; if false, --- entries will only be marked as expired --- @return true on success, nil plus error message otherwise -function _M:flush(free_mem) - -- mark all items as expired - self.dict:flush_all() - -- flush items from memory - if free_mem then - self.dict:flush_expired() - end - - return true -end - -return _M diff --git a/kong/plugins/proxy-cache-redis/strategies/redis.lua b/kong/plugins/proxy-cache-redis/strategies/redis.lua deleted file mode 100644 index b11a115..0000000 --- a/kong/plugins/proxy-cache-redis/strategies/redis.lua +++ /dev/null @@ -1,276 +0,0 @@ -local cjson = require "cjson.safe" -local redis = require "resty.redis" - -local ngx = ngx -local type = type -local time = ngx.time -local setmetatable = setmetatable - - -local _M = {} - --- TODO aquí creo la conexión a Redis ---- Create new memory strategy object --- @table opts Strategy options: contains las variables de redis -function _M.new(opts) - local red = redis:new() - local redis_opts = {} - - red:set_timeout(opts.timeout) - - -- use a special pool name only if database is set to non-zero - -- otherwise use the default pool name host:port - redis_opts.pool = opts.database and - opts.host .. ":" .. opts.port .. - ":" .. opts.database - - -- conecto - local ok, err = red:connect(opts.host, opts.port, redis_opts) - if not ok then - kong.log.err("failed to connect to Redis: ", err) - --return nil, err - return setmetatable({red = nil, opts = opts, err = err}, {__index = _M,}) - end - - local times, err2 = red:get_reused_times() - if err2 then - kong.log.err("failed to get connect reused times: ", err2) - --return nil, err - return setmetatable({red = nil, opts = opts, err = err2}, {__index = _M,}) - end - - if times == 0 then - if is_present(opts.password) then - local ok3, err3 = red:auth(opts.password) - if not ok3 then - kong.log.err("failed to auth Redis: ", err3) - --return nil, err - return setmetatable({red = nil, opts = opts, err = err3}, {__index = _M,}) - end - end - - if opts.database ~= 0 then - -- Only call select first time, since we know the connection is shared - -- between instances that use the same redis database - local ok4, err4 = red:select(opts.database) - if not ok4 then - kong.log.err("failed to change Redis database: ", err4) - --return nil, err - return setmetatable({red = nil, opts = opts, err = err4}, {__index = _M,}) - end - end - end - - local self = { red = red, opts = opts, err = nil, } - - return setmetatable(self, {__index = _M,}) -end - - ---- Store a new request entity in Redis --- @string key The request key --- @table req_obj The request object, represented as a table containing --- everything that needs to be cached --- @int[opt] ttl The TTL for the request; if nil, use default TTL specified --- at strategy instantiation time -function _M:store(key, req_obj, req_ttl) - local ttl = req_ttl or self.opts.ttl - local red = self.red - - -- Compruebo si he conectado a Redis bien - if not red then - return nil, "there is no Redis connection established" - end - - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- encode request table 
representation as JSON - local req_json = cjson.encode(req_obj) - if not req_json then - return nil, "could not encode request object" - end - - -- TODO aquí guardo en redis - -- Hago efectivo el guardado - --local succ, err = self.dict:set(key, req_json, ttl) - -- inicio la transacción - red:init_pipeline() - -- guardo - red:set(key, req_json) - -- TTL - red:expire(key, ttl) - - -- ejecuto la transacción - local _, err = red:commit_pipeline() - if err then - kong.log.err("failed to commit the cache value to Redis: ", err) - return nil, err - end - - -- keepalive de la conexión: max_timeout, connection pool - local ok, err2 = red:set_keepalive(10000, 100) - if not ok then - kong.log.err("failed to set Redis keepalive: ", err2) - return nil, err2 - end - - return true and req_json or nil, err -end - - ---- Fetch a cached request --- @string key The request key --- @return Table representing the request -function _M:fetch(key) - local red = self.red - - -- Compruebo si he conectado a Redis bien - if not red then - return nil, "there is no Redis connection established" - end - - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- TODO aquí obtengo entrada desde redis - -- retrieve object from shared dict - --local req_json, err = self.dict:get(key) - local req_json, err = red:get(key) - if not req_json then - if not err then - -- devuelvo nulo pero diciendo que no está en la caché, no que haya habido error realmente - return nil, "request object not in cache" - else - return nil, err - end - end - - local ok, err2 = red:set_keepalive(10000, 100) - if not ok then - kong.log.err("failed to set Redis keepalive: ", err2) - end - - -- decode object from JSON to table - local req_obj = cjson.decode(req_json) - if not req_json then - return nil, "could not decode request object" - end - - return req_obj -end - - ---- Purge an entry from the request cache (borra una entrada) --- @return true on success, nil plus error message otherwise -function _M:purge(key) - local red = self.red - - -- Compruebo si he conectado a Redis bien - if not red then - return nil, "there is no Redis connection established" - end - - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- TODO borro entrada de redis - --self.dict:delete(key) - local deleted, err = red:del(key) - if err then - kong.log.err("failed to delete the key from Redis: ", err) - return nil, err - end - - local ok, err2 = red:set_keepalive(10000, 100) - if not ok then - kong.log.err("failed to set Redis keepalive: ", err2) - end - - return true -end - - ---- Reset TTL for a cached request -function _M:touch(key, req_ttl, timestamp) - local red = self.red - - -- Compruebo si he conectado a Redis bien - if not red then - return nil, "there is no Redis connection established" - end - - if type(key) ~= "string" then - return nil, "key must be a string" - end - - -- TODO cojo entrada de redis - -- check if entry actually exists - --local req_json, err = self.dict:get(key) - local req_json, err = red:get(key) - if not req_json then - if not err then - return nil, "request object not in cache" - - else - return nil, err - end - end - - local ok, err2 = red:set_keepalive(10000, 100) - if not ok then - kong.log.err("failed to set Redis keepalive: ", err2) - end - - -- decode object from JSON to table - local req_obj = cjson.decode(req_json) - if not req_obj then - return nil, "could not decode request object" - end - - -- refresh timestamp field - req_obj.timestamp = timestamp or time() - - -- store it again 
to reset the TTL - return _M:store(key, req_obj, req_ttl) -end - - ---- Marks all entries as expired and remove them from the memory --- @param free_mem Boolean indicating whether to free the memory; if false, --- entries will only be marked as expired --- @return true on success, nil plus error message otherwise -function _M:flush(free_mem) - local red = self.red - - -- Compruebo si he conectado a Redis bien - if not red then - return nil, "there is no Redis connection established" - end - - local flushed, err = red:flush("async") - if err then - kong.log.err("failed to flush the database from Redis: ", err) - return nil, err - end - - -- TODO aquí borro toda la cache de redis - -- mark all items as expired - --self.dict:flush_all() - -- flush items from memory - --if free_mem then - -- self.dict:flush_expired() - --end - - local ok, err2 = red:set_keepalive(10000, 100) - if not ok then - kong.log.err("failed to set Redis keepalive: ", err2) - end - - return true -end - -return _M diff --git a/spec/01-schema_spec.lua b/spec/01-schema_spec.lua index d11cb29..d458f1c 100644 --- a/spec/01-schema_spec.lua +++ b/spec/01-schema_spec.lua @@ -1,7 +1,7 @@ local proxy_cache_schema = require "kong.plugins.proxy-cache-redis.schema" local v = require("spec.helpers").validate_plugin_config_schema -describe("proxy-cache schema", function() +describe("proxy-cache-redis schema", function() it("accepts a minimal config", function() local entity, err = v({ strategy = "memory", diff --git a/spec/02-access_spec.lua b/spec/02-access_spec.lua index 5dbfb11..5966222 100644 --- a/spec/02-access_spec.lua +++ b/spec/02-access_spec.lua @@ -14,7 +14,7 @@ local strategies = require("kong.plugins.proxy-cache-redis.strategies") do local policy = "memory" - describe("proxy-cache access with policy: " .. policy, function() + describe("proxy-cache-redis access with policy: " .. 
policy, function() local client, admin_client --local cache_key local policy_config = { dictionary_name = "kong", } @@ -26,7 +26,7 @@ do setup(function() - local bp = helpers.get_db_utils(nil, nil, {"proxy-cache"}) + local bp = helpers.get_db_utils(nil, nil, {"proxy-cache-redis"}) strategy:flush(true) local route1 = assert(bp.routes:insert { @@ -119,7 +119,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route1.id }, config = { strategy = policy, @@ -129,7 +129,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route2.id }, config = { strategy = policy, @@ -140,7 +140,7 @@ do -- global plugin for routes 3 and 4 assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", config = { strategy = policy, content_type = { "text/plain", "application/json" }, @@ -149,7 +149,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route5.id }, config = { strategy = policy, @@ -159,7 +159,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route6.id }, config = { strategy = policy, @@ -170,7 +170,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route7.id }, config = { strategy = policy, @@ -181,7 +181,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route8.id }, config = { strategy = policy, @@ -193,7 +193,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route9.id }, config = { strategy = policy, @@ -205,7 +205,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route10.id }, config = { strategy = policy, @@ -217,7 +217,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route11.id }, config = { strategy = policy, @@ -230,7 +230,7 @@ do }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route12.id }, config = { strategy = policy, @@ -243,7 +243,7 @@ do }) assert(helpers.start_kong({ - plugins = "bundled,proxy-cache", + plugins = "bundled,proxy-cache-redis", nginx_conf = "spec/fixtures/custom_nginx.template", })) end) diff --git a/spec/03-api_spec.lua b/spec/03-api_spec.lua index 684e773..644c1d1 100644 --- a/spec/03-api_spec.lua +++ b/spec/03-api_spec.lua @@ -2,18 +2,18 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -describe("Plugin: proxy-cache", function() +describe("Plugin: proxy-cache-redis", function() local bp local proxy_client, admin_client, cache_key, plugin1, route1 setup(function() - bp = helpers.get_db_utils(nil, nil, {"proxy-cache"}) + bp = helpers.get_db_utils(nil, nil, {"proxy-cache-redis"}) route1 = assert(bp.routes:insert { hosts = { "route-1.com" }, }) plugin1 = assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route1.id }, config = { strategy = "memory", @@ -36,7 +36,7 @@ describe("Plugin: proxy-cache", function() }) assert(bp.plugins:insert { - name = "proxy-cache", + name = "proxy-cache-redis", route = { id = route2.id }, config = { strategy = "memory", @@ -48,7 +48,7 @@ describe("Plugin: proxy-cache", function() }) assert(helpers.start_kong({ - plugins = "proxy-cache,request-transformer", + plugins = "proxy-cache-redis,request-transformer", 
       nginx_conf = "spec/fixtures/custom_nginx.template",
     }))
@@ -75,7 +75,7 @@ describe("Plugin: proxy-cache", function()
         method = "POST",
         path = "/plugins",
         body = {
-          name = "proxy-cache",
+          name = "proxy-cache-redis",
           config = {
             strategy = "memory",
             memory = {
@@ -104,7 +104,7 @@ describe("Plugin: proxy-cache", function()
         method = "POST",
         path = "/plugins",
         body = {
-          name = "proxy-cache",
+          name = "proxy-cache-redis",
           config = {
             strategy = "memory",
             memory = {
@@ -129,7 +129,7 @@ describe("Plugin: proxy-cache", function()
         method = "POST",
         path = "/plugins",
         body = {
-          name = "proxy-cache",
+          name = "proxy-cache-redis",
           config = {
             strategy = "memory",
             memory = {
@@ -154,7 +154,7 @@ describe("Plugin: proxy-cache", function()
         method = "POST",
         path = "/plugins",
         body = {
-          name = "proxy-cache",
+          name = "proxy-cache-redis",
           config = {
             strategy = "memory",
             memory = {
@@ -180,7 +180,7 @@ describe("Plugin: proxy-cache", function()
         method = "POST",
         path = "/plugins",
         body = {
-          name = "proxy-cache",
+          name = "proxy-cache-redis",
           config = {
             strategy = "memory",
             memory = {
@@ -238,7 +238,7 @@ describe("Plugin: proxy-cache", function()
       -- delete the key
       res = assert(admin_client:send {
         method = "DELETE",
-        path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. cache_key,
+        path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key,
       })
       assert.res_status(204, res)
@@ -256,7 +256,7 @@ describe("Plugin: proxy-cache", function()
       -- delete directly, having to look up all proxy-cache instances
       res = assert(admin_client:send {
         method = "DELETE",
-        path = "/proxy-cache/" .. cache_key,
+        path = "/proxy-cache-redis/" .. cache_key,
       })
       assert.res_status(204, res)
@@ -317,7 +317,7 @@ describe("Plugin: proxy-cache", function()
      -- delete all the cache keys
      res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache",
+       path = "/proxy-cache-redis",
      })
      assert.res_status(204, res)
@@ -347,13 +347,13 @@ describe("Plugin: proxy-cache", function()
      -- delete all the cache keys
      local res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache",
+       path = "/proxy-cache-redis",
      })
      assert.res_status(204, res)
      local res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. "123",
+       path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. "123",
      })
      assert.res_status(404, res)
    end)
@@ -361,13 +361,13 @@ describe("Plugin: proxy-cache", function()
      -- delete all the cache keys
      local res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache",
+       path = "/proxy-cache-redis",
      })
      assert.res_status(204, res)
      local res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache/" .. route1.id .. "/caches/" .. "123",
+       path = "/proxy-cache-redis/" .. route1.id .. "/caches/" .. "123",
      })
      assert.res_status(404, res)
    end)
@@ -377,20 +377,20 @@ describe("Plugin: proxy-cache", function()
      -- delete all the cache keys
      local res = assert(admin_client:send {
        method = "DELETE",
-       path = "/proxy-cache",
+       path = "/proxy-cache-redis",
      })
      assert.res_status(204, res)
      local res = assert(admin_client:send {
        method = "GET",
-       path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. cache_key,
+       path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key,
      })
      assert.res_status(404, res)
      -- attempt to list an entry directly via cache key
      local res = assert(admin_client:send {
        method = "GET",
-       path = "/proxy-cache/" .. cache_key,
+       path = "/proxy-cache-redis/" .. cache_key,
      })
      assert.res_status(404, res)
    end)
@@ -407,7 +407,7 @@ describe("Plugin: proxy-cache", function()
      local res = assert(admin_client:send {
        method = "GET",
-       path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. cache_key,
+       path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key,
      })
      local body = assert.res_status(200, res)
      local json_body = cjson.decode(body)
@@ -416,7 +416,7 @@ describe("Plugin: proxy-cache", function()
      -- list an entry directly via cache key
      local res = assert(admin_client:send {
        method = "GET",
-       path = "/proxy-cache/" .. cache_key,
+       path = "/proxy-cache-redis/" .. cache_key,
      })
      local body = assert.res_status(200, res)
      local json_body = cjson.decode(body)
diff --git a/spec/04-invalidations_spec.lua b/spec/04-invalidations_spec.lua
index ed2085b..50fcaff 100644
--- a/spec/04-invalidations_spec.lua
+++ b/spec/04-invalidations_spec.lua
@@ -18,7 +18,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
   local bp
   setup(function()
-    bp = helpers.get_db_utils(strategy, nil, {"proxy-cache"})
+    bp = helpers.get_db_utils(strategy, nil, {"proxy-cache-redis"})
     route1 = assert(bp.routes:insert {
       hosts = { "route-1.com" },
@@ -29,7 +29,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
     })
     plugin1 = assert(bp.plugins:insert {
-      name = "proxy-cache",
+      name = "proxy-cache-redis",
      route = { id = route1.id },
      config = {
        strategy = "memory",
@@ -41,7 +41,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
     })
     plugin2 = assert(bp.plugins:insert {
-      name = "proxy-cache",
+      name = "proxy-cache-redis",
      route = { id = route2.id },
      config = {
        strategy = "memory",
@@ -66,7 +66,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      admin_gui_ssl = false,
      db_update_frequency = POLL_INTERVAL,
      db_update_propagation = db_update_propagation,
-     plugins = "proxy-cache",
+     plugins = "proxy-cache-redis",
      nginx_conf = "spec/fixtures/custom_nginx.template",
    })
@@ -82,7 +82,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      admin_gui_ssl = false,
      db_update_frequency = POLL_INTERVAL,
      db_update_propagation = db_update_propagation,
-     plugins = "proxy-cache",
+     plugins = "proxy-cache-redis",
    })
    client_1 = helpers.http_client("127.0.0.1", 8000)
@@ -190,7 +190,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      -- now purge the entry
      local res = assert(admin_client_1:send {
        method = "DELETE",
-       path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. cache_key,
+       path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key,
      })
      assert.res_status(204, res)
@@ -199,7 +199,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
        -- assert that the entity was purged from the second instance
        res = assert(admin_client_2:send {
          method = "GET",
-         path = "/proxy-cache/" .. plugin1.id .. "/caches/" .. cache_key,
+         path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key,
        })
        res:read_body()
        return res.status == 404
@@ -232,7 +232,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      -- now purge the entry
      res = assert(admin_client_1:send {
        method = "DELETE",
-       path = "/proxy-cache/" .. cache_key,
+       path = "/proxy-cache-redis/" .. cache_key,
      })
      assert.res_status(204, res)
@@ -244,7 +244,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
        -- assert that the entity was purged from the second instance
        res = assert(admin_client_2:send {
          method = "GET",
-         path = "/proxy-cache/" .. cache_key,
+         path = "/proxy-cache-redis/" .. cache_key,
        })
        res:read_body()
        return res.status == 404
@@ -254,14 +254,14 @@ describe("proxy-cache invalidations via: " .. strategy, function()
    it("does not affect cache entries under other plugin instances", function()
      local res = assert(admin_client_1:send {
        method = "GET",
-       path = "/proxy-cache/" .. plugin2.id .. "/caches/" .. cache_key2,
+       path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2,
      })
      assert.res_status(200, res)
      res = assert(admin_client_2:send {
        method = "GET",
-       path = "/proxy-cache/" .. plugin2.id .. "/caches/" .. cache_key2,
+       path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2,
      })
      assert.res_status(200, res)
@@ -271,16 +271,16 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      do
        local res = assert(admin_client_1:send {
          method = "DELETE",
-         path = "/proxy-cache/",
+         path = "/proxy-cache-redis/",
        })
-
+
        assert.res_status(204, res)
      end
      helpers.wait_until(function()
        local res = assert(admin_client_1:send {
          method = "GET",
-         path = "/proxy-cache/" .. plugin2.id .. "/caches/" .. cache_key2,
+         path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2,
        })
        res:read_body()
        return res.status == 404
@@ -289,7 +289,7 @@ describe("proxy-cache invalidations via: " .. strategy, function()
      helpers.wait_until(function()
        local res = assert(admin_client_2:send {
          method = "GET",
-         path = "/proxy-cache/" .. plugin2.id .. "/caches/" .. cache_key2,
+         path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2,
        })
        res:read_body()
        return res.status == 404
diff --git a/spec/kong_tests.conf b/spec/kong_tests.conf
index 9475a8f..c742120 100644
--- a/spec/kong_tests.conf
+++ b/spec/kong_tests.conf
@@ -25,7 +25,7 @@
 nginx_worker_processes = 1
 nginx_optimizations = off
 plugins=bundled,dummy,rewriter
-custom_plugins = proxy-cache
+custom_plugins = proxy-cache-redis
 prefix = servroot
 log_level = debug
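
The spec changes above only rename the plugin and keep exercising the purge endpoints (`DELETE /proxy-cache-redis` and `DELETE /proxy-cache-redis/:plugin_id/caches/:cache_key`). As a rough reference for how those endpoints can be backed by Redis with `lua-resty-redis`, here is a minimal sketch. It is not the plugin's actual `redis.lua`: the module layout, the `purge`/`flush` helper names, and the use of the cache key directly as the Redis key are assumptions made for illustration; only the configuration field names (`redis_host`, `redis_port`, `redis_timeout`, `redis_password`, `redis_database`) mirror the plugin configuration.

```lua
-- Hypothetical sketch, not the plugin's real module: shows one way the
-- Admin API purge endpoints could talk to Redis.
local redis = require "resty.redis"

local _M = {}

-- Open a connection using the plugin configuration (field names assumed).
local function connect(conf)
  local red = redis:new()
  red:set_timeout(conf.redis_timeout or 2000)

  local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379)
  if not ok then
    return nil, "failed to connect to Redis: " .. err
  end

  if conf.redis_password and conf.redis_password ~= "" then
    local _, auth_err = red:auth(conf.redis_password)
    if auth_err then
      return nil, "failed to authenticate to Redis: " .. auth_err
    end
  end

  if conf.redis_database then
    local _, select_err = red:select(conf.redis_database)
    if select_err then
      return nil, "failed to select Redis database: " .. select_err
    end
  end

  return red
end

-- Purge a single entry (DELETE .../caches/:cache_key), assuming the cache
-- key is used verbatim as the Redis key.
function _M.purge(conf, cache_key)
  local red, err = connect(conf)
  if not red then
    return nil, err
  end

  local deleted, del_err = red:del(cache_key)
  red:set_keepalive(10000, 100)  -- return the connection to the pool

  if del_err then
    return nil, del_err
  end
  -- DEL returns the number of keys removed; 0 maps to a 404 response.
  return deleted > 0
end

-- Purge everything (DELETE /proxy-cache-redis). FLUSHDB clears only the
-- selected database; the ASYNC option needs Redis 4.0 or later.
function _M.flush(conf)
  local red, err = connect(conf)
  if not red then
    return nil, err
  end

  local _, flush_err = red:flushdb("async")
  red:set_keepalive(10000, 100)

  if flush_err then
    return nil, flush_err
  end
  return true
end

return _M
```

Using `FLUSHDB` rather than `FLUSHALL` keeps the purge scoped to the configured `redis_database`, and `set_keepalive` hands the connection back to the OpenResty pool instead of closing it, which matters under Kong's per-request connection model.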