Hello, I am rather new to Kong and am trying to write a plugin that sends one part of a user's request to a Lambda function and the other part to an S3 bucket. (I would prefer to handle any data splitting and sorting within the Lambda function, but Lambda has a 6 MB invocation payload limit and the requests contain PDFs larger than 6 MB as Base64 strings.)
My idea so far was for the plugin to:
- send the first part (email text and filename) to a Lambda function, which returns a UID
- use the UID as the new filename for the PDF and store the PDF in an S3 bucket
- lastly, return the UID to the caller (the expected request/response shape is sketched right after this list)
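To make the flow concrete, here is a minimal sketch (my addition; the field names FileName, EmailText, Base64File and the uuid response field are taken from the handler code further down, the concrete values are made up) of what the client would send and what the plugin would return:

-- Example request/response shapes for the plugin, for illustration only.
-- Field names follow handler.lua below; the values are invented.
local cjson = require "cjson"

-- What the client POSTs to the route handled by the plugin:
local example_request = {
  FileName   = "invoice.pdf",
  EmailText  = "Please process the attached invoice.",
  Base64File = "JVBERi0xLjcK..."  -- the PDF as a Base64 string, possibly > 6 MB
}

-- What the plugin returns once the Lambda call and the S3 upload succeeded:
local example_response = {
  uuid = "123e4567-e89b-12d3-a456-426614174000"
}

print(cjson.encode(example_request))
print(cjson.encode(example_response))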
Right now I am trying to get a Kong gateway that does this running via Docker.
my kong.yml:
_format_version: "3.0"
_transform: true

services:
  - name: online-test # <- just to check if the connection is working
    url: https://httpstat.us/200
  - name: aws-ping-test # <- just to check if the connection is working
    url: https://lambda.eu-central-1.amazonaws.com
  - name: lambda-s3-service
    url: https://lambda.eu-central-1.amazonaws.com

routes:
  - name: my-online-test
    service: online-test
    paths:
      - /online
  - name: my-aws-ping-test
    service: aws-ping-test
    paths:
      - /aws-ping
  - name: my-lambda-route
    paths:
      - /lambda
  - name: my-lambda-route_2
    service: lambda-s3-service
    paths:
      - /lambda-2

plugins:
  - name: aws-lambda
    route: my-lambda-route
    config:
      aws_region: "regionname"
      function_name: "lambdafunctionname"
      invocation_type: "RequestResponse"
      log_type: "Tail"
      forward_request_body: true
      forward_request_headers: true
      forward_request_uri: true
  - name: lambda_s3_handler
    route: my-lambda-route_2
my docker-compose.yml:
version: '3.3'

services:
  kong:
    image: kong/kong-gateway:latest
    container_name: kong
    volumes:
      - "./config:/usr/local/kong/declarative"
      #- "./.aws:/usr/local/kong/declarative"
      - "./plugins:/usr/local/share/lua/5.1/kong/plugins"
    environment:
      - KONG_DATABASE=off
      - KONG_DECLARATIVE_CONFIG=/usr/local/kong/declarative/kong.yml
      - KONG_PROXY_ACCESS_LOG=/dev/stdout
      - KONG_ADMIN_ACCESS_LOG=/dev/stdout
      - KONG_PROXY_ERROR_LOG=/dev/stderr
      - KONG_ADMIN_ERROR_LOG=/dev/stderr
      - KONG_ADMIN_LISTEN=0.0.0.0:8001, 0.0.0.0:8444 ssl
      - KONG_LOG_LEVEL=debug
      - KONG_PLUGINS=bundled, lambda_s3_handler
      - KONG_ADMIN_GUI_URL=http://localhost:8002
      - AWS_DEFAULT_REGION=regionname
      - AWS_REGION=regionname
      - AWS_SHARED_CREDENTIALS_FILE=/usr/local/kong/declarative/credentials
      - AWS_PROFILE=profilename
    ports:
      - "8000:8000/tcp"
      - "127.0.0.1:7990:8001/tcp"
      - "8001:8001/tcp"
      - "8443:8443/tcp"
      - "127.0.0.1:8444:8444/tcp"
      #- "127.0.0.1:8001:8001/tcp"
    networks:
      - learn-networking

networks:
  learn-networking:
    external: true
    driver: bridge
my handler.lua (within ./plugins/lambda_s3_handler/ of my Docker project folder):
local cjson = require "cjson"
local http = require "resty.http"
local base64 = require "ngx.base64"

local LambdaS3Handler = {
  PRIORITY = 1000,
  VERSION = "1.0.0"
}

-- AWS credentials (ensure they are set in environment variables)
local AWS_REGION = os.getenv("AWS_REGION") or "regionname"
local LAMBDA_FUNCTION = os.getenv("AWS_LAMBDA_FUNCTION") or "lambdafunctionname"
local S3_BUCKET = os.getenv("AWS_S3_BUCKET") or "bucketname"

-- Function to invoke AWS Lambda
local function invoke_lambda(filename, email_text)
  local function_name = LAMBDA_FUNCTION
  local payload = { FileName = filename, EmailText = email_text }
  local httpc = http.new()
  local res, err = httpc:request_uri("https://lambda." .. AWS_REGION .. ".amazonaws.com/2015-03-31/functions/" .. function_name .. "/invocations", {
    method = "POST",
    body = cjson.encode(payload),
    headers = {
      ["Content-Type"] = "application/json",
      ["Authorization"] = "AWS4-HMAC-SHA256 Credential=" .. os.getenv("AWS_ACCESS_KEY_ID"),
    },
  })
  if not res then
    return nil, "Failed to invoke Lambda: " .. (err or "Unknown error")
  end
  local lambda_result = cjson.decode(res.body)
  return lambda_result.uuid
end

-- Function to upload file to S3
local function upload_to_s3(uuid, file_data)
  local httpc = http.new()
  local s3_url = "https://" .. S3_BUCKET .. ".s3." .. AWS_REGION .. ".amazonaws.com/" .. uuid .. ".pdf"
  local res, err = httpc:request_uri(s3_url, {
    method = "PUT",
    body = file_data,
    headers = {
      ["Content-Type"] = "application/pdf",
      ["x-amz-acl"] = "bucket-owner-full-control",
    }
  })
  if not res then
    return false, "Failed to upload file to S3: " .. (err or "Unknown error")
  end
  return true
end

-- Main Kong request handler
function LambdaS3Handler:access(conf)
  -- Read JSON body from the request
  ngx.req.read_body()
  local body = ngx.req.get_body_data()
  if not body then
    return kong.response.exit(400, { message = "Invalid request, no body provided" })
  end
  local data = cjson.decode(body)
  if not data.FileName or not data.EmailText or not data.Base64File then
    return kong.response.exit(400, { message = "Missing required fields" })
  end

  -- Step 1: Invoke Lambda
  local uuid, err = invoke_lambda(data.FileName, data.EmailText)
  if not uuid then
    return kong.response.exit(500, { message = "Lambda invocation failed: " .. err })
  end

  -- Step 2: Decode Base64 and upload to S3
  local file_bytes = ngx.decode_base64(data.Base64File)
  local success, upload_err = upload_to_s3(uuid, file_bytes)
  if not success then
    return kong.response.exit(500, { message = "S3 upload failed: " .. upload_err })
  end

  -- Step 3: Return UUID to user
  return kong.response.exit(200, { uuid = uuid })
end

return LambdaS3Handler
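One thing worth noting about this handler (an editor's note, not part of the original question): for bodies of this size, ngx.req.get_body_data() returns nil once Nginx buffers the request body to a temporary file, so the PDF will usually not be readable this way. The working handler in the answer below falls back to ngx.req.get_body_file(); a minimal sketch of that fallback, meant to run inside the access phase:

-- Read the full request body, falling back to the temp file Nginx uses for large bodies.
-- Must be called in a request phase (e.g. access); mirrors the approach in the answer below.
local function read_request_body()
  ngx.req.read_body()
  local body = ngx.req.get_body_data()
  if not body then
    -- Large bodies are spilled to disk; read them back from the temp file.
    local body_file = ngx.req.get_body_file()
    if body_file then
      local f = io.open(body_file, "rb")
      if f then
        body = f:read("*a")
        f:close()
      end
    end
  end
  return body
end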
I am pretty new to this and have been trying to debug it with Google and ChatGPT, with rather modest success. Right now my container keeps crashing with this error:
1#0: init_by_lua error: /usr/local/share/lua/5.1/kong/rbac/init.lua:17: module 'kong.plugins.oauth2.secret' not found:
kong | no field package.preload['kong.plugins.oauth2.secret']
kong | no file './kong/plugins/oauth2/secret.lua'
kong | no file './kong/plugins/oauth2/secret/init.lua'
Does anyone have an idea how to fix this, or see anything in general that is going to be a problem with the plugin? Thanks in advance.
Comments:
- Note: if I use "image: kong" instead of "image: kong/kong-gateway:latest" in the docker-compose.yml, the container crashes, being unable to locate any plugins. (MaxS.)
- The error happened because the line "./plugins:/usr/local/share/lua/5.1/kong/plugins" does not add to the plugins folder in the Kong container but replaces it, resulting in all default plugins being gone. I used this instead: "./plugins/split_request:/usr/local/share/lua/5.1/kong/plugins/split_request" (MaxS.)
1 Answer
Okay, by now I have figured out by myself how to get it working.
I basically broke the plugin's task down into sub-tasks, asked Google and ChatGPT to do the sub-tasks, and debugged whatever was not working.
Note: before this, I had pretty much no idea how to use Kong, Docker, or Lua.
kong.yml has not changed.
In docker-compose.yml, changing the plugin volume to "./plugins/split_request:/usr/local/share/lua/5.1/kong/plugins/split_request" should get the container running. Also, the plugin name changed to split_request.
The working handler.lua:
kong.log.notice("\n### importing libraries ... ### \n")

local cjson = require "cjson"
local http = require "resty.http"
local AWS = require "resty.aws"
local config = require("resty.aws.config").global

kong.log.inspect(config.region)
kong.log.notice("\n### libraries imported ### \n")

local kong = kong
local ngx = ngx

local SplitRequestHandler = {
  PRIORITY = 1000,
  VERSION = "1.0.0"
}

function SplitRequestHandler:access(conf)
  kong.log.notice("\n### start SplitRequestHandler function ### \n")

  ngx.req.read_body()
  --local body = ngx.req.get_body_data()
  local body = ngx.req.get_body_data()
  kong.log.notice("\n### SplitRequestHandler function, body: \n")
  --kong.log.inspect(body)
  kong.log.notice("\n###\n")

  if not body then
    -- If the body is too large, Nginx will write it to a temporary file instead
    kong.log.notice("\n### no body found ### \n")
    local body_file = ngx.req.get_body_file()
    if body_file then
      kong.log.notice("\n### loading body file ### \n")
      local file = io.open(body_file, "rb")
      if file then
        body = file:read("*a") -- Read full content
        file:close()
      end
    end
  end

  if not body then
    kong.log.err("Request body is empty or too large to process.")
    return kong.response.exit(400, { message = "Invalid request, body too large" })
  end

  kong.log.notice("\n### preparations done, get credentials ... ### \n")
  local aws = AWS:new(config)
  -- generate Lambda Session
  local lambda = aws:Lambda(config)
  -- generate S3 session
  local s3 = aws:S3(config)

  local data = cjson.decode(body)
  kong.log.notice("\n### SplitRequestHandler function, data: \n")
  --kong.log.inspect(data)
  kong.log.notice("\n###\n")

  if not data.description then
    kong.log.notice("\n### no description found ### \n")
  elseif not data.file then
    kong.log.notice("\n### no file found ### \n")
  end

  --if not data.FileName or not data.description or not data.file then
  if not data.description or not data.file then
    return kong.response.exit(400, { message = "Missing required fields" })
  end

  -- Prepare payloads
  kong.log.notice("\n### prepare payloads ### \n")
  local echo_payload = {
    description = data.description
  }
  kong.log.notice("\n### description: \n")
  kong.log.inspect(data.description)
  kong.log.notice("\n### file: \n")
  --kong.log.inspect(data.file)
  kong.log.notice("\n###\n")

  -- Send requests
  kong.log.notice("\n### send requests ### \n")

  ---- sending filename and description to lambda
  local lambda_function_name = "your-lambda-function"
  local lambda_payload = cjson.encode(echo_payload) -- JSON encode payload

  kong.log.notice("\n### Invoking Lambda function: " .. lambda_function_name .. " ###\n")
  local lambda_response, lambda_err = lambda:invoke({
    FunctionName = lambda_function_name,
    Payload = lambda_payload
  })
  if not lambda_response then
    kong.response.exit(500, { message = "Failed to invoke Lambda", error = lambda_err })
  end

  kong.log.notice("\n### Lambda Response: ###\n")
  kong.log.inspect(lambda_response)
  kong.log.notice("\n### Lambda Response UUID: ###\n")
  kong.log.inspect(lambda_response.body.body.uuid)
  local lambda_response_uuid = lambda_response.body.body.uuid

  ---- sending file to S3
  kong.log.notice("\n### prepare to upload file to s3 ... ### \n")
  -- Upload file to S3 with UUID as filename
  local s3_bucket = "your-bucket-name"
  local s3_path = s3_bucket .. "/" .. "bucket-folder/" .. lambda_response_uuid .. ".pdf" -- data.FileName

  kong.log.notice("\n### decoding base64 string ... ### \n")
  -- Decode Base64 file data
  local decoded_file = ngx.decode_base64(data.file)
  if not decoded_file then
    kong.response.exit(400, { error = "Invalid Base64 file data" })
    return
  end

  kong.log.debug("S3 Object Created:", s3)
  kong.log.notice("\n### preparations done, uploading ... ### \n")
  kong.log.notice("\n### Target Bucket: \n")
  kong.log.inspect(s3_bucket)
  kong.log.notice("\n### Key: \n")
  kong.log.inspect(s3_path)
  kong.log.notice("\n### Credentials: \n")
  kong.log.inspect(config)

  local success, upload_err = s3:putObject({
    Bucket = s3_bucket,
    Key = s3_path,
    Body = decoded_file,
    ContentType = "application/pdf"
  })
  if not success then
    kong.response.exit(500, { error = "Failed to upload to S3: " .. tostring(upload_err) })
    return
  end

  kong.log.notice("\n### upload to s3 succeeded ... ### \n")
  kong.log.inspect(success)
  kong.log.notice("\n### ### \n")

  -- Respond with S3 URL
  local s3_url = "https://" .. s3_bucket .. ".s3.<your-region>.amazonaws.com/" .. s3_path

  if lambda_err or upload_err then
    return kong.response.exit(500, { message = "Request forwarding failed", error1 = lambda_err, error2 = upload_err })
  end

  kong.log.debug("\n### Received request body (length: " .. #body .. ")")

  -- Return combined response
  --------------------------------------------------------------
  kong.log.notice("\n### return combined response ### \n")
  return kong.response.exit(200, {
    --echo = cjson.decode(echo_response),
    lambda_response = lambda_response,
    --echo2 = cjson.decode(echo2_response)
    s3_response = {
      message = "File uploaded successfully",
      file_url = s3_url
    }
  })
end

return SplitRequestHandler
schema.lua:
local typedefs = require "kong.db.schema.typedefs"

return {
  name = "split_request",
  fields = {
    { config = {
        type = "record",
        fields = {
          { s3_bucket = { type = "string", required = true, default = "your-default-bucket" } }
        }
    }}
  }
}
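A possible follow-up (my suggestion, not part of the original answer): handler.lua still hardcodes the bucket ("your-bucket-name") and the Lambda function name ("your-lambda-function") even though schema.lua already defines a config.s3_bucket field. A sketch of an extended schema.lua, assuming a hypothetical lambda_function_name field is added and both values are then read from conf inside access() instead of the hardcoded strings:

local typedefs = require "kong.db.schema.typedefs"

return {
  name = "split_request",
  fields = {
    -- restrict the plugin to HTTP(S) routes, which also puts the required typedefs to use
    { protocols = typedefs.protocols_http },
    { config = {
        type = "record",
        fields = {
          { s3_bucket = { type = "string", required = true, default = "your-default-bucket" } },
          -- hypothetical extra field so the handler can use conf.lambda_function_name
          -- instead of the hardcoded "your-lambda-function"
          { lambda_function_name = { type = "string", required = true } }
        }
    }}
  }
}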