diff --git a/Dockerfile.local b/Dockerfile.local
new file mode 100644
index 00000000..b55a7974
--- /dev/null
+++ b/Dockerfile.local
@@ -0,0 +1,5 @@
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY bin/controller /bin/controller
+USER 65532:65532
+ENTRYPOINT ["/bin/controller"]
diff --git a/README.md b/README.md
index f561cb63..b7868e8d 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,11 @@ You can also learn more about our [Governance](/GOVERNANCE.md) structure.
 
 [coc]: https://aws.github.io/code-of-conduct
 
+## Testing
+
+See our [testing documentation](/test/e2e/README.md) for details on how to
+set up the Lambda controller for testing.
+
 ## License
 
 This project is [licensed](/LICENSE) under the Apache-2.0 License.
diff --git a/test/e2e/.gitignore b/test/e2e/.gitignore
index 1700a504..819764fd 100644
--- a/test/e2e/.gitignore
+++ b/test/e2e/.gitignore
@@ -1,3 +1,6 @@
 __pycache__/
 *.py[cod]
-**/bootstrap.yaml
\ No newline at end of file
+**/bootstrap.yaml
+bootstrap.pkl*
+bootstrap_resources.env
+
diff --git a/test/e2e/README.md b/test/e2e/README.md
new file mode 100644
index 00000000..9ca18e02
--- /dev/null
+++ b/test/e2e/README.md
@@ -0,0 +1,78 @@
+
+## Testing
+
+You will need an AWS account, a Kubernetes cluster (running locally is fine),
+and Python.
+
+### Container image build
+
+This assumes a target Docker architecture of linux/arm64 (typical for Apple Silicon macOS).
+
+1. `CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/controller ./cmd/controller/`
+2. `docker build -f Dockerfile.local -t lambda-controller:local .`
+
+### Kubernetes setup
+
+Create a temporary credentials file in `~/.aws.temporary.creds` that you don't mind using with the ACK controller:
+
+```
+[temp-profile]
+aws_access_key_id=
+aws_secret_access_key=
+aws_session_token=
+```
+
+1. `kubectl create namespace ack-system`
+2. `kubectl create secret generic aws-credentials --from-file=credentials=$HOME/.aws.temporary.creds -n ack-system`
+3. ```
+   helm install ack-lambda-controller ./helm \
+     --namespace ack-system \
+     --set image.repository=lambda-controller \
+     --set image.tag=local \
+     --set aws.region=ap-southeast-2 \
+     --set aws.credentials.secretName=aws-credentials \
+     --set aws.credentials.secretKey=credentials \
+     --set aws.credentials.profile=temp-profile \
+     --set installScope=cluster \
+     --set leaderElection.enabled=false
+   ```
+
+### Python setup
+
+In `test/e2e`:
+
+1. Create a virtual environment: `python -m venv venv`
+2. Activate the virtual environment: `source venv/bin/activate`
+3. Install the testing requirements: `pip install -r requirements.txt`
+
+### AWS setup
+
+In `test/e2e`:
+
+1. Run `AWS_PROFILE=my-profile ./setup.sh --pickle` (or run `setup.sh` and then
+   `pickle.sh`)
+
+### Set up the Docker test image used by the Lambda function
+
+In `test/e2e/resources/lambda_function`:
+
+1. `AWS_PROFILE=my-profile aws ecr create-repository --repository-name ack-e2e-testing-lambda-controller`
+2. Run `AWS_PROFILE=my-profile make`
+3. ```
+   repo=$(AWS_PROFILE=my-profile aws ecr describe-repositories --repository-names ack-e2e-testing-lambda-controller --query 'repositories[].repositoryUri' --output text)
+   AWS_PROFILE=my-profile aws ecr get-login-password | docker login --username AWS --password-stdin "${repo%/*}"
+   docker push ${repo}:v1
+   ```
+4. Run `zip main.zip main.py` and `zip updated_main.zip updated_main.py`
+
+### Run the test suite
+
+In `test/e2e`:
+
+1. Run `AWS_PROFILE=my-profile pytest`
+
+### Clean up
+
+In `test/e2e`:
+
+1. 
Run `AWS_PROFILE=my-profile ./teardown.sh` diff --git a/test/e2e/pickle.sh b/test/e2e/pickle.sh new file mode 100755 index 00000000..6b005313 --- /dev/null +++ b/test/e2e/pickle.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# Generates bootstrap.pkl from bootstrap_resources.env. +# The pickle file contains serialized acktest BootstrapResources objects +# that the e2e tests load at startup. +# +# Usage: ./pickle.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/bootstrap_resources.env" +PKL_FILE="${SCRIPT_DIR}/bootstrap.pkl" + +if [ ! -f "${ENV_FILE}" ]; then + echo "Error: ${ENV_FILE} not found. Run setup.sh first." + exit 1 +fi + +# shellcheck source=/dev/null +source "${ENV_FILE}" + +# Back up existing pickle file +if [ -f "${PKL_FILE}" ]; then + BACKUP="${PKL_FILE}.$(date +%s)" + echo "Backing up existing bootstrap.pkl to ${BACKUP}" + cp "${PKL_FILE}" "${BACKUP}" +fi + +echo "Generating bootstrap.pkl..." + +python3 << PYEOF +import sys +sys.path.insert(0, "${SCRIPT_DIR}/..") + +from pathlib import Path +from acktest.bootstrapping.s3 import Bucket +from acktest.bootstrapping.dynamodb import Table +from acktest.bootstrapping.signer import SigningProfile +from acktest.bootstrapping.sqs import Queue +from acktest.bootstrapping.iam import Role +from e2e.bootstrap_resources import BootstrapResources + +# Construct resource objects with the correct output attributes set + +bucket = Bucket("ack-lambda-controller-tests") +bucket.name = "${BUCKET_NAME}" + +signing_profile = SigningProfile("ack_testing_signer", signing_platform_id="AWSLambda-SHA384-ECDSA") +signing_profile.name = "${SIGNING_PROFILE_NAME}" +signing_profile.signing_profile_arn = "${SIGNING_PROFILE_ARN}" + +basic_role = Role("ack-lambda-controller-basic-role", principal_service="lambda.amazonaws.com", + managed_policies=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"]) +basic_role.name = "${BASIC_ROLE_NAME}" +basic_role.arn = "${BASIC_ROLE_ARN}" + +esm_role = Role("ack-lambda-controller-esm-role", principal_service="lambda.amazonaws.com", + managed_policies=[ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", + "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole", + "arn:aws:iam::aws:policy/AmazonSQSFullAccess", + ]) +esm_role.name = "${ESM_ROLE_NAME}" +esm_role.arn = "${ESM_ROLE_ARN}" + +esm_table = Table("ack-lambda-controller-table", + attribute_definitions=[ + {"AttributeName": "id", "AttributeType": "N"}, + {"AttributeName": "createdAt", "AttributeType": "S"}, + ], + key_schema=[ + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "createdAt", "KeyType": "RANGE"}, + ], + stream_specification={"StreamEnabled": True, "StreamViewType": "NEW_IMAGE"}, + provisioned_throughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}) +esm_table.name = "${ESM_TABLE_NAME}" +esm_table.latest_stream_arn = "${ESM_TABLE_STREAM_ARN}" + +esm_queue = Queue("ack-lambda-controller-queue") +esm_queue.name = "${ESM_QUEUE_NAME}" +esm_queue.arn = "${ESM_QUEUE_ARN}" +esm_queue.url = "${ESM_QUEUE_URL}" + +eic_role = Role("ack-lambda-controller-eic-role", principal_service="lambda.amazonaws.com", + managed_policies=[ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", + "arn:aws:iam::aws:policy/AmazonSQSFullAccess", + ]) +eic_role.name = "${EIC_ROLE_NAME}" +eic_role.arn = "${EIC_ROLE_ARN}" + +eic_queue_onsuccess = Queue("ack-lambda-controller-function-queue-eic-onsuccess") +eic_queue_onsuccess.name = 
"${EIC_QUEUE_ONSUCCESS_NAME}" +eic_queue_onsuccess.arn = "${EIC_QUEUE_ONSUCCESS_ARN}" +eic_queue_onsuccess.url = "${EIC_QUEUE_ONSUCCESS_URL}" + +eic_queue_onfailure = Queue("ack-lambda-controller-function-queue-eic-onfailure") +eic_queue_onfailure.name = "${EIC_QUEUE_ONFAILURE_NAME}" +eic_queue_onfailure.arn = "${EIC_QUEUE_ONFAILURE_ARN}" +eic_queue_onfailure.url = "${EIC_QUEUE_ONFAILURE_URL}" + +resources = BootstrapResources( + FunctionsBucket=bucket, + SigningProfile=signing_profile, + BasicRole=basic_role, + ESMRole=esm_role, + ESMTable=esm_table, + ESMQueue=esm_queue, + EICRole=eic_role, + EICQueueOnSuccess=eic_queue_onsuccess, + EICQueueOnFailure=eic_queue_onfailure, +) + +output_dir = Path("${SCRIPT_DIR}") +resources.serialize(output_dir) +print(f"bootstrap.pkl written to {output_dir / 'bootstrap.pkl'}") +PYEOF diff --git a/test/e2e/resources/lambda_function/Makefile b/test/e2e/resources/lambda_function/Makefile index a44cb5ce..3dd08f8b 100644 --- a/test/e2e/resources/lambda_function/Makefile +++ b/test/e2e/resources/lambda_function/Makefile @@ -1,9 +1,9 @@ -AWS_REGION ?= "us-west-2" +AWS_REGION ?= $(shell aws configure get region 2>/dev/null || echo "us-west-2") ECR_REPOSITORY ?= ack-e2e-testing-lambda-controller IMAGE_TAG ?= v1 AWS_ACCOUNT_ID ?= $(shell aws sts get-caller-identity --query "Account" --output text) -IMAGE_URL ?= $(AWS_ACCOUNT_ID).dkr.ecr.us-west-2.amazonaws.com/$(ECR_REPOSITORY):$(IMAGE_TAG) +IMAGE_URL ?= $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(ECR_REPOSITORY):$(IMAGE_TAG) build-image: docker build -t $(IMAGE_URL) . @@ -15,8 +15,8 @@ create-ecr-repository: aws ecr create-repository --region $(AWS_REGION) --repository-name $(ECR_REPOSITORY) >/dev/null docker-ecr-auth: - aws ecr get-login-password --region us-west-2 | \ + aws ecr get-login-password --region $(AWS_REGION) | \ docker login --username AWS --password-stdin\ - $(AWS_ACCOUNT_ID).dkr.ecr.us-west-2.amazonaws.com + $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com -all: build-image publish-image \ No newline at end of file +all: build-image publish-image diff --git a/test/e2e/service_bootstrap.py b/test/e2e/service_bootstrap.py index 613c00fb..ddfc8e04 100644 --- a/test/e2e/service_bootstrap.py +++ b/test/e2e/service_bootstrap.py @@ -64,6 +64,7 @@ def upload_function_to_bucket(file_path: str, bucket_name: str): file_path, bucket_name, object_name, + ExtraArgs={'ChecksumAlgorithm': 'SHA256'}, ) except ClientError as e: logging.error(e) diff --git a/test/e2e/setup.sh b/test/e2e/setup.sh new file mode 100755 index 00000000..68db665b --- /dev/null +++ b/test/e2e/setup.sh @@ -0,0 +1,205 @@ +#!/usr/bin/env bash +# Creates AWS resources needed for Lambda controller e2e tests. +# Writes resource names/ARNs to bootstrap_resources.env for use by pickle.sh. 
+# +# Usage: ./setup.sh [--pickle] + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/bootstrap_resources.env" + +# Generate random 8-char suffix +SUFFIX="$(LC_ALL=C tr -dc 'a-z0-9' < /dev/urandom | head -c 8 || true)" +echo "Using random suffix: ${SUFFIX}" + +# Resource names +BUCKET_NAME="ack-lambda-controller-tests-${SUFFIX}" +SIGNING_PROFILE_NAME="ack_testing_signer_${SUFFIX}" +BASIC_ROLE_NAME="ack-lambda-controller-basic-role-${SUFFIX}" +ESM_ROLE_NAME="ack-lambda-controller-esm-role-${SUFFIX}" +ESM_TABLE_NAME="ack-lambda-controller-table-${SUFFIX}" +ESM_QUEUE_NAME="ack-lambda-controller-queue-${SUFFIX}" +EIC_ROLE_NAME="ack-lambda-controller-eic-role-${SUFFIX}" +EIC_QUEUE_ONSUCCESS_NAME="ack-lambda-controller-function-queue-eic-onsuccess-${SUFFIX}" +EIC_QUEUE_ONFAILURE_NAME="ack-lambda-controller-function-queue-eic-onfailure-${SUFFIX}" + +AWS_REGION="${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null || echo "us-west-2")}" +AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" + +ASSUME_ROLE_POLICY='{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "lambda.amazonaws.com" }, + "Action": "sts:AssumeRole" + } + ] +}' + +LAMBDA_BASIC_EXECUTION_ARN="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +LAMBDA_DYNAMODB_EXECUTION_ROLE="arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole" +LAMBDA_SQS_QUEUE_EXECUTION_ROLE="arn:aws:iam::aws:policy/AmazonSQSFullAccess" + +# --- S3 Bucket --- +echo "Creating S3 bucket: ${BUCKET_NAME}" +if [ "${AWS_REGION}" = "us-east-1" ]; then + aws s3api create-bucket --bucket "${BUCKET_NAME}" +else + aws s3api create-bucket --bucket "${BUCKET_NAME}" \ + --create-bucket-configuration LocationConstraint="${AWS_REGION}" +fi + +# --- Signing Profile --- +echo "Creating signing profile: ${SIGNING_PROFILE_NAME}" +SIGNING_PROFILE_OUTPUT="$(aws signer put-signing-profile \ + --profile-name "${SIGNING_PROFILE_NAME}" \ + --platform-id AWSLambda-SHA384-ECDSA)" +SIGNING_PROFILE_ARN="$(echo "${SIGNING_PROFILE_OUTPUT}" | python3 -c "import sys,json; print(json.load(sys.stdin)['profileVersionArn'])")" +echo " Signing profile ARN: ${SIGNING_PROFILE_ARN}" + +# --- IAM Roles --- +create_role() { + local role_name="$1" + shift + local policies=("$@") + + echo "Creating IAM role: ${role_name}" >&2 + local role_arn + role_arn="$(aws iam create-role \ + --role-name "${role_name}" \ + --assume-role-policy-document "${ASSUME_ROLE_POLICY}" \ + --output text --query 'Role.Arn')" + + for policy_arn in "${policies[@]}"; do + echo " Attaching policy: ${policy_arn}" >&2 + aws iam attach-role-policy \ + --role-name "${role_name}" \ + --policy-arn "${policy_arn}" + done + + echo "${role_arn}" +} + +BASIC_ROLE_ARN="$(create_role "${BASIC_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}")" + +ESM_ROLE_ARN="$(create_role "${ESM_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}" \ + "${LAMBDA_DYNAMODB_EXECUTION_ROLE}" \ + "${LAMBDA_SQS_QUEUE_EXECUTION_ROLE}")" + +EIC_ROLE_ARN="$(create_role "${EIC_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}" \ + "${LAMBDA_SQS_QUEUE_EXECUTION_ROLE}")" + +echo "Waiting 30s for IAM role propagation..." 
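+# IAM is eventually consistent: Lambda API calls that reference a
+# brand-new role can fail with "role cannot be assumed" errors until the
+# role propagates, so pause before the tests start using these roles.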
+sleep 30 + +# --- DynamoDB Table --- +echo "Creating DynamoDB table: ${ESM_TABLE_NAME}" +aws dynamodb create-table \ + --table-name "${ESM_TABLE_NAME}" \ + --attribute-definitions \ + AttributeName=id,AttributeType=N \ + AttributeName=createdAt,AttributeType=S \ + --key-schema \ + AttributeName=id,KeyType=HASH \ + AttributeName=createdAt,KeyType=RANGE \ + --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \ + --stream-specification StreamEnabled=true,StreamViewType=NEW_IMAGE \ + --output text --query 'TableDescription.TableArn' > /dev/null + +echo "Waiting for table to become ACTIVE..." +aws dynamodb wait table-exists --table-name "${ESM_TABLE_NAME}" + +ESM_TABLE_STREAM_ARN="$(aws dynamodb describe-table \ + --table-name "${ESM_TABLE_NAME}" \ + --query 'Table.LatestStreamArn' --output text)" +echo " Table stream ARN: ${ESM_TABLE_STREAM_ARN}" + +# --- SQS Queues --- +create_queue() { + local queue_name="$1" + echo "Creating SQS queue: ${queue_name}" >&2 + local queue_url + queue_url="$(aws sqs create-queue --queue-name "${queue_name}" \ + --query 'QueueUrl' --output text)" + local queue_arn + queue_arn="$(aws sqs get-queue-attributes --queue-url "${queue_url}" \ + --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)" + echo " Queue URL: ${queue_url}" >&2 + echo " Queue ARN: ${queue_arn}" >&2 + echo "${queue_url} ${queue_arn}" +} + +ESM_QUEUE_OUTPUT="$(create_queue "${ESM_QUEUE_NAME}")" +ESM_QUEUE_URL="$(echo "${ESM_QUEUE_OUTPUT}" | tail -1 | cut -d' ' -f1)" +ESM_QUEUE_ARN="$(echo "${ESM_QUEUE_OUTPUT}" | tail -1 | cut -d' ' -f2)" + +EIC_ONSUCCESS_OUTPUT="$(create_queue "${EIC_QUEUE_ONSUCCESS_NAME}")" +EIC_QUEUE_ONSUCCESS_URL="$(echo "${EIC_ONSUCCESS_OUTPUT}" | tail -1 | cut -d' ' -f1)" +EIC_QUEUE_ONSUCCESS_ARN="$(echo "${EIC_ONSUCCESS_OUTPUT}" | tail -1 | cut -d' ' -f2)" + +EIC_ONFAILURE_OUTPUT="$(create_queue "${EIC_QUEUE_ONFAILURE_NAME}")" +EIC_QUEUE_ONFAILURE_URL="$(echo "${EIC_ONFAILURE_OUTPUT}" | tail -1 | cut -d' ' -f1)" +EIC_QUEUE_ONFAILURE_ARN="$(echo "${EIC_ONFAILURE_OUTPUT}" | tail -1 | cut -d' ' -f2)" + +# --- Zip and upload Lambda functions --- +echo "Zipping and uploading Lambda functions to bucket..." 
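+# The SHA256 checksum requested on upload is what the e2e tests later read
+# back via head_object with ChecksumMode='ENABLED' (see get_s3_object_sha256
+# in tests/helper.py) and compare against the function's CodeSha256.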
+MAIN_ZIP="$(mktemp -d)/main.zip" +UPDATED_ZIP="$(mktemp -d)/updated_main.zip" + +(cd "${SCRIPT_DIR}" && zip -j "${MAIN_ZIP}" ./resources/lambda_function/main.py) +(cd "${SCRIPT_DIR}" && zip -j "${UPDATED_ZIP}" ./resources/lambda_function/updated_main.py) + +aws s3 cp "${MAIN_ZIP}" "s3://${BUCKET_NAME}/main.zip" --checksum-algorithm SHA256 +aws s3 cp "${UPDATED_ZIP}" "s3://${BUCKET_NAME}/updated_main.zip" --checksum-algorithm SHA256 +rm -f "${MAIN_ZIP}" "${UPDATED_ZIP}" + +# --- Write env file --- +cat > "${ENV_FILE}" << EOF +# Generated by setup.sh on $(date -u +%Y-%m-%dT%H:%M:%SZ) +BUCKET_NAME='${BUCKET_NAME}' +SIGNING_PROFILE_NAME='${SIGNING_PROFILE_NAME}' +SIGNING_PROFILE_ARN='${SIGNING_PROFILE_ARN}' +BASIC_ROLE_NAME='${BASIC_ROLE_NAME}' +BASIC_ROLE_ARN='${BASIC_ROLE_ARN}' +ESM_ROLE_NAME='${ESM_ROLE_NAME}' +ESM_ROLE_ARN='${ESM_ROLE_ARN}' +ESM_TABLE_NAME='${ESM_TABLE_NAME}' +ESM_TABLE_STREAM_ARN='${ESM_TABLE_STREAM_ARN}' +ESM_QUEUE_NAME='${ESM_QUEUE_NAME}' +ESM_QUEUE_URL='${ESM_QUEUE_URL}' +ESM_QUEUE_ARN='${ESM_QUEUE_ARN}' +EIC_ROLE_NAME='${EIC_ROLE_NAME}' +EIC_ROLE_ARN='${EIC_ROLE_ARN}' +EIC_QUEUE_ONSUCCESS_NAME='${EIC_QUEUE_ONSUCCESS_NAME}' +EIC_QUEUE_ONSUCCESS_URL='${EIC_QUEUE_ONSUCCESS_URL}' +EIC_QUEUE_ONSUCCESS_ARN='${EIC_QUEUE_ONSUCCESS_ARN}' +EIC_QUEUE_ONFAILURE_NAME='${EIC_QUEUE_ONFAILURE_NAME}' +EIC_QUEUE_ONFAILURE_URL='${EIC_QUEUE_ONFAILURE_URL}' +EIC_QUEUE_ONFAILURE_ARN='${EIC_QUEUE_ONFAILURE_ARN}' +EOF + +echo "" +echo "=== Bootstrap Resources Created ===" +echo " S3 Bucket: ${BUCKET_NAME}" +echo " Signing Profile: ${SIGNING_PROFILE_NAME}" +echo " Basic Role: ${BASIC_ROLE_NAME}" +echo " ESM Role: ${ESM_ROLE_NAME}" +echo " ESM Table: ${ESM_TABLE_NAME}" +echo " ESM Queue: ${ESM_QUEUE_NAME}" +echo " EIC Role: ${EIC_ROLE_NAME}" +echo " EIC Queue Success: ${EIC_QUEUE_ONSUCCESS_NAME}" +echo " EIC Queue Failure: ${EIC_QUEUE_ONFAILURE_NAME}" +echo "" +echo "Resource details written to: ${ENV_FILE}" + +# Optionally run pickle.sh +if [[ "${1:-}" == "--pickle" ]]; then + echo "" + echo "Running pickle.sh..." + "${SCRIPT_DIR}/pickle.sh" +fi diff --git a/test/e2e/teardown.sh b/test/e2e/teardown.sh new file mode 100755 index 00000000..d3caa3cf --- /dev/null +++ b/test/e2e/teardown.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +# Tears down AWS resources created by setup.sh. +# Reads resource names from bootstrap.pkl (via Python deserialization) +# or falls back to bootstrap_resources.env. +# +# Usage: ./teardown.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/bootstrap_resources.env" +PKL_FILE="${SCRIPT_DIR}/bootstrap.pkl" + +# Try to load resource info from pickle file first, fall back to env file +if [ -f "${PKL_FILE}" ]; then + echo "Reading resources from bootstrap.pkl..." 
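+  # The Python heredoc prints NAME="value" shell assignments on stdout,
+  # which eval then loads into this shell. bootstrap.pkl takes precedence
+  # over bootstrap_resources.env, so remove a stale bootstrap.pkl if
+  # setup.sh was re-run without --pickle.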
+ eval "$(python3 << PYEOF +import sys +sys.path.insert(0, "${SCRIPT_DIR}/..") + +from pathlib import Path +from e2e.bootstrap_resources import BootstrapResources + +resources = BootstrapResources.deserialize(Path("${SCRIPT_DIR}")) + +print(f'BUCKET_NAME="{resources.FunctionsBucket.name}"') +print(f'SIGNING_PROFILE_NAME="{resources.SigningProfile.name}"') +print(f'BASIC_ROLE_NAME="{resources.BasicRole.name}"') +print(f'BASIC_ROLE_ARN="{resources.BasicRole.arn}"') +print(f'ESM_ROLE_NAME="{resources.ESMRole.name}"') +print(f'ESM_ROLE_ARN="{resources.ESMRole.arn}"') +print(f'ESM_TABLE_NAME="{resources.ESMTable.name}"') +print(f'ESM_QUEUE_NAME="{resources.ESMQueue.name}"') +print(f'ESM_QUEUE_URL="{resources.ESMQueue.url}"') +print(f'EIC_ROLE_NAME="{resources.EICRole.name}"') +print(f'EIC_ROLE_ARN="{resources.EICRole.arn}"') +print(f'EIC_QUEUE_ONSUCCESS_NAME="{resources.EICQueueOnSuccess.name}"') +print(f'EIC_QUEUE_ONSUCCESS_URL="{resources.EICQueueOnSuccess.url}"') +print(f'EIC_QUEUE_ONFAILURE_NAME="{resources.EICQueueOnFailure.name}"') +print(f'EIC_QUEUE_ONFAILURE_URL="{resources.EICQueueOnFailure.url}"') +PYEOF +)" +elif [ -f "${ENV_FILE}" ]; then + echo "Reading resources from bootstrap_resources.env..." + # shellcheck source=/dev/null + source "${ENV_FILE}" +else + echo "Error: Neither bootstrap.pkl nor bootstrap_resources.env found." + echo "Nothing to tear down." + exit 1 +fi + +echo "" +echo "=== Tearing down resources ===" + +# --- Empty and delete S3 bucket --- +echo "Deleting S3 bucket: ${BUCKET_NAME}" +aws s3 rm "s3://${BUCKET_NAME}" --recursive 2>/dev/null || true +aws s3api delete-bucket --bucket "${BUCKET_NAME}" 2>/dev/null || echo " Warning: bucket deletion failed (may already be deleted)" + +# --- Cancel signing profile --- +echo "Cancelling signing profile: ${SIGNING_PROFILE_NAME}" +aws signer cancel-signing-profile --profile-name "${SIGNING_PROFILE_NAME}" 2>/dev/null || echo " Warning: signing profile cancellation failed (may already be cancelled)" + +# --- Delete SQS queues --- +for queue_var in ESM_QUEUE EIC_QUEUE_ONSUCCESS EIC_QUEUE_ONFAILURE; do + name_var="${queue_var}_NAME" + url_var="${queue_var}_URL" + echo "Deleting SQS queue: ${!name_var}" + aws sqs delete-queue --queue-url "${!url_var}" 2>/dev/null || echo " Warning: queue deletion failed (may already be deleted)" +done + +# --- Delete DynamoDB table --- +echo "Deleting DynamoDB table: ${ESM_TABLE_NAME}" +aws dynamodb delete-table --table-name "${ESM_TABLE_NAME}" 2>/dev/null || echo " Warning: table deletion failed (may already be deleted)" + +# --- Detach policies and delete IAM roles --- +LAMBDA_BASIC_EXECUTION_ARN="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +LAMBDA_DYNAMODB_EXECUTION_ROLE="arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole" +LAMBDA_SQS_QUEUE_EXECUTION_ROLE="arn:aws:iam::aws:policy/AmazonSQSFullAccess" + +delete_role() { + local role_name="$1" + shift + local policies=("$@") + + echo "Deleting IAM role: ${role_name}" + for policy_arn in "${policies[@]}"; do + aws iam detach-role-policy --role-name "${role_name}" --policy-arn "${policy_arn}" 2>/dev/null || true + done + aws iam delete-role --role-name "${role_name}" 2>/dev/null || echo " Warning: role deletion failed (may already be deleted)" +} + +delete_role "${EIC_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}" \ + "${LAMBDA_SQS_QUEUE_EXECUTION_ROLE}" + +delete_role "${ESM_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}" \ + "${LAMBDA_DYNAMODB_EXECUTION_ROLE}" \ + "${LAMBDA_SQS_QUEUE_EXECUTION_ROLE}" + 
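+# Roles can be deleted in any order, but delete-role fails with a
+# DeleteConflict unless managed policies are detached first, which
+# delete_role handles.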
+delete_role "${BASIC_ROLE_NAME}" \ + "${LAMBDA_BASIC_EXECUTION_ARN}" + +# --- Clean up local files --- +if [ -f "${ENV_FILE}" ]; then + echo "Removing ${ENV_FILE}" + rm -f "${ENV_FILE}" +fi + +echo "" +echo "=== Teardown complete ===" diff --git a/test/e2e/tests/helper.py b/test/e2e/tests/helper.py index ec2258dc..f38f95fd 100644 --- a/test/e2e/tests/helper.py +++ b/test/e2e/tests/helper.py @@ -16,6 +16,19 @@ import logging +import boto3 + + +def get_s3_object_sha256(bucket: str, key: str) -> str: + s3_client = boto3.client('s3') + response = s3_client.head_object( + Bucket=bucket, + Key=key, + ChecksumMode='ENABLED', + ) + return response['ChecksumSHA256'] + + class LambdaValidator: def __init__(self, lambda_client): self.lambda_client = lambda_client diff --git a/test/e2e/tests/test_function.py b/test/e2e/tests/test_function.py index 92390ab6..52337e29 100644 --- a/test/e2e/tests/test_function.py +++ b/test/e2e/tests/test_function.py @@ -17,9 +17,6 @@ import pytest import time import logging -import hashlib -import base64 - from acktest import tags from acktest.resources import random_suffix_name from acktest.aws.identity import get_region, get_account_id @@ -28,9 +25,9 @@ from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_lambda_resource from e2e.replacement_values import REPLACEMENT_VALUES from e2e.bootstrap_resources import get_bootstrap_resources -from e2e.service_bootstrap import LAMBDA_FUNCTION_FILE_ZIP, LAMBDA_FUNCTION_FILE_PATH_ZIP -from e2e.service_bootstrap import LAMBDA_FUNCTION_UPDATED_FILE_ZIP, LAMBDA_FUNCTION_UPDATED_FILE_PATH_ZIP -from e2e.tests.helper import LambdaValidator +from e2e.service_bootstrap import LAMBDA_FUNCTION_FILE_ZIP +from e2e.service_bootstrap import LAMBDA_FUNCTION_UPDATED_FILE_ZIP +from e2e.tests.helper import LambdaValidator, get_s3_object_sha256 RESOURCE_PLURAL = "functions" @@ -543,7 +540,7 @@ def test_function_snapstart(self, lambda_client): assert not lambda_validator.function_exists(resource_name) def test_function_architecture(self, lambda_client): - resource_name = random_suffix_name("functionsarchitecture", 24) + resource_name = random_suffix_name("functionsarchitecture", 30) resources = get_bootstrap_resources() logging.debug(resources) @@ -669,7 +666,7 @@ def test_function_features(self, lambda_client): assert not lambda_validator.function_exists(resource_name) def test_function_runtime(self, lambda_client): - resource_name = random_suffix_name("function", 24) + resource_name = random_suffix_name("functionruntime", 24) resources = get_bootstrap_resources() logging.debug(resources) @@ -736,13 +733,15 @@ def test_function_layers(self, lambda_client): resources = get_bootstrap_resources() logging.debug(resources) + region = get_region() + replacements = REPLACEMENT_VALUES.copy() replacements["FUNCTION_NAME"] = resource_name replacements["BUCKET_NAME"] = resources.FunctionsBucket.name replacements["LAMBDA_ROLE"] = resources.EICRole.arn replacements["LAMBDA_FILE_NAME"] = LAMBDA_FUNCTION_FILE_ZIP - replacements["AWS_REGION"] = get_region() - replacements["LAYERS"] = "arn:aws:lambda:us-west-2:336392948345:layer:AWSSDKPandas-Python310:14" + replacements["AWS_REGION"] = region + replacements["LAYERS"] = f"arn:aws:lambda:{region}:336392948345:layer:AWSSDKPandas-Python310:14" # Load Lambda CR resource_data = load_lambda_resource( @@ -772,7 +771,7 @@ def test_function_layers(self, lambda_client): assert lambda_validator.function_exists(resource_name) # Update cr - layers_list = 
["arn:aws:lambda:us-west-2:017000801446:layer:AWSLambdaPowertoolsPythonV2:68", "arn:aws:lambda:us-west-2:580247275435:layer:LambdaInsightsExtension:52"] + layers_list = [f"arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPythonV2:68", f"arn:aws:lambda:{region}:580247275435:layer:LambdaInsightsExtension:52"] cr["spec"]["layers"] = layers_list #Patch k8s resource @@ -876,21 +875,13 @@ def test_function_code_s3(self, lambda_client): resources = get_bootstrap_resources() logging.debug(resources) - archive_1 = open(LAMBDA_FUNCTION_FILE_PATH_ZIP, 'rb') - readFile_1 = archive_1.read() - hash_1 = hashlib.sha256(readFile_1) - binary_hash_1 = hash_1.digest() - base64_hash_1 = base64.b64encode(binary_hash_1).decode('utf-8') - - archive_2 = open(LAMBDA_FUNCTION_UPDATED_FILE_PATH_ZIP, 'rb') - readFile_2 = archive_2.read() - hash_2 = hashlib.sha256(readFile_2) - binary_hash_2 = hash_2.digest() - base64_hash_2 = base64.b64encode(binary_hash_2).decode('utf-8') + bucket_name = resources.FunctionsBucket.name + base64_hash_1 = get_s3_object_sha256(bucket_name, "main.zip") + base64_hash_2 = get_s3_object_sha256(bucket_name, "updated_main.zip") replacements = REPLACEMENT_VALUES.copy() replacements["FUNCTION_NAME"] = resource_name - replacements["BUCKET_NAME"] = resources.FunctionsBucket.name + replacements["BUCKET_NAME"] = bucket_name replacements["LAMBDA_ROLE"] = resources.BasicRole.arn replacements["LAMBDA_FILE_NAME"] = LAMBDA_FUNCTION_FILE_ZIP replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "0" @@ -925,7 +916,7 @@ def test_function_code_s3(self, lambda_client): # Assert that the original code.s3Bucket and code.s3Key is still part of # the function's CR - assert cr["spec"]["code"]["s3Bucket"] == resources.FunctionsBucket.name + assert cr["spec"]["code"]["s3Bucket"] == bucket_name assert cr["spec"]["code"]["s3Key"] == LAMBDA_FUNCTION_FILE_ZIP # Check Lambda function exists @@ -952,28 +943,20 @@ def test_function_code_s3(self, lambda_client): # Check Lambda function doesn't exist assert not lambda_validator.function_exists(resource_name) - + def test_function_update_code_and_architecture(self, lambda_client): resource_name = random_suffix_name("functionupdatecode", 24) resources = get_bootstrap_resources() logging.debug(resources) - archive_1 = open(LAMBDA_FUNCTION_FILE_PATH_ZIP, 'rb') - readFile_1 = archive_1.read() - hash_1 = hashlib.sha256(readFile_1) - binary_hash_1 = hash_1.digest() - base64_hash_1 = base64.b64encode(binary_hash_1).decode('utf-8') - - archive_2 = open(LAMBDA_FUNCTION_UPDATED_FILE_PATH_ZIP, 'rb') - readFile_2 = archive_2.read() - hash_2 = hashlib.sha256(readFile_2) - binary_hash_2 = hash_2.digest() - base64_hash_2 = base64.b64encode(binary_hash_2).decode('utf-8') + bucket_name = resources.FunctionsBucket.name + base64_hash_1 = get_s3_object_sha256(bucket_name, "main.zip") + base64_hash_2 = get_s3_object_sha256(bucket_name, "updated_main.zip") replacements = REPLACEMENT_VALUES.copy() replacements["FUNCTION_NAME"] = resource_name - replacements["BUCKET_NAME"] = resources.FunctionsBucket.name + replacements["BUCKET_NAME"] = bucket_name replacements["LAMBDA_ROLE"] = resources.BasicRole.arn replacements["LAMBDA_FILE_NAME"] = LAMBDA_FUNCTION_FILE_ZIP replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "0" @@ -1008,7 +991,7 @@ def test_function_update_code_and_architecture(self, lambda_client): # Assert that the original code.s3Bucket and code.s3Key is still part of # the function's CR - assert cr["spec"]["code"]["s3Bucket"] == resources.FunctionsBucket.name + assert 
cr["spec"]["code"]["s3Bucket"] == bucket_name assert cr["spec"]["code"]["s3Key"] == LAMBDA_FUNCTION_FILE_ZIP # Check Lambda function exists @@ -1036,4 +1019,88 @@ def test_function_update_code_and_architecture(self, lambda_client): time.sleep(DELETE_WAIT_AFTER_SECONDS) # Check Lambda function doesn't exist - assert not lambda_validator.function_exists(resource_name) \ No newline at end of file + assert not lambda_validator.function_exists(resource_name) + + def test_function_update_code_and_environment_variable(self, lambda_client): + resource_name = random_suffix_name("functionupdatecodeandenv", 32) + + resources = get_bootstrap_resources() + logging.debug(resources) + + bucket_name = resources.FunctionsBucket.name + base64_hash_1 = get_s3_object_sha256(bucket_name, "main.zip") + base64_hash_2 = get_s3_object_sha256(bucket_name, "updated_main.zip") + + replacements = REPLACEMENT_VALUES.copy() + replacements["FUNCTION_NAME"] = resource_name + replacements["BUCKET_NAME"] = bucket_name + replacements["LAMBDA_ROLE"] = resources.BasicRole.arn + replacements["LAMBDA_FILE_NAME"] = LAMBDA_FUNCTION_FILE_ZIP + replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "0" + replacements["CODE_SIGNING_CONFIG_ARN"] = "" + replacements["AWS_REGION"] = get_region() + replacements["ARCHITECTURES"] = 'x86_64' + replacements["HASH"] = base64_hash_1 + + # Load Lambda CR + resource_data = load_lambda_resource( + "function_code_s3", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + resource_name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + time.sleep(CREATE_WAIT_AFTER_SECONDS) + + cr = k8s.wait_resource_consumed_by_controller(ref) + + lambda_validator = LambdaValidator(lambda_client) + + # Assert that the original code.s3Bucket and code.s3Key is still part of + # the function's CR + assert cr["spec"]["code"]["s3Bucket"] == bucket_name + assert cr["spec"]["code"]["s3Key"] == LAMBDA_FUNCTION_FILE_ZIP + assert cr["spec"].get("environment", {}).get("variables", {}) == {} + # Check Lambda function exists + function = lambda_validator.get_function(resource_name) + assert function is not None + assert function["Configuration"]["CodeSha256"] == base64_hash_1 + assert function["Configuration"].get("Environment", {}).get("Variables", {}) == {} + + # Update cr + cr["spec"]["code"]["sha256"] = base64_hash_2 + cr["spec"]["code"]["s3Key"] = LAMBDA_FUNCTION_UPDATED_FILE_ZIP + cr["spec"]["environment"] = dict(variables={ + "TEST_ENV_VAR": "test_value" + }) + + # Patch k8s resource + k8s.patch_custom_resource(ref, cr) + time.sleep(UPDATE_WAIT_AFTER_SECONDS) + + # Check function updated fields + function = lambda_validator.get_function(resource_name) + assert function is not None + assert function["Configuration"]["CodeSha256"] == base64_hash_2 + assert function["Configuration"].get("Environment", {}).get("Variables", {}) == { + "TEST_ENV_VAR": "test_value" + } + + # Delete k8s resource + _, deleted = k8s.delete_custom_resource(ref) + assert deleted is True + + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + # Check Lambda function doesn't exist + assert not lambda_validator.function_exists(resource_name) diff --git a/test/e2e/tests/test_version.py b/test/e2e/tests/test_version.py index 32d26399..c9e7d5dd 100644 --- a/test/e2e/tests/test_version.py +++ 
b/test/e2e/tests/test_version.py @@ -17,9 +17,6 @@ import pytest import time import logging -import hashlib -import base64 - from acktest.resources import random_suffix_name from acktest.aws.identity import get_region from acktest.k8s import resource as k8s @@ -27,8 +24,8 @@ from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_lambda_resource from e2e.replacement_values import REPLACEMENT_VALUES from e2e.bootstrap_resources import get_bootstrap_resources -from e2e.service_bootstrap import LAMBDA_FUNCTION_FILE_ZIP, LAMBDA_FUNCTION_FILE_PATH_ZIP -from e2e.tests.helper import LambdaValidator +from e2e.service_bootstrap import LAMBDA_FUNCTION_FILE_ZIP +from e2e.tests.helper import LambdaValidator, get_s3_object_sha256 RESOURCE_PLURAL = "versions" @@ -183,11 +180,8 @@ def test_version_with_revision_hash(self, lambda_client, lambda_function): resource_name = random_suffix_name("lambda-version", 24) - archive_1 = open(LAMBDA_FUNCTION_FILE_PATH_ZIP, 'rb') - readFile_1 = archive_1.read() - hash_1 = hashlib.sha256(readFile_1) - binary_hash_1 = hash_1.digest() - base64_hash_1 = base64.b64encode(binary_hash_1).decode('utf-8') + resources = get_bootstrap_resources() + base64_hash_1 = get_s3_object_sha256(resources.FunctionsBucket.name, "main.zip") replacements = REPLACEMENT_VALUES.copy() replacements["AWS_REGION"] = get_region()