Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions Dockerfile.local
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Minimal runtime image for local testing: distroless static base with a
# nonroot tag (no shell, no package manager).
FROM gcr.io/distroless/static:nonroot
WORKDIR /
# The controller binary must already exist at bin/controller — it is built
# outside this Dockerfile (see test/e2e/README.md for the go build command).
COPY bin/controller /bin/controller
# Run as the conventional nonroot uid:gid provided by the distroless image.
USER 65532:65532
ENTRYPOINT ["/bin/controller"]
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,11 @@ You can also learn more about our [Governance](/GOVERNANCE.md) structure.

[coc]: https://aws.github.io/code-of-conduct

## Testing

See our [testing documentation](/test/e2e/README.md) for details on how to
set up the Lambda controller for testing.

## License

This project is [licensed](/LICENSE) under the Apache-2.0 License.
Expand Down
5 changes: 4 additions & 1 deletion test/e2e/.gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
__pycache__/
*.py[cod]
**/bootstrap.yaml
**/bootstrap.yaml
bootstrap.pkl*
bootstrap_resources.env

78 changes: 78 additions & 0 deletions test/e2e/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@

## Testing

You will need an AWS account, a Kubernetes cluster (running locally is fine),
and Python.

### Container image build

This assumes a target docker architecture of linux/arm64 (typically for OS X)

1. `CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/controller ./cmd/controller/`
2. `docker build -f Dockerfile.local -t lambda-controller:local .`

### Kubernetes setup

Create a temporary credentials file in `~/.aws.temporary.creds` that you don't mind using with the ACK controller

```
[temp-profile]
aws_access_key_id=<access key>
aws_secret_access_key=<secret key>
aws_session_token=<session token>
```

1. `kubectl create namespace ack-system`
2. `kubectl create secret generic aws-credentials --from-file=credentials=$HOME/.aws.temporary.creds -n ack-system`
3. ```
helm install ack-lambda-controller ./helm \
--namespace ack-system \
--set image.repository=lambda-controller \
--set image.tag=local \
--set aws.region=ap-southeast-2 \
--set aws.credentials.secretName=aws-credentials \
--set aws.credentials.secretKey=credentials \
--set aws.credentials.profile=temp-profile \
--set installScope=cluster \
--set leaderElection.enabled=false
```

### Python set-up

In `test/e2e`:

1. Create a virtual environment `python -m venv venv`
2. Activate the virtual environment `source venv/bin/activate`
3. Install testing requirements `pip install -r requirements.txt`

### AWS setup

In `test/e2e`:

1. Run `AWS_PROFILE=my-profile ./setup.sh --pickle` (or run `setup.sh` and then
`pickle.sh`)

### Setup the docker test image used by the lambda

In `test/e2e/resources/lambda_function`:

1. `AWS_PROFILE=my-profile aws ecr create-repository --repository-name ack-e2e-testing-lambda-controller`
2. Run `AWS_PROFILE=my-profile make`
3. ```
repo=$(AWS_PROFILE=my-profile aws ecr describe-repositories --repository-names ack-e2e-testing-lambda-controller --query 'repositories[].repositoryUri' --output text)
AWS_PROFILE=my-profile aws ecr get-login-password | docker login --password-stdin -u "${repo%/*}"
docker push ${repo}:v1
```
4. Run `zip main.zip main.py` and `zip updated_main.zip updated_main.py`

### Run the test suite

In `test/e2e`:

1. Run `AWS_PROFILE=my-profile pytest`

### Clean up

In `test/e2e`:

1. Run `AWS_PROFILE=my-profile ./teardown.sh`
118 changes: 118 additions & 0 deletions test/e2e/pickle.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
#!/usr/bin/env bash
# Generates bootstrap.pkl from bootstrap_resources.env.
# The pickle file contains serialized acktest BootstrapResources objects
# that the e2e tests load at startup.
#
# Usage: ./pickle.sh

# -u is load-bearing here: the Python heredoc below interpolates the env-file
# variables at bash time, so any variable missing from bootstrap_resources.env
# aborts the script instead of silently producing an empty field.
set -euo pipefail

# Resolve paths relative to this script so it works from any working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${SCRIPT_DIR}/bootstrap_resources.env"
PKL_FILE="${SCRIPT_DIR}/bootstrap.pkl"

if [ ! -f "${ENV_FILE}" ]; then
echo "Error: ${ENV_FILE} not found. Run setup.sh first."
exit 1
fi

# shellcheck source=/dev/null
source "${ENV_FILE}"

# Back up existing pickle file (timestamped copy; the original is then
# overwritten below by resources.serialize).
if [ -f "${PKL_FILE}" ]; then
BACKUP="${PKL_FILE}.$(date +%s)"
echo "Backing up existing bootstrap.pkl to ${BACKUP}"
cp "${PKL_FILE}" "${BACKUP}"
fi

echo "Generating bootstrap.pkl..."

# NOTE: the heredoc delimiter is unquoted, so every occurrence of a shell
# variable in the Python code below is substituted by bash BEFORE python3
# runs. Values containing double quotes or backslashes would break the
# generated Python source — acceptable for names/ARNs/URLs, which never do.
python3 << PYEOF
import sys
# Make the e2e package importable when run from test/e2e.
sys.path.insert(0, "${SCRIPT_DIR}/..")

from pathlib import Path
from acktest.bootstrapping.s3 import Bucket
from acktest.bootstrapping.dynamodb import Table
from acktest.bootstrapping.signer import SigningProfile
from acktest.bootstrapping.sqs import Queue
from acktest.bootstrapping.iam import Role
from e2e.bootstrap_resources import BootstrapResources

# Construct resource objects with the correct output attributes set.
# The constructor arguments mirror what setup.sh created; the attribute
# assignments afterwards overwrite the generated fields with the actual
# values recorded in bootstrap_resources.env.

bucket = Bucket("ack-lambda-controller-tests")
bucket.name = "${BUCKET_NAME}"

signing_profile = SigningProfile("ack_testing_signer", signing_platform_id="AWSLambda-SHA384-ECDSA")
signing_profile.name = "${SIGNING_PROFILE_NAME}"
signing_profile.signing_profile_arn = "${SIGNING_PROFILE_ARN}"

# Basic execution role used by plain Lambda function tests.
basic_role = Role("ack-lambda-controller-basic-role", principal_service="lambda.amazonaws.com",
managed_policies=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"])
basic_role.name = "${BASIC_ROLE_NAME}"
basic_role.arn = "${BASIC_ROLE_ARN}"

# Role for event-source-mapping (ESM) tests: needs DynamoDB stream + SQS access.
esm_role = Role("ack-lambda-controller-esm-role", principal_service="lambda.amazonaws.com",
managed_policies=[
"arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
"arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole",
"arn:aws:iam::aws:policy/AmazonSQSFullAccess",
])
esm_role.name = "${ESM_ROLE_NAME}"
esm_role.arn = "${ESM_ROLE_ARN}"

# DynamoDB table with a stream enabled — the ESM tests consume its stream ARN.
esm_table = Table("ack-lambda-controller-table",
attribute_definitions=[
{"AttributeName": "id", "AttributeType": "N"},
{"AttributeName": "createdAt", "AttributeType": "S"},
],
key_schema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "createdAt", "KeyType": "RANGE"},
],
stream_specification={"StreamEnabled": True, "StreamViewType": "NEW_IMAGE"},
provisioned_throughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5})
esm_table.name = "${ESM_TABLE_NAME}"
esm_table.latest_stream_arn = "${ESM_TABLE_STREAM_ARN}"

esm_queue = Queue("ack-lambda-controller-queue")
esm_queue.name = "${ESM_QUEUE_NAME}"
esm_queue.arn = "${ESM_QUEUE_ARN}"
esm_queue.url = "${ESM_QUEUE_URL}"

# Role for event-invoke-config (EIC) tests: on-success/on-failure SQS targets.
eic_role = Role("ack-lambda-controller-eic-role", principal_service="lambda.amazonaws.com",
managed_policies=[
"arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
"arn:aws:iam::aws:policy/AmazonSQSFullAccess",
])
eic_role.name = "${EIC_ROLE_NAME}"
eic_role.arn = "${EIC_ROLE_ARN}"

eic_queue_onsuccess = Queue("ack-lambda-controller-function-queue-eic-onsuccess")
eic_queue_onsuccess.name = "${EIC_QUEUE_ONSUCCESS_NAME}"
eic_queue_onsuccess.arn = "${EIC_QUEUE_ONSUCCESS_ARN}"
eic_queue_onsuccess.url = "${EIC_QUEUE_ONSUCCESS_URL}"

eic_queue_onfailure = Queue("ack-lambda-controller-function-queue-eic-onfailure")
eic_queue_onfailure.name = "${EIC_QUEUE_ONFAILURE_NAME}"
eic_queue_onfailure.arn = "${EIC_QUEUE_ONFAILURE_ARN}"
eic_queue_onfailure.url = "${EIC_QUEUE_ONFAILURE_URL}"

# Field names here must match what the e2e tests read from BootstrapResources.
resources = BootstrapResources(
FunctionsBucket=bucket,
SigningProfile=signing_profile,
BasicRole=basic_role,
ESMRole=esm_role,
ESMTable=esm_table,
ESMQueue=esm_queue,
EICRole=eic_role,
EICQueueOnSuccess=eic_queue_onsuccess,
EICQueueOnFailure=eic_queue_onfailure,
)

# serialize() writes bootstrap.pkl into the given directory; presumably it
# overwrites any existing file — the backup above guards against data loss.
output_dir = Path("${SCRIPT_DIR}")
resources.serialize(output_dir)
print(f"bootstrap.pkl written to {output_dir / 'bootstrap.pkl'}")
PYEOF
10 changes: 5 additions & 5 deletions test/e2e/resources/lambda_function/Makefile
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
AWS_REGION ?= "us-west-2"
AWS_REGION ?= $(shell aws configure get region 2>/dev/null || echo "us-west-2")
ECR_REPOSITORY ?= ack-e2e-testing-lambda-controller
IMAGE_TAG ?= v1

AWS_ACCOUNT_ID ?= $(shell aws sts get-caller-identity --query "Account" --output text)
IMAGE_URL ?= $(AWS_ACCOUNT_ID).dkr.ecr.us-west-2.amazonaws.com/$(ECR_REPOSITORY):$(IMAGE_TAG)
IMAGE_URL ?= $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(ECR_REPOSITORY):$(IMAGE_TAG)

build-image:
docker build -t $(IMAGE_URL) .
Expand All @@ -15,8 +15,8 @@ create-ecr-repository:
aws ecr create-repository --region $(AWS_REGION) --repository-name $(ECR_REPOSITORY) >/dev/null

docker-ecr-auth:
aws ecr get-login-password --region us-west-2 | \
aws ecr get-login-password --region $(AWS_REGION) | \
docker login --username AWS --password-stdin\
$(AWS_ACCOUNT_ID).dkr.ecr.us-west-2.amazonaws.com
$(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com

all: build-image publish-image
all: build-image publish-image
1 change: 1 addition & 0 deletions test/e2e/service_bootstrap.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ def upload_function_to_bucket(file_path: str, bucket_name: str):
file_path,
bucket_name,
object_name,
ExtraArgs={'ChecksumAlgorithm': 'SHA256'},
)
except ClientError as e:
logging.error(e)
Expand Down
Loading