/object/object1/+`
+ return ServiceModelIdentifier("s3")
+
+ # detect S3 requests with "AWS id:key" Auth headers
+ auth_header = request.headers.get("Authorization") or ""
+ if auth_header.startswith("AWS "):
+ return ServiceModelIdentifier("s3")
+
+ if uses_host_addressing(request.headers):
+ # Note: This needs to be the last rule (and therefore is not in the host rules), since it is incredibly greedy
+ return ServiceModelIdentifier("s3")
+
+
+def resolve_conflicts(
+ candidates: Set[ServiceModelIdentifier], request: Request
+) -> ServiceModelIdentifier:
+ """
+ Some service definitions are overlapping to a point where they are _not_ distinguishable at all
+ (e.g. ``DescribeEndpoints`` in timestream-query and timestream-write).
+ These conflicts need to be resolved manually.
+ """
+ service_name_candidates = {service.name for service in candidates}
+ if service_name_candidates == {"timestream-query", "timestream-write"}:
+ return ServiceModelIdentifier("timestream-query")
+ if service_name_candidates == {"docdb", "neptune", "rds"}:
+ return ServiceModelIdentifier("rds")
+ if service_name_candidates == {"sqs"}:
+ # SQS now has 2 different specs for `query` and `json` protocol. From our current implementation with the
+ # parser and serializer, we need to have 2 different service names for them, but they share one provider
+ # implementation. `sqs` represents the `json` protocol spec, and `sqs-query` the `query` protocol
+ # (default again in botocore starting with 1.32.6).
+ # The `application/x-amz-json-1.0` header is mandatory for requests targeting SQS with the `json` protocol. We
+ # can safely route them to the `sqs` JSON parser/serializer. If not present, route the request to the
+ # sqs-query protocol.
+ content_type = request.headers.get("Content-Type")
+ return (
+ ServiceModelIdentifier("sqs")
+ if content_type == "application/x-amz-json-1.0"
+ else ServiceModelIdentifier("sqs", "query")
+ )
+
+
+def determine_aws_service_model_for_data_plane(
+ request: Request, services: ServiceCatalog = None
+) -> Optional[ServiceModel]:
+ """
+ A stripped down version of ``determine_aws_service_model`` which only checks hostname indicators for
+ the AWS data plane, such as s3 websites, lambda function URLs, or API gateway routes.
+ """
+ custom_host_match = custom_host_addressing_rules(request.host)
+ if custom_host_match:
+ services = services or get_service_catalog()
+ return services.get(*custom_host_match)
+
+
+def determine_aws_service_model(
+ request: Request, services: ServiceCatalog = None
+) -> Optional[ServiceModel]:
+ """
+ Tries to determine the name of the AWS service an incoming request is targeting.
+ :param request: to determine the target service name of
+ :param services: service catalog (can be handed in for caching purposes)
+ :return: service name string (or None if the targeting service could not be determined exactly)
+ """
+ services = services or get_service_catalog()
+ signing_name, target_prefix, operation, host, path = _extract_service_indicators(request)
+ candidates = set()
+
+ # 1. check the signing names
+ if signing_name:
+ signing_name_candidates = services.by_signing_name(signing_name)
+ if len(signing_name_candidates) == 1:
+ # a unique signing-name -> service name mapping is the case for ~75% of service operations
+ return services.get(*signing_name_candidates[0])
+
+ # try to find a match with the custom signing name rules
+ custom_match = custom_signing_name_rules(signing_name, path)
+ if custom_match:
+ return services.get(*custom_match)
+
+ # still ambiguous - add the services to the list of candidates
+ candidates.update(signing_name_candidates)
+
+ # 2. check the target prefix
+ if target_prefix and operation:
+ target_candidates = services.by_target_prefix(target_prefix)
+ if len(target_candidates) == 1:
+ # a unique target prefix
+ return services.get(*target_candidates[0])
+
+ # still ambiguous - add the services to the list of candidates
+ candidates.update(target_candidates)
+
+ # exclude services where the operation is not contained in the service spec
+ for service_identifier in list(candidates):
+ service = services.get(*service_identifier)
+ if operation not in service.operation_names:
+ candidates.remove(service_identifier)
+ else:
+ # exclude services which have a target prefix (the current request does not have one)
+ for service_identifier in list(candidates):
+ service = services.get(*service_identifier)
+ if service.metadata.get("targetPrefix") is not None:
+ candidates.remove(service_identifier)
+
+ if len(candidates) == 1:
+ service_identifier = candidates.pop()
+ return services.get(*service_identifier)
+
+ # 3. check the path if it is set and not a trivial root path
+ if path and path != "/":
+ # try to find a match with the custom path rules
+ custom_path_match = custom_path_addressing_rules(path)
+ if custom_path_match:
+ return services.get(*custom_path_match)
+
+ # 4. check the host (custom host addressing rules)
+ if host:
+ # iterate over the service spec's endpoint prefix
+ for prefix, services_per_prefix in services.endpoint_prefix_index.items():
+ # this prevents a virtual host addressed bucket to be wrongly recognized
+ if host.startswith(f"{prefix}.") and ".s3." not in host:
+ if len(services_per_prefix) == 1:
+ return services.get(*services_per_prefix[0])
+ candidates.update(services_per_prefix)
+
+ custom_host_match = custom_host_addressing_rules(host)
+ if custom_host_match:
+ return services.get(*custom_host_match)
+
+ if request.shallow:
+ # from here on we would need access to the request body, which doesn't exist for shallow requests like
+ # WebsocketRequests.
+ return None
+
+ # 5. check the query / form-data
+ try:
+ values = request.values
+ if "Action" in values:
+ # query / ec2 protocol requests always have an action and a version (the action is more significant)
+ query_candidates = [
+ service
+ for service in services.by_operation(values["Action"])
+ if service.protocol in ("ec2", "query")
+ ]
+
+ if len(query_candidates) == 1:
+ return services.get(*query_candidates[0])
+
+ if "Version" in values:
+ for service_identifier in list(query_candidates):
+ service_model = services.get(*service_identifier)
+ if values["Version"] != service_model.api_version:
+ # the combination of Version and Action is not unique, add matches to the candidates
+ query_candidates.remove(service_identifier)
+
+ if len(query_candidates) == 1:
+ return services.get(*query_candidates[0])
+
+ candidates.update(query_candidates)
+
+ except RequestEntityTooLarge:
+ # Some requests can be form-urlencoded but also contain binary data, which will fail the form parsing (S3 can
+ # do this). In that case, skip this step and continue to try to determine the service name. The exception is
+ # RequestEntityTooLarge even if the error is due to failed decoding.
+ LOG.debug(
+ "Failed to determine AWS service from request body because the form could not be parsed",
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
+ )
+
+ # 6. resolve service spec conflicts
+ resolved_conflict = resolve_conflicts(candidates, request)
+ if resolved_conflict:
+ return services.get(*resolved_conflict)
+
+ # 7. check the legacy S3 rules in the end
+ legacy_match = legacy_s3_rules(request)
+ if legacy_match:
+ return services.get(*legacy_match)
+
+ if signing_name:
+ return services.get(name=signing_name)
+ if candidates:
+ return services.get(*candidates.pop())
+ return None
diff --git a/localstack-core/localstack/aws/protocol/validate.py b/localstack-core/localstack/aws/protocol/validate.py
new file mode 100644
index 0000000000000..30d1be4355fb0
--- /dev/null
+++ b/localstack-core/localstack/aws/protocol/validate.py
@@ -0,0 +1,173 @@
+"""Slightly extends the ``botocore.validate`` package to provide better integration with our parser/serializer."""
+
+from typing import Any, Dict, List, NamedTuple
+
+from botocore.model import OperationModel, Shape
+from botocore.validate import ParamValidator as BotocoreParamValidator
+from botocore.validate import ValidationErrors as BotocoreValidationErrors
+from botocore.validate import type_check
+
+from localstack.aws.api import ServiceRequest
+
+
+class Error(NamedTuple):
+ """
+ A wrapper around ``botocore.validate`` error tuples.
+
+ Attributes:
+ reason The error type
+ name The name of the parameter the error occurred at
+ attributes Error type-specific attributes
+ """
+
+ reason: str
+ name: str
+ attributes: Dict[str, Any]
+
+
+class ParameterValidationError(Exception):
+ error: Error
+
+ def __init__(self, error: Error) -> None:
+ self.error = error
+ super().__init__(self.message)
+
+ @property
+ def reason(self):
+ return self.error.reason
+
+ @property
+ def message(self) -> str:
+ """
+ Returns a default message for the error formatted by BotocoreValidationErrors.
+ :return: the exception message.
+ """
+ return BotocoreValidationErrors()._format_error(self.error)
+
+
+class MissingRequiredField(ParameterValidationError):
+ @property
+ def required_name(self) -> str:
+ return self.error.attributes["required_name"]
+
+
+# TODO: extend subclasses with properties from error arguments as needed. see ValidationErrors._format_error for
+# which those are.
+
+
+class UnknownField(ParameterValidationError):
+ pass
+
+
+class InvalidType(ParameterValidationError):
+ pass
+
+
+class InvalidRange(ParameterValidationError):
+ pass
+
+
+class InvalidLength(ParameterValidationError):
+ pass
+
+
+class JsonEncodingError(ParameterValidationError):
+ pass
+
+
+class InvalidDocumentType(ParameterValidationError):
+ pass
+
+
+class MoreThanOneInput(ParameterValidationError):
+ pass
+
+
+class EmptyInput(ParameterValidationError):
+ pass
+
+
+class ValidationErrors(BotocoreValidationErrors):
+ def __init__(self, shape: Shape, params: Dict[str, Any]):
+ super().__init__()
+ self.shape = shape
+ self.params = params
+ self._exceptions: List[ParameterValidationError] = []
+
+ @property
+ def exceptions(self):
+ return self._exceptions
+
+ def raise_first(self):
+ for error in self._exceptions:
+ raise error
+
+ def report(self, name, reason, **kwargs):
+ error = Error(reason, name, kwargs)
+ self._errors.append(error)
+ self._exceptions.append(self.to_exception(error))
+
+ def to_exception(self, error: Error) -> ParameterValidationError:
+ error_type, name, additional = error
+
+ if error_type == "missing required field":
+ return MissingRequiredField(error)
+ elif error_type == "unknown field":
+ return UnknownField(error)
+ elif error_type == "invalid type":
+ return InvalidType(error)
+ elif error_type == "invalid range":
+ return InvalidRange(error)
+ elif error_type == "invalid length":
+ return InvalidLength(error)
+ elif error_type == "unable to encode to json":
+ return JsonEncodingError(error)
+ elif error_type == "invalid type for document":
+ return InvalidDocumentType(error)
+ elif error_type == "more than one input":
+ return MoreThanOneInput(error)
+ elif error_type == "empty input":
+ return EmptyInput(error)
+
+ return ParameterValidationError(error)
+
+
+class ParamValidator(BotocoreParamValidator):
+ def validate(self, params: Dict[str, Any], shape: Shape):
+ """Validate parameters against a shape model.
+
+ This method will validate the parameters against a provided shape model.
+ All errors will be collected before returning to the caller. This means
+ that this method will not stop at the first error, it will return all
+ possible errors.
+
+ :param params: User provided dict of parameters
+ :param shape: A shape model describing the expected input.
+
+ :return: A list of errors.
+
+ """
+ errors = ValidationErrors(shape, params)
+ self._validate(params, shape, errors, name="")
+ return errors
+
+ @type_check(valid_types=(dict,))
+ def _validate_structure(self, params, shape, errors, name):
+ # our parser sets the value of required members to None if they are not in the incoming request. we correct
+ # this behavior here to get the correct error messages.
+ for required_member in shape.metadata.get("required", []):
+ if required_member in params and params[required_member] is None:
+ params.pop(required_member)
+
+ super(ParamValidator, self)._validate_structure(params, shape, errors, name)
+
+
+def validate_request(operation: OperationModel, request: ServiceRequest) -> ValidationErrors:
+ """
+ Validates the service request with the input shape of the given operation.
+
+ :param operation: the operation
+ :param request: the service request (parameters) to validate against the operation's input shape
+ :return: ValidationError object
+ """
+ return ParamValidator().validate(request, operation.input_shape)
diff --git a/localstack-core/localstack/aws/scaffold.py b/localstack-core/localstack/aws/scaffold.py
new file mode 100644
index 0000000000000..3d9c0e3e55db4
--- /dev/null
+++ b/localstack-core/localstack/aws/scaffold.py
@@ -0,0 +1,560 @@
+import io
+import keyword
+import re
+from functools import cached_property
+from multiprocessing import Pool
+from pathlib import Path
+from typing import Dict, List, Optional, Set
+
+import click
+from botocore import xform_name
+from botocore.exceptions import UnknownServiceError
+from botocore.model import (
+ ListShape,
+ MapShape,
+ OperationModel,
+ ServiceModel,
+ Shape,
+ StringShape,
+ StructureShape,
+)
+from typing_extensions import OrderedDict
+
+from localstack.aws.spec import load_service
+from localstack.utils.common import camel_to_snake_case, snake_to_camel_case
+
+# Some minification packages might treat "type" as a keyword, some specs define shapes called like the type "Optional"
+KEYWORDS = list(keyword.kwlist) + ["type", "Optional", "Union"]
+is_keyword = KEYWORDS.__contains__
+
+
+def is_bad_param_name(name: str) -> bool:
+ if name == "context":
+ return True
+
+ if is_keyword(name):
+ return True
+
+ return False
+
+
+def to_valid_python_name(spec_name: str) -> str:
+ sanitized = re.sub(r"[^0-9a-zA-Z_]+", "_", spec_name)
+
+ if sanitized[0].isnumeric():
+ sanitized = "i_" + sanitized
+
+ if is_keyword(sanitized):
+ sanitized += "_"
+
+ if sanitized.startswith("__"):
+ sanitized = sanitized[1:]
+
+ return sanitized
+
+
+def html_to_rst(html: str):
+ import pypandoc
+
+ doc = pypandoc.convert_text(html, "rst", format="html")
+ doc = doc.replace("\_", "_") # noqa: W605
+ doc = doc.replace("\|", "|") # noqa: W605
+ doc = doc.replace("\ ", " ") # noqa: W605
+ doc = doc.replace("\\", "\\\\") # noqa: W605
+ rst = doc.strip()
+ return rst
+
+
+class ShapeNode:
+ service: ServiceModel
+ shape: Shape
+
+ def __init__(self, service: ServiceModel, shape: Shape) -> None:
+ super().__init__()
+ self.service = service
+ self.shape = shape
+
+ @cached_property
+ def request_operation(self) -> Optional[OperationModel]:
+ for operation_name in self.service.operation_names:
+ operation = self.service.operation_model(operation_name)
+ if operation.input_shape is None:
+ continue
+
+ if to_valid_python_name(self.shape.name) == to_valid_python_name(
+ operation.input_shape.name
+ ):
+ return operation
+
+ return None
+
+ @cached_property
+ def response_operation(self) -> Optional[OperationModel]:
+ for operation_name in self.service.operation_names:
+ operation = self.service.operation_model(operation_name)
+ if operation.output_shape is None:
+ continue
+
+ if to_valid_python_name(self.shape.name) == to_valid_python_name(
+ operation.output_shape.name
+ ):
+ return operation
+
+ return None
+
+ @cached_property
+ def is_request(self):
+ return self.request_operation is not None
+
+ @cached_property
+ def is_response(self):
+ return self.response_operation is not None
+
+ @property
+ def name(self) -> str:
+ return to_valid_python_name(self.shape.name)
+
+ @cached_property
+ def is_exception(self):
+ metadata = self.shape.metadata
+ return metadata.get("error") or metadata.get("exception")
+
+ @property
+ def is_primitive(self):
+ return self.shape.type_name in ["integer", "boolean", "float", "double", "string"]
+
+ @property
+ def is_enum(self):
+ return isinstance(self.shape, StringShape) and self.shape.enum
+
+ @property
+ def dependencies(self) -> List[str]:
+ shape = self.shape
+
+ if isinstance(shape, StructureShape):
+ return [to_valid_python_name(v.name) for v in shape.members.values()]
+ if isinstance(shape, ListShape):
+ return [to_valid_python_name(shape.member.name)]
+ if isinstance(shape, MapShape):
+ return [to_valid_python_name(shape.key.name), to_valid_python_name(shape.value.name)]
+
+ return []
+
+ def _print_structure_declaration(self, output, doc=True, quote_types=False):
+ if self.is_exception:
+ self._print_as_class(output, "ServiceException", doc, quote_types)
+ return
+
+ if any(map(is_keyword, self.shape.members.keys())):
+ self._print_as_typed_dict(output, doc, quote_types)
+ return
+
+ if self.is_request:
+ base = "ServiceRequest"
+ else:
+ base = "TypedDict, total=False"
+
+ self._print_as_class(output, base, doc, quote_types)
+
+ def _print_as_class(self, output, base: str, doc=True, quote_types=False):
+ output.write(f"class {to_valid_python_name(self.shape.name)}({base}):\n")
+
+ q = '"' if quote_types else ""
+
+ if doc:
+ self.print_shape_doc(output, self.shape)
+
+ if self.is_exception:
+ error_spec = self.shape.metadata.get("error", {})
+ output.write(f' code: str = "{error_spec.get("code", self.shape.name)}"\n')
+ output.write(f" sender_fault: bool = {error_spec.get('senderFault', False)}\n")
+ output.write(f" status_code: int = {error_spec.get('httpStatusCode', 400)}\n")
+ elif not self.shape.members:
+ output.write(" pass\n")
+
+ # Avoid generating members for the common error members:
+ # - The message will always be the exception message (first argument of the exception class init)
+ # - The code is already set above
+ # - The type is the sender_fault which is already set above
+ remaining_members = {
+ k: v
+ for k, v in self.shape.members.items()
+ if not self.is_exception or k.lower() not in ["message", "code"]
+ }
+
+ # render any streaming payload first
+ if self.is_request and self.request_operation.has_streaming_input:
+ member: str = self.request_operation.input_shape.serialization.get("payload")
+ shape: Shape = self.request_operation.get_streaming_input()
+ if member in self.shape.required_members:
+ output.write(f" {member}: IO[{q}{to_valid_python_name(shape.name)}{q}]\n")
+ else:
+ output.write(
+ f" {member}: Optional[IO[{q}{to_valid_python_name(shape.name)}{q}]]\n"
+ )
+ del remaining_members[member]
+ # render any streaming response payload first
+ if self.is_response and self.response_operation.has_streaming_output:
+ member: str = self.response_operation.output_shape.serialization.get("payload")
+ shape: Shape = self.response_operation.get_streaming_output()
+ shape_name = to_valid_python_name(shape.name)
+ if member in self.shape.required_members:
+ output.write(
+ f" {member}: Union[{q}{shape_name}{q}, IO[{q}{shape_name}{q}], Iterable[{q}{shape_name}{q}]]\n"
+ )
+ else:
+ output.write(
+ f" {member}: Optional[Union[{q}{shape_name}{q}, IO[{q}{shape_name}{q}], Iterable[{q}{shape_name}{q}]]]\n"
+ )
+ del remaining_members[member]
+
+ for k, v in remaining_members.items():
+ if k in self.shape.required_members:
+ if v.serialization.get("eventstream"):
+ output.write(f" {k}: Iterator[{q}{to_valid_python_name(v.name)}{q}]\n")
+ else:
+ output.write(f" {k}: {q}{to_valid_python_name(v.name)}{q}\n")
+ else:
+ if v.serialization.get("eventstream"):
+ output.write(f" {k}: Iterator[{q}{to_valid_python_name(v.name)}{q}]\n")
+ else:
+ output.write(f" {k}: Optional[{q}{to_valid_python_name(v.name)}{q}]\n")
+
+ def _print_as_typed_dict(self, output, doc=True, quote_types=False):
+ name = to_valid_python_name(self.shape.name)
+ output.write('%s = TypedDict("%s", {\n' % (name, name))
+ for k, v in self.shape.members.items():
+ member_name = to_valid_python_name(v.name)
+ # check if the member name is the same as the type name (recursive types need to use forward references)
+ recursive_type = name == member_name
+ q = '"' if quote_types or recursive_type else ""
+ if k in self.shape.required_members:
+ if v.serialization.get("eventstream"):
+ output.write(f' "{k}": Iterator[{q}{member_name}{q}],\n')
+ else:
+ output.write(f' "{k}": {q}{member_name}{q},\n')
+ else:
+ if v.serialization.get("eventstream"):
+ output.write(f' "{k}": Iterator[{q}{member_name}{q}],\n')
+ else:
+ output.write(f' "{k}": Optional[{q}{member_name}{q}],\n')
+ output.write("}, total=False)")
+
+ def print_shape_doc(self, output, shape):
+ html = shape.documentation
+ rst = html_to_rst(html)
+ if rst:
+ output.write(' """')
+ output.write(f"{rst}\n")
+ output.write(' """\n')
+
+ def print_declaration(self, output, doc=True, quote_types=False):
+ shape = self.shape
+
+ q = '"' if quote_types else ""
+
+ if isinstance(shape, StructureShape):
+ self._print_structure_declaration(output, doc, quote_types)
+ elif isinstance(shape, ListShape):
+ output.write(
+ f"{to_valid_python_name(shape.name)} = List[{q}{to_valid_python_name(shape.member.name)}{q}]"
+ )
+ elif isinstance(shape, MapShape):
+ output.write(
+ f"{to_valid_python_name(shape.name)} = Dict[{q}{to_valid_python_name(shape.key.name)}{q}, {q}{to_valid_python_name(shape.value.name)}{q}]"
+ )
+ elif isinstance(shape, StringShape):
+ if shape.enum:
+ output.write(f"class {to_valid_python_name(shape.name)}(StrEnum):\n")
+ for value in shape.enum:
+ name = to_valid_python_name(value)
+ output.write(f' {name} = "{value}"\n')
+ else:
+ output.write(f"{to_valid_python_name(shape.name)} = str")
+ elif shape.type_name == "string":
+ output.write(f"{to_valid_python_name(shape.name)} = str")
+ elif shape.type_name == "integer":
+ output.write(f"{to_valid_python_name(shape.name)} = int")
+ elif shape.type_name == "long":
+ output.write(f"{to_valid_python_name(shape.name)} = int")
+ elif shape.type_name == "double":
+ output.write(f"{to_valid_python_name(shape.name)} = float")
+ elif shape.type_name == "float":
+ output.write(f"{to_valid_python_name(shape.name)} = float")
+ elif shape.type_name == "boolean":
+ output.write(f"{to_valid_python_name(shape.name)} = bool")
+ elif shape.type_name == "blob":
+ # blobs are often associated with streaming payloads, but we handle that on operation level,
+ # not on shape level
+ output.write(f"{to_valid_python_name(shape.name)} = bytes")
+ elif shape.type_name == "timestamp":
+ output.write(f"{to_valid_python_name(shape.name)} = datetime")
+ else:
+ output.write(
+ f"# unknown shape type for {to_valid_python_name(shape.name)}: {shape.type_name}"
+ )
+ # TODO: BoxedInteger?
+
+ output.write("\n")
+
+ def get_order(self):
+ """
+ Defines a basic order in which to sort the stack of shape nodes before printing.
+ First all non-enum primitives are printed, then enums, then exceptions, then all other types.
+ """
+ if self.is_primitive:
+ if self.is_enum:
+ return 1
+ else:
+ return 0
+
+ if self.is_exception:
+ return 2
+
+ return 3
+
+
+def generate_service_types(output, service: ServiceModel, doc=True):
+ output.write("from datetime import datetime\n")
+ output.write("from enum import StrEnum\n")
+ output.write(
+ "from typing import Dict, List, Optional, Iterator, Iterable, IO, Union, TypedDict\n"
+ )
+ output.write("\n")
+ output.write(
+ "from localstack.aws.api import handler, RequestContext, ServiceException, ServiceRequest"
+ )
+ output.write("\n")
+
+ # ==================================== print type declarations
+ nodes: Dict[str, ShapeNode] = {}
+
+ for shape_name in service.shape_names:
+ shape = service.shape_for(shape_name)
+ nodes[to_valid_python_name(shape_name)] = ShapeNode(service, shape)
+
+ # output.write("__all__ = [\n")
+ # for name in nodes.keys():
+ # output.write(f' "{name}",\n')
+ # output.write("]\n")
+
+ printed: Set[str] = set()
+ visited: Set[str] = set()
+ stack: List[str] = list(nodes.keys())
+
+ stack = sorted(stack, key=lambda name: nodes[name].get_order())
+ stack.reverse()
+
+ while stack:
+ name = stack.pop()
+ if name in printed:
+ continue
+ node = nodes[name]
+
+ dependencies = [dep for dep in node.dependencies if dep not in printed]
+
+ if not dependencies:
+ node.print_declaration(output, doc=doc)
+ printed.add(name)
+ elif name in visited:
+ # break out of circular dependencies
+ node.print_declaration(output, doc=doc, quote_types=True)
+ printed.add(name)
+ else:
+ stack.append(name)
+ stack.extend(dependencies)
+ visited.add(name)
+
+
+def generate_service_api(output, service: ServiceModel, doc=True):
+ service_name = service.service_name.replace("-", "_")
+ class_name = service_name + "_api"
+ class_name = snake_to_camel_case(class_name)
+
+ output.write(f"class {class_name}:\n")
+ output.write("\n")
+ output.write(f' service = "{service.service_name}"\n')
+ output.write(f' version = "{service.api_version}"\n')
+ for op_name in service.operation_names:
+ operation: OperationModel = service.operation_model(op_name)
+
+ fn_name = camel_to_snake_case(op_name)
+
+ if operation.output_shape:
+ output_shape = to_valid_python_name(operation.output_shape.name)
+ else:
+ output_shape = "None"
+
+ output.write("\n")
+ parameters = OrderedDict()
+ param_shapes = OrderedDict()
+
+ if input_shape := operation.input_shape:
+ members = list(input_shape.members)
+
+ streaming_payload_member = None
+ if operation.has_streaming_input:
+ streaming_payload_member = operation.input_shape.serialization.get("payload")
+
+ for m in input_shape.required_members:
+ members.remove(m)
+ m_shape = input_shape.members[m]
+ type_name = to_valid_python_name(m_shape.name)
+ if m == streaming_payload_member:
+ type_name = f"IO[{type_name}]"
+ parameters[xform_name(m)] = type_name
+ param_shapes[xform_name(m)] = m_shape
+
+ for m in members:
+ m_shape = input_shape.members[m]
+ param_shapes[xform_name(m)] = m_shape
+ type_name = to_valid_python_name(m_shape.name)
+ if m == streaming_payload_member:
+ type_name = f"IO[{type_name}]"
+ parameters[xform_name(m)] = f"{type_name} | None = None"
+
+ if any(map(is_bad_param_name, parameters.keys())):
+ # if we cannot render the parameter name, don't expand the parameters in the handler
+ param_list = f"request: {to_valid_python_name(input_shape.name)}" if input_shape else ""
+ output.write(f' @handler("{operation.name}", expand=False)\n')
+ else:
+ param_list = ", ".join([f"{k}: {v}" for k, v in parameters.items()])
+ output.write(f' @handler("{operation.name}")\n')
+
+ # add the **kwargs in the end
+ if param_list:
+ param_list += ", **kwargs"
+ else:
+ param_list = "**kwargs"
+
+ output.write(
+ f" def {fn_name}(self, context: RequestContext, {param_list}) -> {output_shape}:\n"
+ )
+
+ # convert html documentation to rst and print it into the signature
+ if doc:
+ html = operation.documentation
+ rst = html_to_rst(html)
+ output.write(' """')
+ output.write(f"{rst}\n")
+ output.write("\n")
+
+ # parameters
+ for param_name, shape in param_shapes.items():
+ # FIXME: this doesn't work properly
+ rst = html_to_rst(shape.documentation)
+ rst = rst.strip().split(".")[0] + "."
+ output.write(f":param {param_name}: {rst}\n")
+
+ # return value
+ if operation.output_shape:
+ output.write(f":returns: {to_valid_python_name(operation.output_shape.name)}\n")
+
+ # errors
+ for error in operation.error_shapes:
+ output.write(f":raises {to_valid_python_name(error.name)}:\n")
+
+ output.write(' """\n')
+
+ output.write(" raise NotImplementedError\n")
+
+
+@click.group()
+def scaffold():
+ pass
+
+
+@scaffold.command(name="generate")
+@click.argument("service", type=str)
+@click.option("--doc/--no-doc", default=False, help="whether or not to generate docstrings")
+@click.option(
+ "--save/--print",
+ default=False,
+ help="whether or not to save the result into the api directory",
+)
+@click.option(
+ "--path",
+ default="./localstack-core/localstack/aws/api",
+ help="the path where the api should be saved",
+)
+def generate(service: str, doc: bool, save: bool, path: str):
+ """
+ Generate types and API stubs for a given AWS service.
+
+ SERVICE is the service to generate the stubs for (e.g., sqs, or cloudformation)
+ """
+ from click import ClickException
+
+ try:
+ code = generate_code(service, doc=doc)
+ except UnknownServiceError:
+ raise ClickException(f"unknown service {service}")
+
+ if not save:
+ # either just print the code to stdout
+ click.echo(code)
+ return
+
+ # or find the file path and write the code to that location
+ create_code_directory(service, code, path)
+ click.echo("done!")
+
+
+def generate_code(service_name: str, doc: bool = False) -> str:
+ model = load_service(service_name)
+ output = io.StringIO()
+ generate_service_types(output, model, doc=doc)
+ generate_service_api(output, model, doc=doc)
+ return output.getvalue()
+
+
+def create_code_directory(service_name: str, code: str, base_path: str):
+ service_name = service_name.replace("-", "_")
+ # handle service names which are reserved keywords in python (e.g. lambda)
+ if is_keyword(service_name):
+ service_name += "_"
+ path = Path(base_path, service_name)
+
+ if not path.exists():
+ click.echo(f"creating directory {path}")
+ path.mkdir()
+
+ file = path / "__init__.py"
+ click.echo(f"writing to file {file}")
+ file.write_text(code)
+
+
+@scaffold.command()
+@click.option("--doc/--no-doc", default=False, help="whether or not to generate docstrings")
+@click.option(
+ "--path",
+ default="./localstack-core/localstack/aws/api",
+ help="the path in which to upgrade ASF APIs",
+)
+def upgrade(path: str, doc: bool = False):
+ """
+ Execute the code generation for all existing APIs.
+ """
+ services = [
+ d.name.rstrip("_").replace("_", "-")
+ for d in Path(path).iterdir()
+ if d.is_dir() and not d.name.startswith("__")
+ ]
+
+ with Pool() as pool:
+ pool.starmap(_do_generate_code, [(service, path, doc) for service in services])
+
+ click.echo("done!")
+
+
+def _do_generate_code(service: str, path: str, doc: bool):
+ try:
+ code = generate_code(service, doc)
+ except UnknownServiceError:
+ click.echo(f"unknown service {service}! skipping...")
+ return
+ create_code_directory(service, code, base_path=path)
+
+
+if __name__ == "__main__":
+ scaffold()
diff --git a/localstack/services/__init__.py b/localstack-core/localstack/aws/serving/__init__.py
similarity index 100%
rename from localstack/services/__init__.py
rename to localstack-core/localstack/aws/serving/__init__.py
diff --git a/localstack-core/localstack/aws/serving/asgi.py b/localstack-core/localstack/aws/serving/asgi.py
new file mode 100644
index 0000000000000..3bbeefd49944f
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/asgi.py
@@ -0,0 +1,5 @@
+from rolo.gateway.asgi import AsgiGateway
+
+__all__ = [
+ "AsgiGateway",
+]
diff --git a/localstack-core/localstack/aws/serving/edge.py b/localstack-core/localstack/aws/serving/edge.py
new file mode 100644
index 0000000000000..0e204a4d96f88
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/edge.py
@@ -0,0 +1,119 @@
+import logging
+import threading
+from typing import List
+
+from rolo.gateway.wsgi import WsgiGateway
+
+from localstack import config
+from localstack.aws.app import LocalstackAwsGateway
+from localstack.config import HostAndPort
+from localstack.runtime import get_current_runtime
+from localstack.runtime.shutdown import ON_AFTER_SERVICE_SHUTDOWN_HANDLERS
+from localstack.utils.collections import ensure_list
+
+LOG = logging.getLogger(__name__)
+
+
+def serve_gateway(
+ listen: HostAndPort | List[HostAndPort], use_ssl: bool, asynchronous: bool = False
+):
+ """
+ Implementation of the edge.do_start_edge_proxy interface to start a server (hypercorn, werkzeug, or
+ twisted, depending on config.GATEWAY_SERVER) serving the LocalstackAwsGateway.
+ """
+
+ gateway = get_current_runtime().components.gateway
+
+ listens = ensure_list(listen)
+
+ if config.GATEWAY_SERVER == "hypercorn":
+ return _serve_hypercorn(gateway, listens, use_ssl, asynchronous)
+ elif config.GATEWAY_SERVER == "werkzeug":
+ return _serve_werkzeug(gateway, listens, use_ssl, asynchronous)
+ elif config.GATEWAY_SERVER == "twisted":
+ return _serve_twisted(gateway, listens, use_ssl, asynchronous)
+ else:
+ raise ValueError(f"Unknown gateway server type {config.GATEWAY_SERVER}")
+
+
+def _serve_werkzeug(
+ gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool
+):
+ from werkzeug.serving import ThreadedWSGIServer
+
+ from .werkzeug import CustomWSGIRequestHandler
+
+ params = {
+ "app": WsgiGateway(gateway),
+ "handler": CustomWSGIRequestHandler,
+ }
+
+ if use_ssl:
+ from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+
+ install_predefined_cert_if_available()
+ serial_number = listen[0].port
+ _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number)
+ params["ssl_context"] = (cert_file_name, key_file_name)
+
+ threads = []
+ servers: List[ThreadedWSGIServer] = []
+
+ for host_port in listen:
+ kwargs = dict(params)
+ kwargs["host"] = host_port.host
+ kwargs["port"] = host_port.port
+ server = ThreadedWSGIServer(**kwargs)
+ servers.append(server)
+ threads.append(
+ threading.Thread(
+ target=server.serve_forever, name=f"werkzeug-server-{host_port.port}", daemon=True
+ )
+ )
+
+ def _shutdown_servers():
+ LOG.debug("[shutdown] Shutting down gateway servers")
+ for _srv in servers:
+ _srv.shutdown()
+
+ ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_servers)
+
+ for thread in threads:
+ thread.start()
+
+ if not asynchronous:
+ for thread in threads:
+ return thread.join()
+
+ # FIXME: thread handling is a bit wonky
+ return threads[0]
+
+
+def _serve_hypercorn(
+ gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool
+):
+ from localstack.http.hypercorn import GatewayServer
+
+ # start serving gateway
+ server = GatewayServer(gateway, listen, use_ssl, config.GATEWAY_WORKER_COUNT)
+ server.start()
+
+ # with the current way the infrastructure is started, this is the easiest way to shut down the server correctly
+ # FIXME: but the infrastructure shutdown should be much cleaner, core components like the gateway should be handled
+ # explicitly by the thing starting the components, not implicitly by the components.
+ def _shutdown_gateway():
+ LOG.debug("[shutdown] Shutting down gateway server")
+ server.shutdown()
+
+ ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_gateway)
+ if not asynchronous:
+ server.join()
+ return server._thread
+
+
+def _serve_twisted(
+ gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool
+):
+ from .twisted import serve_gateway
+
+ return serve_gateway(gateway, listen, use_ssl, asynchronous)
diff --git a/localstack-core/localstack/aws/serving/hypercorn.py b/localstack-core/localstack/aws/serving/hypercorn.py
new file mode 100644
index 0000000000000..450d2664badc9
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/hypercorn.py
@@ -0,0 +1,47 @@
+import asyncio
+from typing import Any, Optional, Tuple
+
+from hypercorn import Config
+from hypercorn.asyncio import serve as serve_hypercorn
+
+from localstack import constants
+
+from ..gateway import Gateway
+from .asgi import AsgiGateway
+
+
+def serve(
+ gateway: Gateway,
+ host: str = "localhost",
+ port: int = constants.DEFAULT_PORT_EDGE,
+ use_reloader: bool = True,
+ ssl_creds: Optional[Tuple[Any, Any]] = None,
+ **kwargs,
+) -> None:
+ """
+ Serve the given Gateway through a hypercorn server and block until it is completed.
+
+ :param gateway: the Gateway instance to serve
+ :param host: the host to expose the server on
+ :param port: the port to expose the server on
+ :param use_reloader: whether to use the reloader
+ :param ssl_creds: the ssl credentials (tuple of certfile and keyfile)
+ :param kwargs: any other parameters that can be passed to the hypercorn.Config object
+ """
+ config = Config()
+ config.h11_pass_raw_headers = True
+ config.bind = f"{host}:{port}"
+ config.use_reloader = use_reloader
+
+ if ssl_creds:
+ cert_file_name, key_file_name = ssl_creds
+ if cert_file_name:
+ kwargs["certfile"] = cert_file_name
+ if key_file_name:
+ kwargs["keyfile"] = key_file_name
+
+ for k, v in kwargs.items():
+ setattr(config, k, v)
+
+ loop = asyncio.new_event_loop()
+ loop.run_until_complete(serve_hypercorn(AsgiGateway(gateway, event_loop=loop), config))
diff --git a/localstack-core/localstack/aws/serving/twisted.py b/localstack-core/localstack/aws/serving/twisted.py
new file mode 100644
index 0000000000000..549150a73ae61
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/twisted.py
@@ -0,0 +1,173 @@
+"""
+Bindings to serve LocalStack using twisted.
+"""
+
+import logging
+import time
+from typing import List
+
+from rolo.gateway import Gateway
+from rolo.serving.twisted import TwistedGateway
+from twisted.internet import endpoints, interfaces, reactor, ssl
+from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
+from twisted.protocols.tls import BufferingTLSTransport, TLSMemoryBIOFactory
+from twisted.python.threadpool import ThreadPool
+
+from localstack import config
+from localstack.config import HostAndPort
+from localstack.runtime.shutdown import ON_AFTER_SERVICE_SHUTDOWN_HANDLERS
+from localstack.utils.patch import patch
+from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+from localstack.utils.threads import start_worker_thread
+
+LOG = logging.getLogger(__name__)
+
+
+class TLSMultiplexer(ProtocolWrapper):
+ """
+ Custom protocol to multiplex HTTPS and HTTP connections over the same port. This is the equivalent of
+ ``DuplexSocket``, but since twisted uses its own SSL layer and doesn't use `ssl.SSLSocket`, we need to implement
+ the multiplexing behavior in the Twisted layer.
+
+ The basic idea is to defer the ``makeConnection`` call until the first data are received, and then re-configure
+ the underlying ``wrappedProtocol`` if needed with a TLS wrapper.
+ """
+
+ tlsProtocol = BufferingTLSTransport
+
+ def __init__(
+ self,
+ factory: "WrappingFactory",
+ wrappedProtocol: interfaces.IProtocol,
+ ):
+ super().__init__(factory, wrappedProtocol)
+ self._isInitialized = False
+ self._isTLS = None
+ self._negotiatedProtocol = None
+
+ def makeConnection(self, transport):
+ self.connected = 1
+ self.transport = transport
+ self.factory.registerProtocol(self) # this is idempotent
+ # we defer the actual makeConnection call to the first invocation of dataReceived
+
+ def dataReceived(self, data: bytes) -> None:
+ if self._isInitialized:
+ super().dataReceived(data)
+ return
+
+ # once the first data have been received, we can check whether it's a TLS handshake, then we need to run the
+ # actual makeConnection procedure.
+ self._isInitialized = True
+ self._isTLS = data[0] == 22 # 0x16 is the marker byte identifying a TLS handshake
+
+ if self._isTLS:
+ # wrap protocol again in tls protocol
+ self.wrappedProtocol = self.tlsProtocol(self.factory, self.wrappedProtocol)
+ else:
+ if data.startswith(b"PRI * HTTP/2"):
+ # TODO: can we do proper protocol negotiation like in ALPN?
+ # in the TLS case, this is determined by the ALPN procedure by OpenSSL.
+ self._negotiatedProtocol = b"h2"
+
+ # now that we've set the real wrapped protocol, run the make connection procedure
+ super().makeConnection(self.transport)
+ super().dataReceived(data)
+
+ @property
+ def negotiatedProtocol(self) -> str | None:
+ if self._negotiatedProtocol:
+ return self._negotiatedProtocol
+ return self.wrappedProtocol.negotiatedProtocol
+
+
+class TLSMultiplexerFactory(TLSMemoryBIOFactory):
+ protocol = TLSMultiplexer
+
+
+def stop_thread_pool(self: ThreadPool, stop, timeout: float = None):
+ """
+ Patch for a custom shutdown procedure for a ThreadPool that waits a given amount of time for all threads.
+
+ :param self: the pool to shut down
+ :param stop: the original function
+ :param timeout: the maximum amount of time to wait
+ """
+ # copied from ThreadPool.stop()
+ if self.joined:
+ return
+ if not timeout:
+ stop()
+ return
+
+ self.joined = True
+ self.started = False
+ self._team.quit()
+
+ # our own joining logic with timeout
+ remaining = timeout
+ total_waited = 0
+
+ for thread in self.threads:
+ then = time.time()
+
+ # LOG.info("[shutdown] Joining thread %s", thread)
+ thread.join(remaining)
+
+ waited = time.time() - then
+ total_waited += waited
+ remaining -= waited
+
+ if thread.is_alive():
+ LOG.warning(
+ "[shutdown] Request thread %s still alive after %.2f seconds",
+ thread,
+ total_waited,
+ )
+
+ if remaining <= 0:
+ remaining = 0
+
+
+def serve_gateway(
+ gateway: Gateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool = False
+):
+ """
+ Serve a Gateway instance using twisted.
+ """
+ # setup reactor
+ reactor.suggestThreadPoolSize(config.GATEWAY_WORKER_COUNT)
+ thread_pool = reactor.getThreadPool()
+ patch(thread_pool.stop)(stop_thread_pool)
+
+ def _shutdown_reactor():
+ LOG.debug("[shutdown] Shutting down twisted reactor serving the gateway")
+ thread_pool.stop(timeout=10)
+ reactor.stop()
+
+ ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_reactor)
+
+ # setup twisted webserver Site
+ site = TwistedGateway(gateway)
+
+ # configure ssl
+ if use_ssl:
+ install_predefined_cert_if_available()
+ serial_number = listen[0].port
+ _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number)
+ context_factory = ssl.DefaultOpenSSLContextFactory(key_file_name, cert_file_name)
+ context_factory.getContext().use_certificate_chain_file(cert_file_name)
+ protocol_factory = TLSMultiplexerFactory(context_factory, False, site)
+ else:
+ protocol_factory = site
+
+ # add endpoint for each host/port combination
+ for host_and_port in listen:
+ # TODO: interface = host?
+ endpoint = endpoints.TCP4ServerEndpoint(reactor, host_and_port.port)
+ endpoint.listen(protocol_factory)
+
+ if asynchronous:
+ return start_worker_thread(reactor.run)
+ else:
+ return reactor.run()
diff --git a/localstack-core/localstack/aws/serving/werkzeug.py b/localstack-core/localstack/aws/serving/werkzeug.py
new file mode 100644
index 0000000000000..22e351adc4842
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/werkzeug.py
@@ -0,0 +1,58 @@
+import ssl
+from typing import TYPE_CHECKING, Any, Optional, Tuple
+
+from rolo.gateway import Gateway
+from rolo.gateway.wsgi import WsgiGateway
+from werkzeug import run_simple
+from werkzeug.serving import WSGIRequestHandler
+
+if TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+
+from localstack import constants
+
+
+def serve(
+ gateway: Gateway,
+ host: str = "localhost",
+ port: int = constants.DEFAULT_PORT_EDGE,
+ use_reloader: bool = True,
+ ssl_creds: Optional[Tuple[Any, Any]] = None,
+ **kwargs,
+) -> None:
+ """
+ Serve a Gateway as a WSGI application through werkzeug. This is mostly for development purposes.
+
+ :param gateway: the Gateway to serve
+ :param host: the host to expose the server to
+ :param port: the port to expose the server to
+ :param use_reloader: whether to autoreload the server on changes
+ :param kwargs: any other arguments that can be passed to `werkzeug.run_simple`
+ """
+ kwargs["threaded"] = kwargs.get("threaded", True) # make sure requests don't block
+ kwargs["ssl_context"] = ssl_creds
+ kwargs.setdefault("request_handler", CustomWSGIRequestHandler)
+ run_simple(host, port, WsgiGateway(gateway), use_reloader=use_reloader, **kwargs)
+
+
+class CustomWSGIRequestHandler(WSGIRequestHandler):
+ def make_environ(self) -> "WSGIEnvironment":
+ environ = super().make_environ()
+
+ # restore RAW_URI from the requestline, which will be something like ``GET //foo/?foo=bar%20ed HTTP/1.1``
+ environ["RAW_URI"] = " ".join(self.requestline.split(" ")[1:-1])
+
+ # restore raw headers for rolo
+ environ["asgi.headers"] = [
+ (k.encode("latin-1"), v.encode("latin-1")) for k, v in self.headers.raw_items()
+ ]
+
+ # the default WSGIRequestHandler does not understand our DuplexSocket, so it will always set https, which we
+ # correct here
+ try:
+ is_ssl = isinstance(self.request, ssl.SSLSocket)
+ except AttributeError:
+ is_ssl = False
+ environ["wsgi.url_scheme"] = "https" if is_ssl else "http"
+
+ return environ
diff --git a/localstack-core/localstack/aws/serving/wsgi.py b/localstack-core/localstack/aws/serving/wsgi.py
new file mode 100644
index 0000000000000..8ae26b3d8c9df
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/wsgi.py
@@ -0,0 +1,5 @@
+from rolo.gateway.wsgi import WsgiGateway
+
+__all__ = [
+ "WsgiGateway",
+]
diff --git a/localstack-core/localstack/aws/skeleton.py b/localstack-core/localstack/aws/skeleton.py
new file mode 100644
index 0000000000000..9d66fa4b375c1
--- /dev/null
+++ b/localstack-core/localstack/aws/skeleton.py
@@ -0,0 +1,228 @@
+import inspect
+import logging
+from typing import Any, Callable, Dict, NamedTuple, Optional, Union
+
+from botocore import xform_name
+from botocore.model import ServiceModel
+
+from localstack.aws.api import (
+ CommonServiceException,
+ RequestContext,
+ ServiceException,
+)
+from localstack.aws.api.core import ServiceRequest, ServiceRequestHandler, ServiceResponse
+from localstack.aws.protocol.parser import create_parser
+from localstack.aws.protocol.serializer import ResponseSerializer, create_serializer
+from localstack.aws.spec import load_service
+from localstack.http import Response
+from localstack.utils import analytics
+from localstack.utils.coverage_docs import get_coverage_link_for_service
+
+LOG = logging.getLogger(__name__)
+
+DispatchTable = Dict[str, ServiceRequestHandler]
+
+
+def create_skeleton(service: Union[str, ServiceModel], delegate: Any):
+ if isinstance(service, str):
+ service = load_service(service)
+
+ return Skeleton(service, create_dispatch_table(delegate))
+
+
+class HandlerAttributes(NamedTuple):
+ """
+ Holder object of the attributes added to a function by the @handler decorator.
+ """
+
+ function_name: str
+ operation: str
+ pass_context: bool
+ expand_parameters: bool
+
+
+def create_dispatch_table(delegate: object) -> DispatchTable:
+ """
+ Creates a dispatch table for a given object. First, the entire class tree of the object is scanned to find any
+ functions that are decorated with @handler. It then resolves those functions on the delegate.
+ """
+ # scan class tree for @handler wrapped functions (reverse class tree so that inherited functions overwrite parent
+ # functions)
+ cls_tree = inspect.getmro(delegate.__class__)
+ handlers: Dict[str, HandlerAttributes] = {}
+ cls_tree = reversed(list(cls_tree))
+ for cls in cls_tree:
+ if cls == object:
+ continue
+
+ for name, fn in inspect.getmembers(cls, inspect.isfunction):
+ try:
+ # attributes come from operation_marker in @handler wrapper
+ handlers[fn.operation] = HandlerAttributes(
+ fn.__name__, fn.operation, fn.pass_context, fn.expand_parameters
+ )
+ except AttributeError:
+ pass
+
+ # create dispatch table from operation handlers by resolving bound functions on the delegate
+ dispatch_table: DispatchTable = {}
+ for handler in handlers.values():
+ # resolve the bound function of the delegate
+ bound_function = getattr(delegate, handler.function_name)
+ # create a dispatcher
+ dispatch_table[handler.operation] = ServiceRequestDispatcher(
+ bound_function,
+ operation=handler.operation,
+ pass_context=handler.pass_context,
+ expand_parameters=handler.expand_parameters,
+ )
+
+ return dispatch_table
+
+
+class ServiceRequestDispatcher:
+ fn: Callable
+ operation: str
+ expand_parameters: bool = True
+ pass_context: bool = True
+
+ def __init__(
+ self,
+ fn: Callable,
+ operation: str,
+ pass_context: bool = True,
+ expand_parameters: bool = True,
+ ):
+ self.fn = fn
+ self.operation = operation
+ self.pass_context = pass_context
+ self.expand_parameters = expand_parameters
+
+ def __call__(
+ self, context: RequestContext, request: ServiceRequest
+ ) -> Optional[ServiceResponse]:
+ args = []
+ kwargs = {}
+
+ if not self.expand_parameters:
+ if self.pass_context:
+ args.append(context)
+ args.append(request)
+ else:
+ if request is None:
+ kwargs = {}
+ else:
+ kwargs = {xform_name(k): v for k, v in request.items()}
+ kwargs["context"] = context
+
+ return self.fn(*args, **kwargs)
+
+
+class Skeleton:
+ service: ServiceModel
+ dispatch_table: DispatchTable
+
+ def __init__(self, service: ServiceModel, implementation: Union[Any, DispatchTable]):
+ self.service = service
+
+ if isinstance(implementation, dict):
+ self.dispatch_table = implementation
+ else:
+ self.dispatch_table = create_dispatch_table(implementation)
+
+ def invoke(self, context: RequestContext) -> Response:
+ serializer = create_serializer(context.service)
+
+ if context.operation and context.service_request:
+ # if the parsed request is already set in the context, re-use them
+ operation, instance = context.operation, context.service_request
+ else:
+ # otherwise, parse the incoming HTTPRequest
+ operation, instance = create_parser(context.service).parse(context.request)
+ context.operation = operation
+
+ try:
+ # Find the operation's handler in the dispatch table
+ if operation.name not in self.dispatch_table:
+ LOG.warning(
+ "missing entry in dispatch table for %s.%s",
+ self.service.service_name,
+ operation.name,
+ )
+ raise NotImplementedError
+
+ return self.dispatch_request(serializer, context, instance)
+ except ServiceException as e:
+ return self.on_service_exception(serializer, context, e)
+ except NotImplementedError as e:
+ return self.on_not_implemented_error(serializer, context, e)
+
+ def dispatch_request(
+ self, serializer: ResponseSerializer, context: RequestContext, instance: ServiceRequest
+ ) -> Response:
+ operation = context.operation
+
+ handler = self.dispatch_table[operation.name]
+
+ # Call the appropriate handler
+ result = handler(context, instance) or {}
+
+ # if the service handler returned an HTTP request, forego serialization and return immediately
+ if isinstance(result, Response):
+ return result
+
+ context.service_response = result
+
+ # Serialize result dict to a Response and return it
+ return serializer.serialize_to_response(
+ result, operation, context.request.headers, context.request_id
+ )
+
+ def on_service_exception(
+ self, serializer: ResponseSerializer, context: RequestContext, exception: ServiceException
+ ) -> Response:
+ """
+ Called by invoke if the handler of the operation raised a ServiceException.
+
+ :param serializer: serializer which should be used to serialize the exception
+ :param context: the request context
+ :param exception: the exception that was raised
+ :return: a Response object
+ """
+ context.service_exception = exception
+
+ return serializer.serialize_error_to_response(
+ exception, context.operation, context.request.headers, context.request_id
+ )
+
+ def on_not_implemented_error(
+ self,
+ serializer: ResponseSerializer,
+ context: RequestContext,
+ exception: NotImplementedError,
+ ) -> Response:
+ """
+ Called by invoke if either the dispatch table did not contain an entry for the operation, or the service
+ provider raised a NotImplementedError
+ :param serializer: the serializer which should be used to serialize the NotImplementedError
+ :param context: the request context
+ :param exception: the NotImplementedError that was raised
+ :return: a Response object
+ """
+ operation = context.operation
+
+ action_name = operation.name
+ service_name = operation.service_model.service_name
+ exception_message: str | None = exception.args[0] if exception.args else None
+ message = exception_message or get_coverage_link_for_service(service_name, action_name)
+ LOG.info(message)
+ error = CommonServiceException("InternalFailure", message, status_code=501)
+ # record event
+ analytics.log.event(
+ "services_notimplemented", payload={"s": service_name, "a": action_name}
+ )
+ context.service_exception = error
+
+ return serializer.serialize_error_to_response(
+ error, operation, context.request.headers, context.request_id
+ )
diff --git a/localstack-core/localstack/aws/spec-patches.json b/localstack-core/localstack/aws/spec-patches.json
new file mode 100644
index 0000000000000..37cc8a5c27001
--- /dev/null
+++ b/localstack-core/localstack/aws/spec-patches.json
@@ -0,0 +1,1356 @@
+{
+ "s3/2006-03-01/service-2": [
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchBucket/members/BucketName",
+ "value": {
+ "shape": "BucketName"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchBucket/error",
+ "value": {
+ "httpStatusCode": 404
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchLifecycleConfiguration",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The lifecycle configuration does not exist
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidBucketName",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The specified bucket is not valid.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketRegion",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketContentType",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HeadBucketOutput",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketRegion": {
+ "shape": "BucketRegion",
+ "location": "header",
+ "locationName": "x-amz-bucket-region"
+ },
+ "BucketContentType": {
+ "shape": "BucketContentType",
+ "location": "header",
+ "locationName": "content-type"
+ }
+ }
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/HeadBucket/output",
+ "value": {
+ "shape": "HeadBucketOutput"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/PutBucketPolicy/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/shapes/GetBucketLocationOutput/payload",
+ "value": "LocationConstraint"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketAlreadyOwnedByYou/members/BucketName",
+ "value": {
+ "shape": "BucketName"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketAlreadyOwnedByYou/error",
+ "value": {
+ "httpStatusCode": 409
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/GetObjectOutput/members/StatusCode",
+ "value": {
+ "shape": "GetObjectResponseStatusCode",
+ "location": "statusCode"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HeadObjectOutput/members/StatusCode",
+ "value": {
+ "shape": "GetObjectResponseStatusCode",
+ "location": "statusCode"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchKey/members/Key",
+ "value": {
+ "shape": "ObjectKey"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchKey/error",
+ "value": {
+ "httpStatusCode": 404
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchKey/members/DeleteMarker",
+ "value": {
+ "shape": "DeleteMarker",
+ "location": "header",
+ "locationName": "x-amz-delete-marker"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchKey/members/VersionId",
+ "value": {
+ "shape": "ObjectVersionId",
+ "location": "header",
+ "locationName": "x-amz-version-id"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchVersion",
+ "value": {
+ "type": "structure",
+ "members": {
+ "VersionId": {
+ "shape": "ObjectVersionId"
+ },
+ "Key": {
+ "shape": "ObjectKey"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/PreconditionFailed",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Condition": {
+ "shape": "IfCondition"
+ }
+ },
+ "error": {
+ "httpStatusCode": 412
+ },
+ "documentation": "At least one of the pre-conditions you specified did not hold
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/IfCondition",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidRange",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ActualObjectSize": {
+ "shape": "ObjectSize"
+ },
+ "RangeRequested": {
+ "shape": "ContentRange"
+ }
+ },
+ "error": {
+ "httpStatusCode": 416
+ },
+ "documentation": "The requested range is not satisfiable
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HeadObjectOutput/members/Expires",
+ "value": {
+ "shape": "Expires",
+ "documentation": "The date and time at which the object is no longer cacheable.
",
+ "location": "header",
+ "locationName": "expires"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/GetObjectOutput/members/Expires",
+ "value": {
+ "shape": "Expires",
+ "documentation": "The date and time at which the object is no longer cacheable.
",
+ "location": "header",
+ "locationName": "expires"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/RestoreObjectOutputStatusCode",
+ "value": {
+ "type": "integer"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/RestoreObjectOutput/members/StatusCode",
+ "value": {
+ "shape": "RestoreObjectOutputStatusCode",
+ "location": "statusCode"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidArgument",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ArgumentName": {
+ "shape": "ArgumentName"
+ },
+ "ArgumentValue": {
+ "shape": "ArgumentValue"
+ },
+ "HostId": {
+ "shape": "HostId"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "Invalid Argument
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ArgumentName",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ArgumentValue",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/SignatureDoesNotMatch",
+ "value": {
+ "type": "structure",
+ "members": {
+ "AWSAccessKeyId": {
+ "shape": "AWSAccessKeyId"
+ },
+ "CanonicalRequest": {
+ "shape": "CanonicalRequest"
+ },
+ "CanonicalRequestBytes": {
+ "shape": "CanonicalRequestBytes"
+ },
+ "HostId": {
+ "shape": "HostId"
+ },
+ "SignatureProvided": {
+ "shape": "SignatureProvided"
+ },
+ "StringToSign": {
+ "shape": "StringToSign"
+ },
+ "StringToSignBytes": {
+ "shape": "StringToSignBytes"
+ }
+ },
+ "error": {
+ "httpStatusCode": 403
+ },
+ "documentation": "The request signature we calculated does not match the signature you provided. Check your key and signing method.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/AccessDenied",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Expires": {
+ "shape": "Expires"
+ },
+ "ServerTime": {
+ "shape": "ServerTime"
+ },
+ "X_Amz_Expires": {
+ "shape": "X-Amz-Expires",
+ "locationName":"X-Amz-Expires"
+ },
+ "HostId": {
+ "shape": "HostId"
+ },
+ "HeadersNotSigned": {
+ "shape": "HeadersNotSigned"
+ }
+ },
+ "error": {
+ "httpStatusCode": 403
+ },
+ "documentation": "Request has expired
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/AWSAccessKeyId",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HostId",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HeadersNotSigned",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/SignatureProvided",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/StringToSign",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/StringToSignBytes",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/CanonicalRequest",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/CanonicalRequestBytes",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ServerTime",
+ "value": {
+ "type": "timestamp"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/X-Amz-Expires",
+ "value": {
+ "type": "integer"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/AuthorizationQueryParametersError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "HostId": {
+ "shape": "HostId"
+ }
+ },
+ "documentation": "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/PostObject",
+ "value": {
+ "name":"PostObject",
+ "http":{
+ "method":"POST",
+ "requestUri":"/{Bucket}"
+ },
+ "input":{"shape":"PostObjectRequest"},
+ "output":{"shape":"PostResponse"},
+ "documentationUrl":"http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html",
+ "documentation":"The POST operation adds an object to a specified bucket by using HTML forms. POST is an alternate form of PUT that enables browser-based uploads as a way of putting objects in buckets. Parameters that are passed to PUT through HTTP Headers are instead passed as form fields to POST in the multipart/form-data encoded message body. To add an object to a bucket, you must have WRITE access on the bucket. Amazon S3 never stores partial objects. If you receive a successful response, you can be confident that the entire object was stored.
"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/PostObjectRequest",
+ "value": {
+ "type":"structure",
+ "required":[
+ "Bucket"
+ ],
+ "members":{
+ "Body":{
+ "shape":"Body",
+ "documentation":"
Object data.
",
+ "streaming":true
+ },
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"The bucket name to which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName -AccountId .s3-accesspoint.Region .amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide .
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName -AccountId .outpostID .s3-outposts.Region .amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide .
",
+ "location":"uri",
+ "locationName":"Bucket"
+ }
+ },
+ "payload":"Body"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/PostResponse",
+ "value": {
+ "type":"structure",
+ "members":{
+ "StatusCode": {
+ "shape": "GetObjectResponseStatusCode",
+ "location": "statusCode"
+ },
+ "Location":{
+ "shape":"Location",
+ "documentation":"The URI that identifies the newly created object.
"
+ },
+ "LocationHeader":{
+ "shape":"Location",
+ "documentation":"The URI that identifies the newly created object.
",
+ "location": "header",
+ "locationName": "Location"
+ },
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName -AccountId .s3-accesspoint.Region .amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide .
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName -AccountId .outpostID .s3-outposts.Region .amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide .
"
+ },
+ "Key":{
+ "shape":"ObjectKey",
+ "documentation":"The object key of the newly created object.
"
+ },
+ "Expiration": {
+ "shape": "Expiration",
+ "documentation": "If the expiration is configured for the object (see PutBucketLifecycleConfiguration ), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
",
+ "location": "header",
+ "locationName": "x-amz-expiration"
+ },
+ "ETag":{
+ "shape":"ETag",
+ "documentation":"Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see Checking object integrity in the Amazon S3 User Guide .
"
+ },
+ "ETagHeader":{
+ "shape":"ETag",
+ "documentation":"Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location": "header",
+ "locationName": "ETag"
+ },
+ "ChecksumCRC32": {
+ "shape": "ChecksumCRC32",
+ "documentation": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location": "header",
+ "locationName": "x-amz-checksum-crc32"
+ },
+ "ChecksumCRC32C": {
+ "shape": "ChecksumCRC32C",
+ "documentation": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location": "header",
+ "locationName": "x-amz-checksum-crc32c"
+ },
+ "ChecksumCRC64NVME":{
+ "shape":"ChecksumCRC64NVME",
+ "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location":"header",
+ "locationName":"x-amz-checksum-crc64nvme"
+ },
+ "ChecksumSHA1": {
+ "shape": "ChecksumSHA1",
+ "documentation": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location": "header",
+ "locationName": "x-amz-checksum-sha1"
+ },
+ "ChecksumSHA256": {
+ "shape": "ChecksumSHA256",
+ "documentation": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide .
",
+ "location": "header",
+ "locationName": "x-amz-checksum-sha256"
+ },
+ "ChecksumType":{
+ "shape":"ChecksumType",
+ "documentation":"This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. If the checksum type doesnβt match the checksum type that was specified for the object during the CreateMultipartUpload request, itβll result in a BadDigest error. For more information, see Checking object integrity in the Amazon S3 User Guide.
",
+ "location":"header",
+ "locationName":"x-amz-checksum-type"
+ },
+ "ServerSideEncryption": {
+ "shape": "ServerSideEncryption",
+ "documentation": "If you specified server-side encryption either with an Amazon Web Services KMS key or Amazon S3-managed encryption key in your PUT request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption"
+ },
+ "VersionId": {
+ "shape": "ObjectVersionId",
+ "documentation": "Version of the object.
",
+ "location": "header",
+ "locationName": "x-amz-version-id"
+ },
+ "SSECustomerAlgorithm": {
+ "shape": "SSECustomerAlgorithm",
+ "documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption-customer-algorithm"
+ },
+ "SSECustomerKeyMD5": {
+ "shape": "SSECustomerKeyMD5",
+ "documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption-customer-key-MD5"
+ },
+ "SSEKMSKeyId": {
+ "shape": "SSEKMSKeyId",
+ "documentation": "If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object.
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption-aws-kms-key-id"
+ },
+ "SSEKMSEncryptionContext": {
+ "shape": "SSEKMSEncryptionContext",
+ "documentation": "If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption-context"
+ },
+ "BucketKeyEnabled": {
+ "shape": "BucketKeyEnabled",
+ "documentation": "Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
",
+ "location": "header",
+ "locationName": "x-amz-server-side-encryption-bucket-key-enabled"
+ },
+ "RequestCharged": {
+ "shape": "RequestCharged",
+ "location": "header",
+ "locationName": "x-amz-request-charged"
+ }
+ }
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchWebsiteConfiguration",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The specified bucket does not have a website configuration
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchUpload/members/UploadId",
+ "value": {
+ "shape": "MultipartUploadId"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchUpload/error",
+ "value": {
+ "httpStatusCode": 404
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ReplicationConfigurationNotFoundError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The replication configuration was not found.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketCannedACL/enum/4",
+ "value": "log-delivery-write",
+ "documentation": "Not included in the specs, but valid value according to the docs: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BadRequest",
+ "value": {
+ "type": "structure",
+ "members": {
+ "HostId": {
+ "shape": "HostId"
+ }
+ },
+ "documentation": "Insufficient information. Origin request header needed.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/AccessForbidden",
+ "value": {
+ "type": "structure",
+ "members": {
+ "HostId": {
+ "shape": "HostId"
+ },
+ "Method": {
+ "shape": "HttpMethod"
+ },
+ "ResourceType": {
+ "shape": "ResourceType"
+ }
+ },
+ "error": {
+ "httpStatusCode": 403
+ },
+ "documentation": "CORSResponse
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HttpMethod",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ResourceType",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchCORSConfiguration",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The CORS configuration does not exist
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/MissingSecurityHeader",
+ "value": {
+ "type": "structure",
+ "members": {
+ "MissingHeaderName": {
+ "shape": "MissingHeaderName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "Your request was missing a required header
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/MissingHeaderName",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidPartOrder",
+ "value": {
+ "type": "structure",
+ "members": {
+ "UploadId": {
+ "shape": "MultipartUploadId"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The list of parts was not in ascending order. Parts must be ordered by part number.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidStorageClass",
+ "value": {
+ "type": "structure",
+ "members": {
+ "StorageClassRequested": {
+ "shape": "StorageClass"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The storage class you specified is not valid
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListObjectsOutput/members/BucketRegion",
+ "value": {
+ "shape": "BucketRegion",
+ "location": "header",
+ "locationName": "x-amz-bucket-region"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListObjectsV2Output/members/BucketRegion",
+ "value": {
+ "shape": "BucketRegion",
+ "location": "header",
+ "locationName": "x-amz-bucket-region"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ResourceType",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/MethodNotAllowed",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Method": {
+ "shape": "HttpMethod"
+ },
+ "ResourceType": {
+ "shape": "ResourceType"
+ },
+ "DeleteMarker": {
+ "shape": "DeleteMarker",
+ "location": "header",
+ "locationName": "x-amz-delete-marker"
+ },
+ "VersionId": {
+ "shape": "ObjectVersionId",
+ "location": "header",
+ "locationName": "x-amz-version-id"
+ },
+ "Allow": {
+ "shape": "HttpMethod",
+ "location": "header",
+ "locationName": "allow"
+ }
+ },
+ "error": {
+ "httpStatusCode": 405
+ },
+ "documentation": "The specified method is not allowed against this resource.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "remove",
+ "path": "/shapes/ListBucketsOutput/members/Buckets"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListBucketsOutput/members/Buckets",
+ "value": {
+ "shape":"Buckets",
+ "documentation":"The list of buckets owned by the requester.
"
+ }
+ },
+ {
+ "op": "remove",
+ "path": "/shapes/ListObjectsOutput/members/Contents"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListObjectsOutput/members/Contents",
+ "value": {
+ "shape":"ObjectList",
+ "documentation":"Metadata about each object returned.
"
+ }
+ },
+ {
+ "op": "remove",
+ "path": "/shapes/ListObjectsV2Output/members/Contents"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListObjectsV2Output/members/Contents",
+ "value": {
+ "shape":"ObjectList",
+ "documentation":"Metadata about each object returned.
"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/CrossLocationLoggingProhibitted",
+ "value": {
+ "type": "structure",
+ "members": {
+ "TargetBucketLocation": {
+ "shape": "BucketRegion"
+ },
+ "SourceBucketLocation": {
+ "shape": "BucketRegion"
+ }
+ },
+ "error": {
+ "httpStatusCode": 403
+ },
+ "documentation": "Cross S3 location logging not allowed.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidTargetBucketForLogging",
+ "value": {
+ "type": "structure",
+ "members": {
+ "TargetBucket": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The target bucket for logging does not exist
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/PutBucketInventoryConfiguration/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/operations/PutBucketAnalyticsConfiguration/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/operations/PutBucketIntelligentTieringConfiguration/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketNotEmpty",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 409
+ },
+ "documentation": "The bucket you tried to delete is not empty
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/MinSizeAllowed",
+ "value": {
+ "type": "long"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ProposedSize",
+ "value": {
+ "type": "long"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/EntityTooSmall",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ETag": {
+ "shape": "ETag"
+ },
+ "MinSizeAllowed": {
+ "shape": "MinSizeAllowed"
+ },
+ "PartNumber": {
+ "shape": "PartNumber"
+ },
+ "ProposedSize": {
+ "shape": "ProposedSize"
+ }
+ },
+ "documentation": "Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidPart",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ETag": {
+ "shape": "ETag"
+ },
+ "UploadId": {
+ "shape": "MultipartUploadId"
+ },
+ "PartNumber": {
+ "shape": "PartNumber"
+ }
+ },
+ "documentation": "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchTagSet",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "There is no tag set associated with the bucket.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/PutBucketTagging/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidTag",
+ "value": {
+ "type": "structure",
+ "members": {
+ "TagKey": {
+ "shape": "ObjectKey"
+ },
+ "TagValue": {
+ "shape": "Value"
+ }
+ },
+ "documentation": "The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ObjectLockConfigurationNotFoundError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "Object Lock configuration does not exist for this bucket
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidPartNumber",
+ "value": {
+ "type": "structure",
+ "members": {
+ "PartNumberRequested": {
+ "shape": "PartNumber"
+ },
+ "ActualPartCount": {
+ "shape": "PartNumber"
+ }
+ },
+ "error": {
+ "httpStatusCode": 416
+ },
+ "documentation": "The requested partnumber is not satisfiable
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/OwnershipControlsNotFoundError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The bucket ownership controls were not found
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchPublicAccessBlockConfiguration",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The public access block configuration was not found
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchBucketPolicy",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "The bucket policy does not exist
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidObjectState/error",
+ "value": {
+ "httpStatusCode": 403
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidDigest",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Content_MD5": {
+ "shape": "ContentMD5",
+ "locationName":"Content-MD5"
+ }
+ },
+ "documentation": "The Content-MD5 you specified was invalid.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/KeyLength",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/KeyTooLongError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "MaxSizeAllowed": {
+ "shape": "KeyLength"
+ },
+ "Size": {
+ "shape": "KeyLength"
+ }
+ },
+ "documentation": "Your key is too long
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidLocationConstraint",
+ "value": {
+ "type": "structure",
+ "members": {
+ "LocationConstraint": {
+ "shape": "BucketRegion"
+ }
+ },
+ "documentation": "The specified location-constraint is not valid
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/EntityTooLarge",
+ "value": {
+ "type": "structure",
+ "members": {
+ "MaxSizeAllowed": {
+ "shape": "KeyLength"
+ },
+ "HostId": {
+ "shape": "HostId"
+ },
+ "ProposedSize": {
+ "shape": "ProposedSize"
+ }
+ },
+ "documentation": "Your proposed upload exceeds the maximum allowed size
",
+ "exception": true
+ }
+ },
+ {
+ "op": "remove",
+ "path": "/shapes/ListObjectVersionsOutput/members/Versions"
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ListObjectVersionsOutput/members/Versions",
+ "value": {
+ "shape":"ObjectVersionList",
+ "documentation":"Container for version information.
",
+ "locationName":"Version"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidEncryptionAlgorithmError",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ArgumentName": {
+ "shape": "ArgumentName"
+ },
+ "ArgumentValue": {
+ "shape": "ArgumentValue"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The Encryption request you specified is not valid.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/Header",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/additionalMessage",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NotImplemented",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Header": {
+ "shape": "Header"
+ },
+ "additionalMessage": {
+ "shape": "additionalMessage"
+ }
+ },
+ "error": {
+ "httpStatusCode": 501
+ },
+ "documentation": "A header you provided implies functionality that is not implemented.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/ConditionalRequestConflict",
+ "value": {
+ "type": "structure",
+ "members": {
+ "Condition": {
+ "shape": "IfCondition"
+ },
+ "Key": {
+ "shape": "ObjectKey"
+ }
+ },
+ "error": {
+ "httpStatusCode": 409
+ },
+ "documentation": "The conditional request cannot succeed due to a conflicting operation against this resource.
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BadDigest",
+ "value": {
+ "type": "structure",
+ "members": {
+ "ExpectedDigest": {
+ "shape": "ContentMD5"
+ },
+ "CalculatedDigest": {
+ "shape": "ContentMD5"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "The Content-MD5 you specified did not match what we received.
",
+ "exception": true
+ }
+ }
+ ],
+ "apigatewayv2/2018-11-29/service-2": [
+ {
+ "op": "add",
+ "path": "/operations/UpdateDeployment/http/responseCode",
+ "value": 201
+ },
+ {
+ "op": "add",
+ "path": "/operations/UpdateApi/http/responseCode",
+ "value": 201
+ },
+ {
+ "op": "add",
+ "path": "/operations/UpdateRoute/http/responseCode",
+ "value": 201
+ },
+ {
+ "op": "add",
+ "path": "/operations/CreateApiMapping/http/responseCode",
+ "value": 200
+ }
+ ]
+}
diff --git a/localstack-core/localstack/aws/spec.py b/localstack-core/localstack/aws/spec.py
new file mode 100644
index 0000000000000..1410ddde3e246
--- /dev/null
+++ b/localstack-core/localstack/aws/spec.py
@@ -0,0 +1,369 @@
+import dataclasses
+import json
+import logging
+import os
+import sys
+from collections import defaultdict
+from functools import cached_property, lru_cache
+from typing import Dict, Generator, List, Literal, NamedTuple, Optional, Tuple
+
+import botocore
+import jsonpatch
+from botocore.exceptions import UnknownServiceError
+from botocore.loaders import Loader, instance_cache
+from botocore.model import OperationModel, ServiceModel
+
+from localstack import config
+from localstack.constants import VERSION
+from localstack.utils.objects import singleton_factory
+
+LOG = logging.getLogger(__name__)
+
+ServiceName = str
+ProtocolName = Literal["query", "json", "rest-json", "rest-xml", "ec2"]
+
+
+class ServiceModelIdentifier(NamedTuple):
+ """
+ Identifies a specific service model.
+ If the protocol is not given, the default protocol of the service with the specific name is assumed.
+ Maybe also add versions here in the future (if we can support multiple different versions for one service).
+ """
+
+ name: ServiceName
+ protocol: Optional[ProtocolName] = None
+
+
+spec_patches_json = os.path.join(os.path.dirname(__file__), "spec-patches.json")
+
+
+def load_spec_patches() -> Dict[str, list]:
+ if not os.path.exists(spec_patches_json):
+ return {}
+ with open(spec_patches_json) as fd:
+ return json.load(fd)
+
+
+# Path for custom specs which are not (anymore) provided by botocore
+LOCALSTACK_BUILTIN_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
+
+
+class LocalStackBuiltInDataLoaderMixin(Loader):
+ def __init__(self, *args, **kwargs):
+ # add the builtin data path to the extra_search_paths to ensure they are discovered by the loader
+ super().__init__(*args, extra_search_paths=[LOCALSTACK_BUILTIN_DATA_PATH], **kwargs)
+
+
+class PatchingLoader(Loader):
+ """
+ A custom botocore Loader that applies JSON patches from the given json patch file to the specs as they are loaded.
+ """
+
+ patches: Dict[str, list]
+
+ def __init__(self, patches: Dict[str, list], *args, **kwargs):
+ # add the builtin data path to the extra_search_paths to ensure they are discovered by the loader
+ super().__init__(*args, **kwargs)
+ self.patches = patches
+
+ @instance_cache
+ def load_data(self, name: str):
+ result = super(PatchingLoader, self).load_data(name)
+
+ if patches := self.patches.get(name):
+ return jsonpatch.apply_patch(result, patches)
+
+ return result
+
+
+class CustomLoader(PatchingLoader, LocalStackBuiltInDataLoaderMixin):
+ # Class mixing the different loader features (patching, localstack specific data)
+ pass
+
+
+loader = CustomLoader(load_spec_patches())
+
+
+class UnknownServiceProtocolError(UnknownServiceError):
+ """Raised when trying to load a service with an unknown protocol.
+
+ :ivar service_name: The name of the service.
+ :ivar protocol: The name of the unknown protocol.
+ """
+
+ fmt = "Unknown service protocol: '{service_name}-{protocol}'."
+
+
+def list_services() -> List[ServiceModel]:
+ return [load_service(service) for service in loader.list_available_services("service-2")]
+
+
+def load_service(
+ service: ServiceName, version: Optional[str] = None, protocol: Optional[ProtocolName] = None
+) -> ServiceModel:
+ """
+ Loads a service
+ :param service: to load, f.e. "sqs". For custom, internalized, service protocol specs (f.e. sqs-query) it's also
+ possible to directly define the protocol in the service name (f.e. use sqs-query)
+ :param version: of the service to load, f.e. "2012-11-05", by default the latest version will be used
+ :param protocol: specific protocol to load for the specific service, f.e. "json" for the "sqs" service
+ (the default protocol of the service is used if not given)
+ :return: Loaded service model of the service
+ :raises: UnknownServiceError if the service cannot be found
+ :raises: UnknownServiceProtocolError if the specific protocol of the service cannot be found
+ """
+ service_description = loader.load_service_model(service, "service-2", version)
+
+ # check if the protocol is defined, and if so, if the loaded service defines this protocol
+ if protocol is not None and protocol != service_description.get("metadata", {}).get("protocol"):
+ # if the protocol is defined, but not the one of the currently loaded service,
+ # check if we already loaded the custom spec based on the naming convention (-),
+ # f.e. "sqs-query"
+ if service.endswith(f"-{protocol}"):
+ # if so, we raise an exception
+ raise UnknownServiceProtocolError(service_name=service, protocol=protocol)
+ # otherwise we try to load it (recursively)
+ try:
+ return load_service(f"{service}-{protocol}", version, protocol=protocol)
+ except UnknownServiceError:
+ # raise an unknown protocol error in case the service also can't be loaded with the naming convention
+ raise UnknownServiceProtocolError(service_name=service, protocol=protocol)
+
+ # remove potential protocol names from the service name
+ # FIXME add more protocols here if we have to internalize more than just sqs-query
+ # TODO this should not contain specific internalized service names
+ service = {"sqs-query": "sqs"}.get(service, service)
+ return ServiceModel(service_description, service)
+
+
+def iterate_service_operations() -> Generator[Tuple[ServiceModel, OperationModel], None, None]:
+ """
+ Returns one record per operation in the AWS service spec, where the first item is the service model the operation
+ belongs to, and the second is the operation model.
+
+ :return: an iterable
+ """
+ for service in list_services():
+ for op_name in service.operation_names:
+ yield service, service.operation_model(op_name)
+
+
+@dataclasses.dataclass
+class ServiceCatalogIndex:
+ """
+ The ServiceCatalogIndex enables fast lookups for common operations to determine a service from service indicators.
+ """
+
+ service_names: List[ServiceName]
+ target_prefix_index: Dict[str, List[ServiceModelIdentifier]]
+ signing_name_index: Dict[str, List[ServiceModelIdentifier]]
+ operations_index: Dict[str, List[ServiceModelIdentifier]]
+ endpoint_prefix_index: Dict[str, List[ServiceModelIdentifier]]
+
+
+class LazyServiceCatalogIndex:
+ """
+ A ServiceCatalogIndex that builds indexes in-memory from the spec.
+ """
+
+ @cached_property
+ def service_names(self) -> List[ServiceName]:
+ return list(self._services.keys())
+
+ @cached_property
+ def target_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ result = defaultdict(list)
+ for service_models in self._services.values():
+ for service_model in service_models:
+ target_prefix = service_model.metadata.get("targetPrefix")
+ if target_prefix:
+ result[target_prefix].append(
+ ServiceModelIdentifier(service_model.service_name, service_model.protocol)
+ )
+ return dict(result)
+
+ @cached_property
+ def signing_name_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ result = defaultdict(list)
+ for service_models in self._services.values():
+ for service_model in service_models:
+ result[service_model.signing_name].append(
+ ServiceModelIdentifier(service_model.service_name, service_model.protocol)
+ )
+ return dict(result)
+
+ @cached_property
+ def operations_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ result = defaultdict(list)
+ for service_models in self._services.values():
+ for service_model in service_models:
+ operations = service_model.operation_names
+ if operations:
+ for operation in operations:
+ result[operation].append(
+ ServiceModelIdentifier(
+ service_model.service_name, service_model.protocol
+ )
+ )
+ return dict(result)
+
+ @cached_property
+ def endpoint_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ result = defaultdict(list)
+ for service_models in self._services.values():
+ for service_model in service_models:
+ result[service_model.endpoint_prefix].append(
+ ServiceModelIdentifier(service_model.service_name, service_model.protocol)
+ )
+ return dict(result)
+
+ @cached_property
+ def _services(self) -> Dict[ServiceName, List[ServiceModel]]:
+ services = defaultdict(list)
+ for service in list_services():
+ services[service.service_name].append(service)
+ return services
+
+
+class ServiceCatalog:
+ index: ServiceCatalogIndex
+
+ def __init__(self, index: ServiceCatalogIndex = None):
+ self.index = index or LazyServiceCatalogIndex()
+
+ @lru_cache(maxsize=512)
+ def get(
+ self, name: ServiceName, protocol: Optional[ProtocolName] = None
+ ) -> Optional[ServiceModel]:
+ return load_service(name, protocol=protocol)
+
+ @property
+ def service_names(self) -> List[ServiceName]:
+ return self.index.service_names
+
+ @property
+ def target_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ return self.index.target_prefix_index
+
+ @property
+ def signing_name_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ return self.index.signing_name_index
+
+ @property
+ def operations_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ return self.index.operations_index
+
+ @property
+ def endpoint_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]:
+ return self.index.endpoint_prefix_index
+
+ def by_target_prefix(self, target_prefix: str) -> List[ServiceModelIdentifier]:
+ return self.target_prefix_index.get(target_prefix, [])
+
+ def by_signing_name(self, signing_name: str) -> List[ServiceModelIdentifier]:
+ return self.signing_name_index.get(signing_name, [])
+
+ def by_operation(self, operation_name: str) -> List[ServiceModelIdentifier]:
+ return self.operations_index.get(operation_name, [])
+
+
+def build_service_index_cache(file_path: str) -> ServiceCatalogIndex:
+ """
+ Creates a new ServiceCatalogIndex and stores it into the given file_path.
+
+ :param file_path: the path to store the file to
+ :return: the created ServiceCatalogIndex
+ """
+ return save_service_index_cache(LazyServiceCatalogIndex(), file_path)
+
+
+def load_service_index_cache(file: str) -> ServiceCatalogIndex:
+ """
+ Loads from the given file the stored ServiceCatalogIndex.
+
+ :param file: the file to load from
+ :return: the loaded ServiceCatalogIndex
+ """
+ import dill
+
+ with open(file, "rb") as fd:
+ return dill.load(fd)
+
+
+def save_service_index_cache(index: LazyServiceCatalogIndex, file_path: str) -> ServiceCatalogIndex:
+ """
+ Creates from the given LazyServiceCatalogIndex a ``ServiceCatalogIndex``, stores its contents into the given file,
+ and then returns the newly created index.
+
+ :param index: the LazyServiceCatalogIndex to store the index from.
+ :param file_path: the path to store the binary index cache file to
+ :return: the created ServiceCatalogIndex
+ """
+ import dill
+
+ cache = ServiceCatalogIndex(
+ service_names=index.service_names,
+ endpoint_prefix_index=index.endpoint_prefix_index,
+ operations_index=index.operations_index,
+ signing_name_index=index.signing_name_index,
+ target_prefix_index=index.target_prefix_index,
+ )
+ with open(file_path, "wb") as fd:
+ # use dill (instead of plain pickle) to avoid issues when serializing the pickle from __main__
+ dill.dump(cache, fd)
+ return cache
+
+
+def _get_catalog_filename():
+ ls_ver = VERSION.replace(".", "_")
+ botocore_ver = botocore.__version__.replace(".", "_")
+ return f"service-catalog-{ls_ver}-{botocore_ver}.dill"
+
+
+@singleton_factory
+def get_service_catalog() -> ServiceCatalog:
+ """Loads the ServiceCatalog (which contains all the service specs), and potentially re-uses a cached index."""
+
+ try:
+ catalog_file_name = _get_catalog_filename()
+ static_catalog_file = os.path.join(config.dirs.static_libs, catalog_file_name)
+
+ # try to load or load/build/save the service catalog index from the static libs
+ index = None
+ if os.path.exists(static_catalog_file):
+ # load the service catalog from the static libs dir / built at build time
+ LOG.debug("loading service catalog index cache file %s", static_catalog_file)
+ index = load_service_index_cache(static_catalog_file)
+ elif os.path.isdir(config.dirs.cache):
+ cache_catalog_file = os.path.join(config.dirs.cache, catalog_file_name)
+ if os.path.exists(cache_catalog_file):
+ LOG.debug("loading service catalog index cache file %s", cache_catalog_file)
+ index = load_service_index_cache(cache_catalog_file)
+ else:
+ LOG.debug("building service catalog index cache file %s", cache_catalog_file)
+ index = build_service_index_cache(cache_catalog_file)
+ return ServiceCatalog(index)
+ except Exception:
+ LOG.exception(
+ "error while processing service catalog index cache, falling back to lazy-loaded index"
+ )
+ return ServiceCatalog()
+
+
+def main():
+ catalog_file_name = _get_catalog_filename()
+ static_catalog_file = os.path.join(config.dirs.static_libs, catalog_file_name)
+
+ if os.path.exists(static_catalog_file):
+ LOG.error(
+ "service catalog index cache file (%s) already there. aborting!", static_catalog_file
+ )
+ return 1
+
+ # load the service catalog from the static libs dir / built at build time
+ LOG.debug("building service catalog index cache file %s", static_catalog_file)
+ build_service_index_cache(static_catalog_file)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/localstack-core/localstack/cli/__init__.py b/localstack-core/localstack/cli/__init__.py
new file mode 100644
index 0000000000000..fb0407e19e65e
--- /dev/null
+++ b/localstack-core/localstack/cli/__init__.py
@@ -0,0 +1,10 @@
+from .console import console
+from .plugin import LocalstackCli, LocalstackCliPlugin
+
+name = "cli"
+
+__all__ = [
+ "console",
+ "LocalstackCli",
+ "LocalstackCliPlugin",
+]
diff --git a/localstack-core/localstack/cli/console.py b/localstack-core/localstack/cli/console.py
new file mode 100644
index 0000000000000..24bda10813744
--- /dev/null
+++ b/localstack-core/localstack/cli/console.py
@@ -0,0 +1,11 @@
+from rich.console import Console
+
# ASCII-art "LocalStack" banner shown by `localstack start` (raw string so the
# backslashes of the art are preserved verbatim).
BANNER = r"""
 __ _______ __ __
 / / ____ _________ _/ / ___// /_____ ______/ /__
 / / / __ \/ ___/ __ `/ /\__ \/ __/ __ `/ ___/ //_/
 / /___/ /_/ / /__/ /_/ / /___/ / /_/ /_/ / /__/ ,<
 /_____/\____/\___/\__,_/_//____/\__/\__,_/\___/_/|_|
"""

# Single shared rich Console instance used across all CLI commands for styled output.
console = Console()
diff --git a/localstack-core/localstack/cli/exceptions.py b/localstack-core/localstack/cli/exceptions.py
new file mode 100644
index 0000000000000..cd65d2ee13d26
--- /dev/null
+++ b/localstack-core/localstack/cli/exceptions.py
@@ -0,0 +1,19 @@
+import typing as t
+from gettext import gettext
+
+import click
+from click import ClickException, echo
+from click._compat import get_text_stderr
+
+
class CLIError(ClickException):
    """A ClickException with a red error message"""

    def format_message(self) -> str:
        # NOTE(review): the leading "β" glyph looks like a mojibake of an error
        # symbol (e.g. a cross mark) — confirm the intended character.
        return click.style(f"β Error: {self.message}", fg="red")

    def show(self, file: t.Optional[t.IO[t.Any]] = None) -> None:
        # Print the styled message; defaults to stderr, mirroring click's own
        # ClickException.show behavior.
        if file is None:
            file = get_text_stderr()

        echo(gettext(self.format_message()), file=file)
diff --git a/localstack-core/localstack/cli/localstack.py b/localstack-core/localstack/cli/localstack.py
new file mode 100644
index 0000000000000..016834b3e21b3
--- /dev/null
+++ b/localstack-core/localstack/cli/localstack.py
@@ -0,0 +1,946 @@
+import json
+import logging
+import os
+import sys
+import traceback
+from typing import Dict, List, Optional, Tuple, TypedDict
+
+import click
+import requests
+
+from localstack import config
+from localstack.cli.exceptions import CLIError
+from localstack.constants import VERSION
+from localstack.utils.analytics.cli import publish_invocation
+from localstack.utils.bootstrap import get_container_default_logfile_location
+from localstack.utils.json import CustomEncoder
+
+from .console import BANNER, console
+from .plugin import LocalstackCli, load_cli_plugins
+
+
class LocalStackCliGroup(click.Group):
    """
    A Click group used for the top-level ``localstack`` command group. It implements global exception handling
    by:

    - Ignoring click exceptions (already handled)
    - Handling common exceptions (like DockerNotAvailable)
    - Wrapping all unexpected exceptions in a ClickException (for a unified error message)

    It also implements a custom help formatter to build more fine-grained groups.
    """

    # FIXME: find a way to communicate this from the actual command
    # Commands listed here are rendered under the "Advanced" section of --help.
    advanced_commands = [
        "aws",
        "dns",
        "extensions",
        "license",
        "login",
        "logout",
        "pod",
        "state",
        "ephemeral",
        "replicator",
    ]

    def invoke(self, ctx: click.Context):
        # Run the selected subcommand, translating known failures into CLIError
        # so the user sees one consistent (red) error message format.
        try:
            return super(LocalStackCliGroup, self).invoke(ctx)
        except click.exceptions.Exit:
            # raise Exit exceptions unmodified (e.g., raised on --help)
            raise
        except click.ClickException:
            # don't handle ClickExceptions, just reraise
            if ctx and ctx.params.get("debug"):
                click.echo(traceback.format_exc())
            raise
        except Exception as e:
            if ctx and ctx.params.get("debug"):
                click.echo(traceback.format_exc())
            # imported lazily so the CLI can run without the container runtime modules
            from localstack.utils.container_utils.container_client import (
                ContainerException,
                DockerNotAvailable,
            )

            if isinstance(e, DockerNotAvailable):
                raise CLIError(
                    "Docker could not be found on the system.\n"
                    "Please make sure that you have a working docker environment on your machine."
                )
            elif isinstance(e, ContainerException):
                raise CLIError(e.message)
            else:
                # If we have a generic exception, we wrap it in a ClickException
                raise CLIError(str(e)) from e

    def format_commands(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
        """Extra format methods for multi methods that adds all the commands after the options. It also
        groups commands into command categories."""
        categories = {"Commands": [], "Advanced": [], "Deprecated": []}

        commands = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command. Ignore it
            if cmd is None:
                continue
            if cmd.hidden:
                continue

            commands.append((subcommand, cmd))

        # allow for 3 times the default spacing
        if len(commands):
            limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)

            for subcommand, cmd in commands:
                # NOTE(review): `help` shadows the builtin here (local scope only)
                help = cmd.get_short_help_str(limit)
                categories[self._get_category(cmd)].append((subcommand, help))

        for category, rows in categories.items():
            if rows:
                with formatter.section(category):
                    formatter.write_dl(rows)

    def _get_category(self, cmd) -> str:
        # "Deprecated" takes precedence over "Advanced"; everything else is a plain command.
        if cmd.deprecated:
            return "Deprecated"

        if cmd.name in self.advanced_commands:
            return "Advanced"

        return "Commands"
+
+
def create_with_plugins() -> LocalstackCli:
    """
    Creates a LocalstackCli instance with all cli plugins loaded.
    :return: a LocalstackCli instance
    """
    localstack_cli = LocalstackCli()
    # wire the top-level click group into the CLI object before loading plugins
    localstack_cli.group = localstack
    load_cli_plugins(localstack_cli)
    return localstack_cli
+
+
def _setup_cli_debug() -> None:
    """Switch the CLI process into debug mode and configure verbose logging."""
    from localstack.logging.setup import setup_logging_for_cli

    # propagate the debug flag both to the in-process config and the environment
    config.DEBUG = True
    os.environ["DEBUG"] = "1"

    log_level = logging.DEBUG if config.DEBUG else logging.INFO
    setup_logging_for_cli(log_level)
+
+
# Re-usable "--format" option decorator shared by all commands that can render
# their output in multiple styles. The value is bound to the `format_` parameter
# to avoid shadowing the `format` builtin.
_click_format_option = click.option(
    "-f",
    "--format",
    "format_",
    type=click.Choice(["table", "plain", "dict", "json"]),
    default="table",
    help="The formatting style for the command output.",
)
+
+
@click.group(
    name="localstack",
    help="The LocalStack Command Line Interface (CLI)",
    cls=LocalStackCliGroup,
    context_settings={
        # add "-h" as a synonym for "--help"
        # https://click.palletsprojects.com/en/8.1.x/documentation/#help-parameter-customization
        "help_option_names": ["-h", "--help"],
        # show default values for options by default - https://github.com/pallets/click/pull/1225
        "show_default": True,
    },
)
@click.version_option(
    VERSION,
    "--version",
    "-v",
    message="LocalStack CLI %(version)s",
    help="Show the version of the LocalStack CLI and exit",
)
@click.option("-d", "--debug", is_flag=True, help="Enable CLI debugging mode")
@click.option("-p", "--profile", type=str, help="Set the configuration profile")
def localstack(debug, profile) -> None:
    """Top-level command group; runs before every subcommand to set up debug mode and directories."""
    # --profile is read manually in localstack.cli.main because it needs to be read before localstack.config is read

    if debug:
        _setup_cli_debug()

    from localstack.utils.files import cache_dir

    # overwrite the config variable here to defer import of cache_dir
    if not os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip():
        config.VOLUME_DIR = str(cache_dir() / "volume")

    # FIXME: at some point we should remove the use of `config.dirs` for the CLI,
    # see https://github.com/localstack/localstack/pull/7906
    config.dirs.for_cli().mkdirs()
+
+
@localstack.group(
    name="config",
    short_help="Manage your LocalStack config",
)
def localstack_config() -> None:
    """
    Inspect and validate your LocalStack configuration.
    """
    # group container only; the actual work happens in the subcommands (show, validate)
+
+
@localstack_config.command(name="show", short_help="Show your config")
@_click_format_option
@publish_invocation
def cmd_config_show(format_: str) -> None:
    """
    Print the current LocalStack config values.

    This command prints the LocalStack configuration values from your environment.
    It analyzes the environment variables as well as the LocalStack CLI profile.
    It does _not_ analyze a specific file (like a docker-compose-yml).
    """
    # TODO: parse values from potential docker-compose file?
    assert config

    try:
        # only load the ext config if it's available, so that pro-only options
        # are included in the output when the pro package is installed
        from localstack.pro.core import config as ext_config

        assert ext_config
    except ImportError:
        # BUG FIX: previously this returned early, so `config show` printed
        # nothing at all when the pro package was not installed. The community
        # config must be printed regardless; the pro config is an optional extra.
        pass

    if format_ == "table":
        _print_config_table()
    elif format_ == "plain":
        _print_config_pairs()
    elif format_ == "dict":
        _print_config_dict()
    elif format_ == "json":
        _print_config_json()
    else:
        _print_config_pairs()  # fall back to plain
+
+
@localstack_config.command(name="validate", short_help="Validate your config")
@click.option(
    "-f",
    "--file",
    help="Path to compose file",
    default="docker-compose.yml",
    type=click.Path(exists=True, file_okay=True, readable=True),
)
@publish_invocation
def cmd_config_validate(file: str) -> None:
    """
    Validate your LocalStack configuration (docker compose).

    This command inspects the given docker-compose file (by default docker-compose.yml in the current working
    directory) and validates if the configuration is valid.

    \b
    It will show an error and return a non-zero exit code if:
    - The docker-compose file is syntactically incorrect.
    - If the file contains common issues when configuring LocalStack.
    """

    from localstack.utils import bootstrap

    # guard clause: report and exit non-zero on any validation failure
    is_valid = bootstrap.validate_localstack_config(file)
    if not is_valid:
        console.print("[red]:heavy_multiplication_x:[/red] validation error")
        sys.exit(1)

    console.print("[green]:heavy_check_mark:[/green] config valid")
    sys.exit(0)
+
+
def _print_config_json() -> None:
    """Print the whole config as one JSON object (CustomEncoder handles non-JSON-native values)."""
    # NOTE: `json` is imported at module level; the previous redundant local
    # `import json` has been removed.
    console.print(json.dumps(dict(config.collect_config_items()), cls=CustomEncoder))
+
+
def _print_config_pairs() -> None:
    """Print the config as plain `KEY=value` lines, one entry per line."""
    for name, val in config.collect_config_items():
        console.print(f"{name}={val}")
+
+
def _print_config_dict() -> None:
    """Print the config as a single Python dict."""
    items = dict(config.collect_config_items())
    console.print(items)
+
+
def _print_config_table() -> None:
    """Render the config as a two-column rich table (Key / Value)."""
    from rich.table import Table

    table = Table(show_header=True)
    table.add_column("Key")
    table.add_column("Value")

    for name, val in config.collect_config_items():
        table.add_row(name, str(val))

    console.print(table)
+
+
@localstack.group(
    name="status",
    short_help="Query status info",
    invoke_without_command=True,
)
@click.pass_context
def localstack_status(ctx: click.Context) -> None:
    """
    Query status information about the currently running LocalStack instance.
    """
    # `invoke_without_command=True` allows plain `localstack status`; in that
    # case we default to the `docker` subcommand.
    if ctx.invoked_subcommand is None:
        ctx.invoke(localstack_status.get_command(ctx, "docker"))
+
+
@localstack_status.command(name="docker", short_help="Query LocalStack Docker status")
@_click_format_option
def cmd_status_docker(format_: str) -> None:
    """
    Query information about the currently running LocalStack Docker image, its container,
    and the LocalStack runtime.
    """
    # show a spinner while the Docker daemon is being queried
    with console.status("Querying Docker status"):
        _print_docker_status(format_)
+
+
class DockerStatus(TypedDict, total=False):
    """Status summary of the LocalStack Docker image/container (all keys optional)."""

    # whether the main LocalStack container is currently running
    running: bool
    # version string reported by the LocalStack runtime
    runtime_version: str
    # Docker image tag, id, and creation timestamp
    image_tag: str
    image_id: str
    image_created: str
    # only populated when `running` is True
    container_name: Optional[str]
    container_ip: Optional[str]
+
+
def _print_docker_status(format_: str) -> None:
    """Collect the Docker/runtime status and render it in the requested format."""
    from localstack.utils import docker_utils
    from localstack.utils.bootstrap import get_docker_image_details, get_server_version
    from localstack.utils.container_networking import get_main_container_ip, get_main_container_name

    image_details = get_docker_image_details()
    is_running = docker_utils.DOCKER_CLIENT.is_container_running(config.MAIN_CONTAINER_NAME)
    status = DockerStatus(
        runtime_version=get_server_version(),
        image_tag=image_details["tag"],
        image_id=image_details["id"],
        image_created=image_details["created"],
        running=is_running,
    )
    if is_running:
        # container details are only meaningful while the container is up
        status["container_name"] = get_main_container_name()
        status["container_ip"] = get_main_container_ip()

    if format_ == "dict":
        console.print(status)
    if format_ == "table":
        _print_docker_status_table(status)
    if format_ == "json":
        console.print(json.dumps(status))
    if format_ == "plain":
        for key, value in status.items():
            console.print(f"{key}={value}")
+
+
def _print_docker_status_table(status: DockerStatus) -> None:
    """Render the DockerStatus dict as a two-column rich table (label / value)."""
    from rich.table import Table

    grid = Table(show_header=False)
    grid.add_column()
    grid.add_column()

    grid.add_row("Runtime version", f"[bold]{status['runtime_version']}[/bold]")
    grid.add_row(
        "Docker image",
        f"tag: {status['image_tag']}, "
        f"id: {status['image_id']}, "
        f":calendar: {status['image_created']}",
    )
    # default: stopped; overridden below when the container is running
    cont_status = "[bold][red]:heavy_multiplication_x: stopped"
    if status["running"]:
        cont_status = (
            f"[bold][green]:heavy_check_mark: running[/green][/bold] "
            f'(name: "[italic]{status["container_name"]}[/italic]", IP: {status["container_ip"]})'
        )
    grid.add_row("Runtime status", cont_status)
    console.print(grid)
+
+
@localstack_status.command(name="services", short_help="Query LocalStack services status")
@_click_format_option
def cmd_status_services(format_: str) -> None:
    """
    Query information about the services of the currently running LocalStack instance.
    """
    url = config.external_service_url()

    try:
        health = requests.get(f"{url}/_localstack/health", timeout=2)
        doc = health.json()
        # BUG FIX: default to an empty dict (not a list) - `services` is a mapping
        # of service name -> status, and `.items()` below would raise
        # AttributeError if the "services" key were missing from the response.
        services = doc.get("services", {})
        if format_ == "table":
            _print_service_table(services)
        if format_ == "plain":
            for service, status in services.items():
                console.print(f"{service}={status}")
        if format_ == "dict":
            console.print(services)
        if format_ == "json":
            console.print(json.dumps(services))
    except requests.ConnectionError:
        if config.DEBUG:
            console.print_exception()
        raise CLIError(f"could not connect to LocalStack health endpoint at {url}")
+
+
def _print_service_table(services: Dict[str, str]) -> None:
    """Render a Service/Status table, sorted by service name, mapping raw
    states to rich-styled labels where known."""
    from rich.table import Table

    status_display = {
        "running": "[green]:heavy_check_mark:[/green] running",
        "starting": ":hourglass_flowing_sand: starting",
        "available": "[grey]:heavy_check_mark:[/grey] available",
        "error": "[red]:heavy_multiplication_x:[/red] error",
    }

    table = Table()
    table.add_column("Service")
    table.add_column("Status")

    # dict keys are unique, so sorting the pairs is equivalent to sorting by name
    for service, status in sorted(services.items()):
        table.add_row(service, status_display.get(status, status))

    console.print(table)
+
+
@localstack.command(name="start", short_help="Start LocalStack")
@click.option("--docker", is_flag=True, help="Start LocalStack in a docker container [default]")
@click.option("--host", is_flag=True, help="Start LocalStack directly on the host")
@click.option("--no-banner", is_flag=True, help="Disable LocalStack banner", default=False)
@click.option(
    "-d", "--detached", is_flag=True, help="Start LocalStack in the background", default=False
)
@click.option(
    "--network",
    type=str,
    help="The container network the LocalStack container should be started in. By default, the default docker bridge network is used.",
    required=False,
)
@click.option(
    "--env",
    "-e",
    help="Additional environment variables that are passed to the LocalStack container",
    multiple=True,
    required=False,
)
@click.option(
    "--publish",
    "-p",
    help="Additional port mappings that are passed to the LocalStack container",
    multiple=True,
    required=False,
)
@click.option(
    "--volume",
    "-v",
    help="Additional volume mounts that are passed to the LocalStack container",
    multiple=True,
    required=False,
)
@click.option(
    "--host-dns",
    help="Expose the LocalStack DNS server to the host using port bindings.",
    required=False,
    is_flag=True,
    default=False,
)
@click.option(
    "--stack",
    "-s",
    type=str,
    help="Use a specific stack with optional version. Examples: [localstack:4.5, snowflake]",
    required=False,
)
@publish_invocation
def cmd_start(
    docker: bool,
    host: bool,
    no_banner: bool,
    detached: bool,
    network: str = None,
    env: Tuple = (),
    publish: Tuple = (),
    volume: Tuple = (),
    host_dns: bool = False,
    stack: str = None,
) -> None:
    """
    Start the LocalStack runtime.

    This command starts the LocalStack runtime with your current configuration.
    By default, it will start a new Docker container from the latest LocalStack(-Pro) Docker image
    with best-practice volume mounts and port mappings.
    """
    # --docker and --host are mutually exclusive run modes; detached only works in Docker
    if docker and host:
        raise CLIError("Please specify either --docker or --host")
    if host and detached:
        raise CLIError("Cannot start detached in host mode")

    if stack:
        # Validate allowed stacks
        stack_name = stack.split(":")[0]
        allowed_stacks = ("localstack", "localstack-pro", "snowflake")
        if stack_name.lower() not in allowed_stacks:
            raise CLIError(f"Invalid stack '{stack_name}'. Allowed stacks: {allowed_stacks}.")

        # Set IMAGE_NAME, defaulting to :latest if no version specified
        if ":" not in stack:
            stack = f"{stack}:latest"
        os.environ["IMAGE_NAME"] = f"localstack/{stack}"

    if not no_banner:
        print_banner()
        print_version()
        print_profile()
        print_app()
        console.line()

    from localstack.utils import bootstrap

    if not no_banner:
        if host:
            console.log("starting LocalStack in host mode :laptop_computer:")
        else:
            console.log("starting LocalStack in Docker mode :whale:")

    if host:
        # call hooks to prepare host
        bootstrap.prepare_host(console)

        # from here we abandon the regular CLI control path and start treating the process like a localstack
        # runtime process
        os.environ["LOCALSTACK_CLI"] = "0"
        config.dirs = config.init_directories()

        try:
            bootstrap.start_infra_locally()
        except ImportError:
            if config.DEBUG:
                console.print_exception()
            raise CLIError(
                "It appears you have a light install of localstack which only supports running in docker.\n"
                "If you would like to use --host, please install localstack with Python using "
                "`pip install localstack[runtime]` instead."
            )
    else:
        # make sure to initialize the bootstrap environment and directories for the host (even if we're executing
        # in Docker), to allow starting the container from within other containers (e.g., Github Codespaces).
        config.OVERRIDE_IN_DOCKER = False
        config.is_in_docker = False
        config.dirs = config.init_directories()

        # call hooks to prepare host (note that this call should stay below the config overrides above)
        bootstrap.prepare_host(console)

        # pass the parsed cli params to the start infra command
        params = click.get_current_context().params

        if network:
            # reconciles the network config and makes sure that MAIN_DOCKER_NETWORK is set automatically if
            # `--network` is set.
            if config.MAIN_DOCKER_NETWORK:
                if config.MAIN_DOCKER_NETWORK != network:
                    raise CLIError(
                        f"Values of MAIN_DOCKER_NETWORK={config.MAIN_DOCKER_NETWORK} and --network={network} "
                        f"do not match"
                    )
            else:
                config.MAIN_DOCKER_NETWORK = network
                os.environ["MAIN_DOCKER_NETWORK"] = network

        if detached:
            bootstrap.start_infra_in_docker_detached(console, params)
        else:
            bootstrap.start_infra_in_docker(console, params)
+
+
@localstack.command(name="stop", short_help="Stop LocalStack")
@publish_invocation
def cmd_stop() -> None:
    """
    Stops the current LocalStack runtime.

    This command stops the currently running LocalStack docker container.
    By default, this command looks for a container named `localstack-main` (which is the default
    container name used by the `localstack start` command).
    If your LocalStack container has a different name, set the config variable
    `MAIN_CONTAINER_NAME`.
    """
    from localstack.utils.docker_utils import DOCKER_CLIENT

    from ..utils.container_utils.container_client import NoSuchContainer

    container_name = config.MAIN_CONTAINER_NAME

    try:
        DOCKER_CLIENT.stop_container(container_name)
    except NoSuchContainer:
        # translate the docker-client error into a user-facing CLI error
        raise CLIError(
            f'Expected a running LocalStack container named "{container_name}", but found none'
        )
    else:
        console.print("container stopped: %s" % container_name)
+
+
@localstack.command(name="restart", short_help="Restart LocalStack")
@publish_invocation
def cmd_restart() -> None:
    """
    Restarts the current LocalStack runtime.
    """
    url = config.external_service_url()

    try:
        # trigger an in-container restart via the health endpoint
        requests.post(
            f"{url}/_localstack/health",
            json={"action": "restart"},
        ).raise_for_status()
    except requests.ConnectionError:
        if config.DEBUG:
            console.print_exception()
        raise CLIError("could not restart the LocalStack container")

    console.print("LocalStack restarted within the container.")
+
+
@localstack.command(
    name="logs",
    short_help="Show LocalStack logs",
)
@click.option(
    "-f",
    "--follow",
    is_flag=True,
    help="Block the terminal and follow the log output",
    default=False,
)
@click.option(
    "-n",
    "--tail",
    type=int,
    help="Print only the last lines of the log output",
    default=None,
    metavar="N",
)
@publish_invocation
def cmd_logs(follow: bool, tail: int) -> None:
    """
    Show the logs of the current LocalStack runtime.

    This command shows the logs of the currently running LocalStack docker container.
    By default, this command looks for a container named `localstack-main` (which is the default
    container name used by the `localstack start` command).
    If your LocalStack container has a different name, set the config variable
    `MAIN_CONTAINER_NAME`.
    """
    from localstack.utils.docker_utils import DOCKER_CLIENT

    container_name = config.MAIN_CONTAINER_NAME
    logfile = get_container_default_logfile_location(container_name)

    # container not running: fall back to the persisted logfile of the previous run, then exit 1
    if not DOCKER_CLIENT.is_container_running(container_name):
        console.print("localstack container not running")
        if os.path.exists(logfile):
            console.print("printing logs from previous run")
            with open(logfile) as fd:
                for line in fd:
                    click.echo(line, nl=False)
        sys.exit(1)

    if follow:
        # stream live logs; NOTE(review): combined with --tail this stops after N
        # *new* lines rather than starting from the last N — confirm this is intended
        num_lines = 0
        for line in DOCKER_CLIENT.stream_container_logs(container_name):
            print(line.decode("utf-8").rstrip("\r\n"))
            num_lines += 1
            if tail is not None and num_lines >= tail:
                break

    else:
        # fetch the full log buffer and optionally trim to the last `tail` lines
        logs = DOCKER_CLIENT.get_container_logs(container_name)
        if tail is not None:
            logs = "\n".join(logs.split("\n")[-tail:])
        print(logs)
+
+
@localstack.command(name="wait", short_help="Wait for LocalStack")
@click.option(
    "-t",
    "--timeout",
    type=float,
    help="Only wait for <N> seconds before raising a timeout error",
    default=None,
    metavar="N",
)
@publish_invocation
def cmd_wait(timeout: Optional[float] = None) -> None:
    """
    Wait for the LocalStack runtime to be up and running.

    This commands waits for a started LocalStack runtime to be up and running, ready to serve
    requests.
    By default, this command looks for a container named `localstack-main` (which is the default
    container name used by the `localstack start` command).
    If your LocalStack container has a different name, set the config variable
    `MAIN_CONTAINER_NAME`.
    """
    from localstack.utils.bootstrap import wait_container_is_ready

    ready = wait_container_is_ready(timeout=timeout)
    if not ready:
        raise CLIError("timeout")
+
+
@localstack.command(name="ssh", short_help="Obtain a shell in LocalStack")
@publish_invocation
def cmd_ssh() -> None:
    """
    Obtain a shell in the current LocalStack runtime.

    This command starts a new interactive shell in the currently running LocalStack container.
    By default, this command looks for a container named `localstack-main` (which is the default
    container name used by the `localstack start` command).
    If your LocalStack container has a different name, set the config variable
    `MAIN_CONTAINER_NAME`.
    """
    from localstack.utils.docker_utils import DOCKER_CLIENT

    if not DOCKER_CLIENT.is_container_running(config.MAIN_CONTAINER_NAME):
        raise CLIError(
            f'Expected a running LocalStack container named "{config.MAIN_CONTAINER_NAME}", but found none'
        )
    # execlp replaces the current CLI process with `docker exec -it <container> bash`
    # and therefore never returns on success
    os.execlp("docker", "docker", "exec", "-it", config.MAIN_CONTAINER_NAME, "bash")
+
+
@localstack.group(name="update", short_help="Update LocalStack")
def localstack_update() -> None:
    """
    Update different LocalStack components.
    """
    # group container only; the actual work happens in the subcommands below
+
+
@localstack_update.command(name="all", short_help="Update all LocalStack components")
@click.pass_context
@publish_invocation
def cmd_update_all(ctx: click.Context) -> None:
    """
    Update all LocalStack components.

    This is the same as executing `localstack update localstack-cli` and
    `localstack update docker-images`.
    Updating the LocalStack CLI is currently only supported if the CLI
    is installed and run via Python / PIP. If you used a different installation method,
    please follow the instructions on https://docs.localstack.cloud/.
    """
    # delegate to the sibling subcommands so the behavior stays in one place
    ctx.invoke(localstack_update.get_command(ctx, "localstack-cli"))
    ctx.invoke(localstack_update.get_command(ctx, "docker-images"))
+
+
@localstack_update.command(name="localstack-cli", short_help="Update LocalStack CLI")
@publish_invocation
def cmd_update_localstack_cli() -> None:
    """
    Update the LocalStack CLI.

    This command updates the LocalStack CLI. This is currently only supported if the CLI
    is installed and run via Python / PIP. If you used a different installation method,
    please follow the instructions on https://docs.localstack.cloud/.
    """
    if is_frozen_bundle():
        # "update" can only be performed if running from source / in a non-frozen interpreter
        raise CLIError(
            "The LocalStack CLI can only update itself if installed via PIP. "
            "Please follow the instructions on https://docs.localstack.cloud/ to update your CLI."
        )

    import subprocess
    from subprocess import CalledProcessError

    console.rule("Updating LocalStack CLI")
    with console.status("Updating LocalStack CLI..."):
        # upgrade via pip using the same interpreter that runs the CLI
        pip_cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "localstack"]
        try:
            subprocess.check_output(pip_cmd)
        except CalledProcessError:
            console.print(":heavy_multiplication_x: LocalStack CLI update failed", style="bold red")
        else:
            console.print(":heavy_check_mark: LocalStack CLI updated")
+
+
@localstack_update.command(
    name="docker-images", short_help="Update docker images LocalStack depends on"
)
@publish_invocation
def cmd_update_docker_images() -> None:
    """
    Update all Docker images LocalStack depends on.

    This command updates all Docker LocalStack docker images, as well as other Docker images
    LocalStack depends on (and which have been used before / are present on the machine).
    """
    from localstack.utils.docker_utils import DOCKER_CLIENT

    console.rule("Updating docker images")

    image_prefixes = [
        "localstack/",
        "public.ecr.aws/lambda",
    ]

    def _is_relevant(image_name: str) -> bool:
        # skip dangling images (name ends with ":"); match known prefixes with
        # or without an explicit "docker.io/" registry prefix
        if image_name.endswith(":"):
            return False
        return any(
            image_name.startswith(prefix) or image_name.startswith(f"docker.io/{prefix}")
            for prefix in image_prefixes
        )

    all_images = DOCKER_CLIENT.get_docker_image_names(strip_latest=False)
    update_images([image for image in all_images if _is_relevant(image)])
+
+
def update_images(image_list: List[str]) -> None:
    """Pull every image in *image_list*, reporting per-image status and a final summary.

    An image counts as "updated" when its ID changes after the pull; pull
    failures are reported but do not abort the remaining images.
    """
    from rich.markup import escape
    from rich.progress import MofNCompleteColumn, Progress

    from localstack.utils.container_utils.container_client import ContainerException
    from localstack.utils.docker_utils import DOCKER_CLIENT

    updated_count = 0
    failed_count = 0
    progress = Progress(
        *Progress.get_default_columns(), MofNCompleteColumn(), transient=True, console=console
    )
    with progress:
        for image in progress.track(image_list, description="Processing image..."):
            try:
                updated = False
                # compare the image ID before/after pulling to detect an actual update
                hash_before_pull = DOCKER_CLIENT.inspect_image(image_name=image, pull=False)["Id"]
                DOCKER_CLIENT.pull_image(image)
                if (
                    hash_before_pull
                    != DOCKER_CLIENT.inspect_image(image_name=image, pull=False)["Id"]
                ):
                    updated = True
                    updated_count += 1
                console.print(
                    f":heavy_check_mark: Image {escape(image)} {'updated' if updated else 'up-to-date'}.",
                    style="bold" if updated else None,
                    highlight=False,
                )
            except ContainerException as e:
                console.print(
                    f":heavy_multiplication_x: Image {escape(image)} pull failed: {e.message}",
                    style="bold red",
                    highlight=False,
                )
                failed_count += 1
    console.rule()
    console.print(
        f"Images updated: {updated_count}, Images failed: {failed_count}, total images processed: {len(image_list)}."
    )
+
+
@localstack.command(name="completion", short_help="CLI shell completion")
@click.pass_context
@click.argument(
    "shell", required=True, type=click.Choice(["bash", "zsh", "fish"], case_sensitive=False)
)
@publish_invocation
def localstack_completion(ctx: click.Context, shell: str) -> None:
    """
    Print shell completion code for the specified shell (bash, zsh, or fish).
    The shell code must be evaluated to enable the interactive shell completion of LocalStack CLI commands.
    This is usually done by sourcing it from the .bash_profile.

    \b
    Examples:
    # Bash
    ## Bash completion on Linux depends on the 'bash-completion' package.
    ## Write the LocalStack CLI completion code for bash to a file and source it from .bash_profile
    localstack completion bash > ~/.localstack/completion.bash.inc
    printf "
    # LocalStack CLI bash completion
    source '$HOME/.localstack/completion.bash.inc'
    " >> $HOME/.bash_profile
    source $HOME/.bash_profile
    \b
    # zsh
    ## Set the LocalStack completion code for zsh to autoload on startup:
    localstack completion zsh > "${fpath[1]}/_localstack"
    \b
    # fish
    ## Set the LocalStack completion code for fish to autoload on startup:
    localstack completion fish > ~/.config/fish/completions/localstack.fish
    """

    # lookup the completion, raise an error if the given completion is not found
    import click.shell_completion

    comp_cls = click.shell_completion.get_completion_class(shell)
    if comp_cls is None:
        raise CLIError("Completion for given shell could not be found.")

    # Click's program name is the base path of sys.argv[0]
    path = sys.argv[0]
    prog_name = os.path.basename(path)

    # create the completion variable according to the docs
    # https://click.palletsprojects.com/en/8.1.x/shell-completion/#enabling-completion
    complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()

    # instantiate the completion class and print the completion source
    # (the emitted script is meant to be evaluated/sourced by the user's shell)
    comp = comp_cls(ctx.command, {}, prog_name, complete_var)
    click.echo(comp.source())
+
+
def print_version() -> None:
    """Print the CLI version line of the startup header."""
    version_line = f"- [bold]LocalStack CLI:[/bold] [blue]{VERSION}[/blue]"
    console.print(version_line)
+
+
def print_profile() -> None:
    """Print the loaded configuration profiles, if any were applied."""
    profiles = config.LOADED_PROFILES
    if profiles:
        console.print(f"- [bold]Profile:[/bold] [blue]{', '.join(profiles)}[/blue]")
+
+
def print_app() -> None:
    """Print the link to the LocalStack web app."""
    app_line = "- [bold]App:[/bold] https://app.localstack.cloud"
    console.print(app_line)
+
+
def print_banner() -> None:
    """Print the ASCII-art banner."""
    # plain print() rather than console.print() — presumably to avoid rich
    # interpreting the art's characters as markup; confirm before changing
    print(BANNER)
+
+
def is_frozen_bundle() -> bool:
    """
    :return: true if we are currently running in a frozen bundle / a pyinstaller binary.
    """
    # PyInstaller sets both sys.frozen and sys._MEIPASS at runtime
    # https://pyinstaller.org/en/stable/runtime-information.html
    frozen = bool(getattr(sys, "frozen", False))
    return frozen and hasattr(sys, "_MEIPASS")
diff --git a/localstack-core/localstack/cli/lpm.py b/localstack-core/localstack/cli/lpm.py
new file mode 100644
index 0000000000000..ad4a6f5489d5c
--- /dev/null
+++ b/localstack-core/localstack/cli/lpm.py
@@ -0,0 +1,139 @@
+import itertools
+import logging
+from multiprocessing.pool import ThreadPool
+from typing import List, Optional
+
+import click
+from rich.console import Console
+
+from localstack import config
+from localstack.cli.exceptions import CLIError
+from localstack.packages import InstallTarget, Package
+from localstack.packages.api import NoSuchPackageException, PackagesPluginManager
+from localstack.utils.bootstrap import setup_logging
+
+LOG = logging.getLogger(__name__)
+
+console = Console()
+
+
+@click.group()
+def cli():
+ """
+ The LocalStack Package Manager (lpm) CLI is a set of commands to install third-party packages used by localstack
+ service providers.
+
+ Here are some handy commands:
+
+ List all packages
+
+ python -m localstack.cli.lpm list
+
+ Install DynamoDB Local:
+
+ python -m localstack.cli.lpm install dynamodb-local
+
+ Install all community packages, four in parallel:
+
+ python -m localstack.cli.lpm list | grep "/community" | cut -d'/' -f1 | xargs python -m localstack.cli.lpm install --parallel 4
+ """
+ setup_logging()
+
+
+def _do_install_package(package: Package, version: str = None, target: InstallTarget = None):
+ console.print(f"installing... [bold]{package}[/bold]")
+ try:
+ package.install(version=version, target=target)
+ console.print(f"[green]installed[/green] [bold]{package}[/bold]")
+ except Exception as e:
+ console.print(f"[red]error[/red] installing {package}: {e}")
+ raise e
+
+
+@cli.command()
+@click.argument("package", nargs=-1, required=True)
+@click.option(
+ "--parallel",
+ type=int,
+ default=1,
+ required=False,
+ help="how many installers to run in parallel processes",
+)
+@click.option(
+ "--version",
+ type=str,
+ default=None,
+ required=False,
+ help="version to install of a package",
+)
+@click.option(
+ "--target",
+ type=click.Choice([target.name.lower() for target in InstallTarget]),
+ default=None,
+ required=False,
+ help="target of the installation",
+)
+def install(
+ package: List[str],
+ parallel: Optional[int] = 1,
+ version: Optional[str] = None,
+ target: Optional[str] = None,
+):
+ """Install one or more packages."""
+ try:
+ if target:
+ target = InstallTarget[str.upper(target)]
+ else:
+ # LPM is meant to be used at build-time, the default target is static_libs
+ target = InstallTarget.STATIC_LIBS
+
+ # collect installers and install in parallel:
+ console.print(f"resolving packages: {package}")
+ package_manager = PackagesPluginManager()
+ package_manager.load_all()
+ package_instances = package_manager.get_packages(package, version)
+
+ if parallel > 1:
+ console.print(f"install {parallel} packages in parallel:")
+
+ config.dirs.mkdirs()
+
+ with ThreadPool(processes=parallel) as pool:
+ pool.starmap(
+ _do_install_package,
+ zip(package_instances, itertools.repeat(version), itertools.repeat(target)),
+ )
+ except NoSuchPackageException as e:
+ LOG.debug(str(e), exc_info=e)
+ raise CLIError(str(e))
+ except Exception as e:
+ LOG.debug("one or more package installations failed.", exc_info=e)
+ raise CLIError("one or more package installations failed.")
+
+
+@cli.command(name="list")
+@click.option(
+ "-v",
+ "--verbose",
+ is_flag=True,
+ default=False,
+ required=False,
+ help="Verbose output (show additional info on packages)",
+)
+def list_packages(verbose: bool):
+ """List available packages of all repositories"""
+ package_manager = PackagesPluginManager()
+ package_manager.load_all()
+ packages = package_manager.get_all_packages()
+ for package_name, package_scope, package_instance in packages:
+ console.print(f"[green]{package_name}[/green]/{package_scope}")
+ if verbose:
+ for version in package_instance.get_versions():
+ if version == package_instance.default_version:
+ console.print(f" - [bold]{version} (default)[/bold]", highlight=False)
+ else:
+ console.print(f" - {version}", highlight=False)
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/localstack-core/localstack/cli/main.py b/localstack-core/localstack/cli/main.py
new file mode 100644
index 0000000000000..de1f04e38cac5
--- /dev/null
+++ b/localstack-core/localstack/cli/main.py
@@ -0,0 +1,22 @@
+import os
+
+
+def main():
+ # indicate to the environment we are starting from the CLI
+ os.environ["LOCALSTACK_CLI"] = "1"
+
+ # config profiles are the first thing that need to be loaded (especially before localstack.config!)
+ from .profiles import set_and_remove_profile_from_sys_argv
+
+ # WARNING: This function modifies sys.argv to remove the profile argument.
+ set_and_remove_profile_from_sys_argv()
+
+ # initialize CLI plugins
+ from .localstack import create_with_plugins
+
+ cli = create_with_plugins()
+ cli()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/localstack-core/localstack/cli/plugin.py b/localstack-core/localstack/cli/plugin.py
new file mode 100644
index 0000000000000..f9af88474a6d5
--- /dev/null
+++ b/localstack-core/localstack/cli/plugin.py
@@ -0,0 +1,39 @@
+import abc
+import logging
+import os
+
+import click
+from plux import Plugin, PluginManager
+
+LOG = logging.getLogger(__name__)
+
+
+class LocalstackCli:
+ group: click.Group
+
+ def __call__(self, *args, **kwargs):
+ self.group(*args, **kwargs)
+
+
+class LocalstackCliPlugin(Plugin):
+ namespace = "localstack.plugins.cli"
+
+ def load(self, cli) -> None:
+ self.attach(cli)
+
+ @abc.abstractmethod
+ def attach(self, cli: LocalstackCli) -> None:
+ """
+ Attach commands to the `localstack` CLI.
+
+ :param cli: the cli object
+ """
+
+
+def load_cli_plugins(cli):
+ if os.environ.get("DEBUG_PLUGINS", "0").lower() in ("true", "1"):
+ # importing localstack.config is still quite expensive...
+ logging.basicConfig(level=logging.DEBUG)
+
+ loader = PluginManager("localstack.plugins.cli", load_args=(cli,))
+ loader.load_all()
diff --git a/localstack-core/localstack/cli/plugins.py b/localstack-core/localstack/cli/plugins.py
new file mode 100644
index 0000000000000..c63588161d304
--- /dev/null
+++ b/localstack-core/localstack/cli/plugins.py
@@ -0,0 +1,134 @@
+import os
+import time
+
+import click
+from plux import PluginManager
+from plux.build.setuptools import find_plugins
+from plux.core.entrypoint import spec_to_entry_point
+from rich import print as rprint
+from rich.console import Console
+from rich.table import Table
+from rich.tree import Tree
+
+from localstack.cli.exceptions import CLIError
+
+console = Console()
+
+
+@click.group()
+def cli():
+ """
+ The plugins CLI is a set of commands to help troubleshoot LocalStack's plugin mechanism.
+ """
+ pass
+
+
+@cli.command()
+@click.option("--where", type=str, default=os.path.abspath(os.curdir))
+@click.option("--exclude", multiple=True, default=())
+@click.option("--include", multiple=True, default=("*",))
+@click.option("--output", type=str, default="tree")
+def find(where, exclude, include, output):
+ """
+ Find plugins by scanning the given path for PluginSpecs.
+ It starts from the current directory if --where is not specified.
+ This is what a setup.py method would run as a build step, i.e., discovering entry points.
+ """
+ with console.status(f"Scanning path {where}"):
+ plugins = find_plugins(where, exclude, include)
+
+ if output == "tree":
+ tree = Tree("Entrypoints")
+ for namespace, entry_points in plugins.items():
+ node = tree.add(f"[bold]{namespace}")
+
+ t = Table()
+ t.add_column("Name")
+ t.add_column("Location")
+
+ for ep in entry_points:
+ key, value = ep.split("=")
+ t.add_row(key, value)
+
+ node.add(t)
+
+ rprint(tree)
+ elif output == "dict":
+ rprint(dict(plugins))
+ else:
+ raise CLIError("unknown output format %s" % output)
+
+
+@cli.command("list")
+@click.option("--namespace", type=str, required=True)
+def cmd_list(namespace):
+ """
+ List all available plugins using a PluginManager from available endpoints.
+ """
+ manager = PluginManager(namespace)
+
+ t = Table()
+ t.add_column("Name")
+ t.add_column("Factory")
+
+ for spec in manager.list_plugin_specs():
+ ep = spec_to_entry_point(spec)
+ t.add_row(spec.name, ep.value)
+
+ rprint(t)
+
+
+@cli.command()
+@click.option("--namespace", type=str, required=True)
+@click.option("--name", type=str, required=True)
+def load(namespace, name):
+ """
+ Attempts to load a plugin using a PluginManager.
+ """
+ manager = PluginManager(namespace)
+
+ with console.status(f"Loading {namespace}:{name}"):
+ then = time.time()
+ plugin = manager.load(name)
+ took = time.time() - then
+
+ rprint(
+ f":tada: successfully loaded [bold][green]{namespace}[/green][/bold]:[bold][cyan]{name}[/cyan][/bold] ({type(plugin)})"
+ )
+ rprint(f":stopwatch: loading took {took:.4f} s")
+
+
+@cli.command()
+@click.option("--namespace", type=str)
+def cache(namespace):
+ """
+ Outputs the stevedore entrypoints cache from which plugins are loaded.
+ """
+ from stevedore._cache import _c
+
+ data = _c._get_data_for_path(None)
+
+ tree = Tree("Entrypoints")
+ for group, entry_points in data.get("groups").items():
+ if namespace and group != namespace:
+ continue
+ node = tree.add(f"[bold]{group}")
+
+ t = Table()
+ t.add_column("Name")
+ t.add_column("Value")
+
+ for key, value, _ in entry_points:
+ t.add_row(key, value)
+
+ node.add(t)
+
+ if namespace:
+ rprint(t)
+ return
+
+ rprint(tree)
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/localstack-core/localstack/cli/profiles.py b/localstack-core/localstack/cli/profiles.py
new file mode 100644
index 0000000000000..5af5e089658a4
--- /dev/null
+++ b/localstack-core/localstack/cli/profiles.py
@@ -0,0 +1,66 @@
+import argparse
+import os
+import sys
+from typing import Optional
+
+# important: this needs to be free of localstack imports
+
+
+def set_and_remove_profile_from_sys_argv():
+ """
+ Performs the following steps:
+
+ 1. Use argparse to parse the command line arguments for the --profile flag.
+ All occurrences are removed from the sys.argv list, and the value from
+ the last occurrence is used. This allows the user to specify a profile
+ at any point on the command line.
+
+ 2. If a --profile flag is not found, check for the -p flag. The first
+ occurrence of the -p flag is used and it is not removed from sys.argv.
+ The reasoning for this is that at least one of the CLI subcommands has
+ a -p flag, and we want to keep it in sys.argv for that command to
+ pick up. An existing bug means that if a -p flag is used with a
+ subcommand, it could erroneously be used as the profile value as well.
+ This behaviour is undesired, but we must maintain back-compatibility of
+ allowing the profile to be specified using -p.
+
+ 3. If a profile is found, the 'CONFIG_PROFILE' os variable is set
+ accordingly. This is later picked up by ``localstack.config``.
+
+ WARNING: Any --profile options are REMOVED from sys.argv, so that they are
+ not passed to the localstack CLI. This allows the profile option
+ to be set at any point on the command line.
+ """
+ parser = argparse.ArgumentParser(add_help=False)
+ parser.add_argument("--profile")
+ namespace, sys.argv = parser.parse_known_args(sys.argv)
+ profile = namespace.profile
+
+ if not profile:
+ # if no profile is given, check for the -p argument
+ profile = parse_p_argument(sys.argv)
+
+ if profile:
+ os.environ["CONFIG_PROFILE"] = profile.strip()
+
+
+def parse_p_argument(args) -> Optional[str]:
+ """
+ Lightweight arg parsing to find the first occurrence of ``-p <value>``, or ``-p=<value>`` and return the value of
+ ``<value>`` from the given arguments.
+
+ :param args: list of CLI arguments
+ :returns: the value of ``-p``.
+ """
+ for i, current_arg in enumerate(args):
+ if current_arg.startswith("-p="):
+ # if using the "=" notation, we remove the "-p=" prefix to get the value
+ return current_arg[3:]
+ if current_arg == "-p":
+ # otherwise use the next arg in the args list as value
+ try:
+ return args[i + 1]
+ except IndexError:
+ return None
+
+ return None
diff --git a/localstack-core/localstack/config.py b/localstack-core/localstack/config.py
new file mode 100644
index 0000000000000..efbfbf83e6fd3
--- /dev/null
+++ b/localstack-core/localstack/config.py
@@ -0,0 +1,1655 @@
+import ipaddress
+import logging
+import os
+import platform
+import re
+import socket
+import subprocess
+import tempfile
+import time
+import warnings
+from collections import defaultdict
+from typing import Any, Dict, List, Mapping, Optional, Tuple, TypeVar, Union
+
+from localstack import constants
+from localstack.constants import (
+ DEFAULT_BUCKET_MARKER_LOCAL,
+ DEFAULT_DEVELOP_PORT,
+ DEFAULT_VOLUME_DIR,
+ ENV_INTERNAL_TEST_COLLECT_METRIC,
+ ENV_INTERNAL_TEST_RUN,
+ FALSE_STRINGS,
+ LOCALHOST,
+ LOCALHOST_IP,
+ LOCALSTACK_ROOT_FOLDER,
+ LOG_LEVELS,
+ TRACE_LOG_LEVELS,
+ TRUE_STRINGS,
+)
+
+T = TypeVar("T", str, int)
+
+# keep track of start time, for performance debugging
+load_start_time = time.time()
+
+
+class Directories:
+ """
+ Holds different directories available to localstack. Some directories are shared between the host and the
+ localstack container, some live only on the host and others in the container.
+
+ Attributes:
+ static_libs: container only; binaries and libraries statically packaged with the image
+ var_libs: shared; binaries and libraries+data computed at runtime: lazy-loaded binaries, ssl cert, ...
+ cache: shared; ephemeral data that has to persist across localstack runs and reboots
+ tmp: container only; ephemeral data that has to persist across localstack runs but not reboots
+ mounted_tmp: shared; same as above, but shared for persistence across different containers, tests, ...
+ functions: shared; volume to communicate between host<->lambda containers
+ data: shared; holds localstack state, pods, ...
+ config: host only; pre-defined configuration values, cached credentials, machine id, ...
+ init: shared; user-defined provisioning scripts executed in the container when it starts
+ logs: shared; log files produced by localstack
+ """
+
+ static_libs: str
+ var_libs: str
+ cache: str
+ tmp: str
+ mounted_tmp: str
+ functions: str
+ data: str
+ config: str
+ init: str
+ logs: str
+
+ def __init__(
+ self,
+ static_libs: str,
+ var_libs: str,
+ cache: str,
+ tmp: str,
+ mounted_tmp: str,
+ functions: str,
+ data: str,
+ config: str,
+ init: str,
+ logs: str,
+ ) -> None:
+ super().__init__()
+ self.static_libs = static_libs
+ self.var_libs = var_libs
+ self.cache = cache
+ self.tmp = tmp
+ self.mounted_tmp = mounted_tmp
+ self.functions = functions
+ self.data = data
+ self.config = config
+ self.init = init
+ self.logs = logs
+
+ @staticmethod
+ def defaults() -> "Directories":
+ """Returns Localstack directory paths based on the localstack filesystem hierarchy."""
+ return Directories(
+ static_libs="/usr/lib/localstack",
+ var_libs=f"{DEFAULT_VOLUME_DIR}/lib",
+ cache=f"{DEFAULT_VOLUME_DIR}/cache",
+ tmp=os.path.join(tempfile.gettempdir(), "localstack"),
+ mounted_tmp=f"{DEFAULT_VOLUME_DIR}/tmp",
+ functions=f"{DEFAULT_VOLUME_DIR}/tmp", # FIXME: remove - this was misconceived
+ data=f"{DEFAULT_VOLUME_DIR}/state",
+ logs=f"{DEFAULT_VOLUME_DIR}/logs",
+ config="/etc/localstack/conf.d", # for future use
+ init="/etc/localstack/init",
+ )
+
+ @staticmethod
+ def for_container() -> "Directories":
+ """
+ Returns Localstack directory paths as they are defined within the container. Everything shared and writable
+ lives in /var/lib/localstack or {tempfile.gettempdir()}/localstack.
+
+ :returns: Directories object
+ """
+ defaults = Directories.defaults()
+
+ return Directories(
+ static_libs=defaults.static_libs,
+ var_libs=defaults.var_libs,
+ cache=defaults.cache,
+ tmp=defaults.tmp,
+ mounted_tmp=defaults.mounted_tmp,
+ functions=defaults.functions,
+ data=defaults.data if PERSISTENCE else os.path.join(defaults.tmp, "state"),
+ config=defaults.config,
+ logs=defaults.logs,
+ init=defaults.init,
+ )
+
+ @staticmethod
+ def for_host() -> "Directories":
+ """Return directories used for running localstack in host mode. Note that these are *not* the directories
+ that are mounted into the container when the user starts localstack."""
+ root = os.environ.get("FILESYSTEM_ROOT") or os.path.join(
+ LOCALSTACK_ROOT_FOLDER, ".filesystem"
+ )
+ root = os.path.abspath(root)
+
+ defaults = Directories.for_container()
+
+ tmp = os.path.join(root, defaults.tmp.lstrip("/"))
+ data = os.path.join(root, defaults.data.lstrip("/"))
+
+ return Directories(
+ static_libs=os.path.join(root, defaults.static_libs.lstrip("/")),
+ var_libs=os.path.join(root, defaults.var_libs.lstrip("/")),
+ cache=os.path.join(root, defaults.cache.lstrip("/")),
+ tmp=tmp,
+ mounted_tmp=os.path.join(root, defaults.mounted_tmp.lstrip("/")),
+ functions=os.path.join(root, defaults.functions.lstrip("/")),
+ data=data if PERSISTENCE else os.path.join(tmp, "state"),
+ config=os.path.join(root, defaults.config.lstrip("/")),
+ init=os.path.join(root, defaults.init.lstrip("/")),
+ logs=os.path.join(root, defaults.logs.lstrip("/")),
+ )
+
+ @staticmethod
+ def for_cli() -> "Directories":
+ """Returns directories used for when running localstack CLI commands from the host system. Unlike
+ ``for_container``, these needs to be cross-platform. Ideally, this should not be needed at all,
+ because the localstack runtime and CLI do not share any control paths. There are a handful of
+ situations where directories or files may be created lazily for CLI commands. Some paths are
+ intentionally set to None to provoke errors if these paths are used from the CLI - which they
+ shouldn't. This is a symptom of not having a clear separation between CLI/runtime code, which will
+ be a future project."""
+ import tempfile
+
+ from localstack.utils import files
+
+ tmp_dir = os.path.join(tempfile.gettempdir(), "localstack-cli")
+ cache_dir = (files.get_user_cache_dir()).absolute() / "localstack-cli"
+
+ return Directories(
+ static_libs=None,
+ var_libs=None,
+ cache=str(cache_dir), # used by analytics metadata
+ tmp=tmp_dir,
+ mounted_tmp=tmp_dir,
+ functions=None,
+ data=os.path.join(tmp_dir, "state"), # used by localstack-pro config TODO: remove
+ logs=os.path.join(tmp_dir, "logs"), # used for container logs
+ config=None, # in the context of the CLI, config.CONFIG_DIR should be used
+ init=None,
+ )
+
+ def mkdirs(self):
+ for folder in [
+ self.static_libs,
+ self.var_libs,
+ self.cache,
+ self.tmp,
+ self.mounted_tmp,
+ self.functions,
+ self.data,
+ self.config,
+ self.init,
+ self.logs,
+ ]:
+ if folder and not os.path.exists(folder):
+ try:
+ os.makedirs(folder)
+ except Exception:
+ # this can happen due to a race condition when starting
+ # multiple processes in parallel. Should be safe to ignore
+ pass
+
+ def __str__(self):
+ return str(self.__dict__)
+
+
+def eval_log_type(env_var_name: str) -> Union[str, bool]:
+ """Get the log type from environment variable"""
+ ls_log = os.environ.get(env_var_name, "").lower().strip()
+ return ls_log if ls_log in LOG_LEVELS else False
+
+
+def parse_boolean_env(env_var_name: str) -> Optional[bool]:
+ """Parse the value of the given env variable and return True/False, or None if it is not a boolean value."""
+ value = os.environ.get(env_var_name, "").lower().strip()
+ if value in TRUE_STRINGS:
+ return True
+ if value in FALSE_STRINGS:
+ return False
+ return None
+
+
+def is_env_true(env_var_name: str) -> bool:
+ """Whether the given environment variable has a truthy value."""
+ return os.environ.get(env_var_name, "").lower().strip() in TRUE_STRINGS
+
+
+def is_env_not_false(env_var_name: str) -> bool:
+ """Whether the given environment variable is empty or has a truthy value."""
+ return os.environ.get(env_var_name, "").lower().strip() not in FALSE_STRINGS
+
+
+def load_environment(profiles: str = None, env=os.environ) -> List[str]:
+ """Loads the environment variables from ~/.localstack/{profile}.env, for each profile listed in the profiles.
+ :param env: environment to load profile to. Defaults to `os.environ`
+ :param profiles: a comma separated list of profiles to load (defaults to "default")
+ :returns str: the list of the actually loaded profiles (might be the fallback)
+ """
+ if not profiles:
+ profiles = "default"
+
+ profiles = profiles.split(",")
+ environment = {}
+ import dotenv
+
+ for profile in profiles:
+ profile = profile.strip()
+ path = os.path.join(CONFIG_DIR, f"{profile}.env")
+ if not os.path.exists(path):
+ continue
+ environment.update(dotenv.dotenv_values(path))
+
+ for k, v in environment.items():
+ # we do not want to override the environment
+ if k not in env and v is not None:
+ env[k] = v
+
+ return profiles
+
+
+def is_persistence_enabled() -> bool:
+ return PERSISTENCE and dirs.data
+
+
+def is_linux() -> bool:
+ return platform.system() == "Linux"
+
+
+def is_macos() -> bool:
+ return platform.system() == "Darwin"
+
+
+def is_windows() -> bool:
+ return platform.system().lower() == "windows"
+
+
+def is_wsl() -> bool:
+ return platform.system().lower() == "linux" and os.environ.get("WSL_DISTRO_NAME") is not None
+
+
+def ping(host):
+ """Returns True if the host responds to a ping request"""
+ is_in_windows = is_windows()
+ ping_opts = "-n 1 -w 2000" if is_in_windows else "-c 1 -W 2"
+ args = "ping %s %s" % (ping_opts, host)
+ return (
+ subprocess.call(
+ args, shell=not is_in_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ == 0
+ )
+
+
+def in_docker():
+ """
+ Returns True if running in a docker container, else False
+ Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups
+ """
+ if OVERRIDE_IN_DOCKER is not None:
+ return OVERRIDE_IN_DOCKER
+
+ # check some marker files that we create in our Dockerfiles
+ for path in [
+ "/usr/lib/localstack/.community-version",
+ "/usr/lib/localstack/.pro-version",
+ "/tmp/localstack/.marker",
+ ]:
+ if os.path.isfile(path):
+ return True
+
+ # details: https://github.com/localstack/localstack/pull/4352
+ if os.path.exists("/.dockerenv"):
+ return True
+ if os.path.exists("/run/.containerenv"):
+ return True
+
+ if not os.path.exists("/proc/1/cgroup"):
+ return False
+ try:
+ if any(
+ [
+ os.path.exists("/sys/fs/cgroup/memory/docker/"),
+ any(
+ "docker-" in file_names
+ for file_names in os.listdir("/sys/fs/cgroup/memory/system.slice")
+ ),
+ os.path.exists("/sys/fs/cgroup/docker/"),
+ any(
+ "docker-" in file_names
+ for file_names in os.listdir("/sys/fs/cgroup/system.slice/")
+ ),
+ ]
+ ):
+ return False
+ except Exception:
+ pass
+ with open("/proc/1/cgroup", "rt") as ifh:
+ content = ifh.read()
+ if "docker" in content or "buildkit" in content:
+ return True
+ os_hostname = socket.gethostname()
+ if os_hostname and os_hostname in content:
+ return True
+
+ # containerd does not set any specific file or config, but it does use
+ # io.containerd.snapshotter.v1.overlayfs as the overlay filesystem for `/`.
+ try:
+ with open("/proc/mounts", "rt") as infile:
+ for line in infile:
+ line = line.strip()
+
+ if not line:
+ continue
+
+ # skip comments
+ if line[0] == "#":
+ continue
+
+ # format (man 5 fstab)
+ # <spec> <file> <vfstype> <mntops> <freq> <passno>
+ parts = line.split()
+ if len(parts) < 4:
+ # badly formatted line
+ continue
+
+ mount_point = parts[1]
+ options = parts[3]
+
+ # only consider the root filesystem
+ if mount_point != "/":
+ continue
+
+ if "io.containerd" in options:
+ return True
+
+ except FileNotFoundError:
+ pass
+
+ return False
+
+
+# whether the `in_docker` check should always return True or False
+OVERRIDE_IN_DOCKER = parse_boolean_env("OVERRIDE_IN_DOCKER")
+
+is_in_docker = in_docker()
+is_in_linux = is_linux()
+is_in_macos = is_macos()
+is_in_windows = is_windows()
+is_in_wsl = is_wsl()
+default_ip = "0.0.0.0" if is_in_docker else "127.0.0.1"
+
+# CLI specific: the configuration profile to load
+CONFIG_PROFILE = os.environ.get("CONFIG_PROFILE", "").strip()
+
+# CLI specific: host configuration directory
+CONFIG_DIR = os.environ.get("CONFIG_DIR", os.path.expanduser("~/.localstack"))
+
+# keep this on top to populate the environment
+try:
+ # CLI specific: the actually loaded configuration profile
+ LOADED_PROFILES = load_environment(CONFIG_PROFILE)
+except ImportError:
+ # dotenv may not be available in lambdas or other environments where config is loaded
+ LOADED_PROFILES = None
+
+# loaded components name - default: all components are loaded and the first one is chosen
+RUNTIME_COMPONENTS = os.environ.get("RUNTIME_COMPONENTS", "").strip()
+
+# directory for persisting data (TODO: deprecated, simply use PERSISTENCE=1)
+DATA_DIR = os.environ.get("DATA_DIR", "").strip()
+
+# whether localstack should persist service state across localstack runs
+PERSISTENCE = is_env_true("PERSISTENCE")
+
+# the strategy for loading snapshots from disk when `PERSISTENCE=1` is used (on_startup, on_request, manual)
+SNAPSHOT_LOAD_STRATEGY = os.environ.get("SNAPSHOT_LOAD_STRATEGY", "").upper()
+
+# the strategy saving snapshots to disk when `PERSISTENCE=1` is used (on_shutdown, on_request, scheduled, manual)
+SNAPSHOT_SAVE_STRATEGY = os.environ.get("SNAPSHOT_SAVE_STRATEGY", "").upper()
+
+# the flush interval (in seconds) for persistence when the snapshot save strategy is set to "scheduled"
+SNAPSHOT_FLUSH_INTERVAL = int(os.environ.get("SNAPSHOT_FLUSH_INTERVAL") or 15)
+
+# whether to clear config.dirs.tmp on startup and shutdown
+CLEAR_TMP_FOLDER = is_env_not_false("CLEAR_TMP_FOLDER")
+
+# folder for temporary files and data
+TMP_FOLDER = os.path.join(tempfile.gettempdir(), "localstack")
+
+# this is exclusively for the CLI to configure the container mount into /var/lib/localstack
+VOLUME_DIR = os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip() or TMP_FOLDER
+
+# fix for Mac OS, to be able to mount /var/folders in Docker
+if TMP_FOLDER.startswith("/var/folders/") and os.path.exists("/private%s" % TMP_FOLDER):
+ TMP_FOLDER = "/private%s" % TMP_FOLDER
+
+# whether to enable verbose debug logging ("LOG" is used when using the CLI with LOCALSTACK_LOG instead of LS_LOG)
+LS_LOG = eval_log_type("LS_LOG") or eval_log_type("LOG")
+DEBUG = is_env_true("DEBUG") or LS_LOG in TRACE_LOG_LEVELS
+
+# PUBLIC PREVIEW: 0 (default), 1 (preview)
+# When enabled it triggers specialised workflows for the debugging.
+LAMBDA_DEBUG_MODE = is_env_true("LAMBDA_DEBUG_MODE")
+
+# path to the lambda debug mode configuration file.
+LAMBDA_DEBUG_MODE_CONFIG_PATH = os.environ.get("LAMBDA_DEBUG_MODE_CONFIG_PATH")
+
+# EXPERIMENTAL: allow setting custom log levels for individual loggers
+LOG_LEVEL_OVERRIDES = os.environ.get("LOG_LEVEL_OVERRIDES", "")
+
+# whether to enable debugpy
+DEVELOP = is_env_true("DEVELOP")
+
+# PORT FOR DEBUGGER
+DEVELOP_PORT = int(os.environ.get("DEVELOP_PORT", "").strip() or DEFAULT_DEVELOP_PORT)
+
+ # whether to make debugpy wait for a debugger client
+WAIT_FOR_DEBUGGER = is_env_true("WAIT_FOR_DEBUGGER")
+
+# whether to assume http or https for `get_protocol`
+USE_SSL = is_env_true("USE_SSL")
+
+# Whether to report internal failures as 500 or 501 errors.
+FAIL_FAST = is_env_true("FAIL_FAST")
+
+# whether to run in TF compatibility mode for TF integration tests
+# (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.)
+TF_COMPAT_MODE = is_env_true("TF_COMPAT_MODE")
+
+# default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility)
+DEFAULT_ENCODING = "utf-8"
+
+# path to local Docker UNIX domain socket
+DOCKER_SOCK = os.environ.get("DOCKER_SOCK", "").strip() or "/var/run/docker.sock"
+
+# additional flags to pass to "docker run" when starting the stack in Docker
+DOCKER_FLAGS = os.environ.get("DOCKER_FLAGS", "").strip()
+
+# command used to run Docker containers (e.g., set to "sudo docker" to run as sudo)
+DOCKER_CMD = os.environ.get("DOCKER_CMD", "").strip() or "docker"
+
+# use the command line docker client instead of the new sdk version, might get removed in the future
+LEGACY_DOCKER_CLIENT = is_env_true("LEGACY_DOCKER_CLIENT")
+
+# Docker image to use when starting up containers for port checks
+PORTS_CHECK_DOCKER_IMAGE = os.environ.get("PORTS_CHECK_DOCKER_IMAGE", "").strip()
+
+
+def is_trace_logging_enabled():
+ if LS_LOG:
+ log_level = str(LS_LOG).upper()
+ return log_level.lower() in TRACE_LOG_LEVELS
+ return False
+
+
+# set log levels immediately, but will be overwritten later by setup_logging
+if DEBUG:
+ logging.getLogger("").setLevel(logging.DEBUG)
+ logging.getLogger("localstack").setLevel(logging.DEBUG)
+
+LOG = logging.getLogger(__name__)
+if is_trace_logging_enabled():
+ load_end_time = time.time()
+ LOG.debug(
+ "Initializing the configuration took %s ms", int((load_end_time - load_start_time) * 1000)
+ )
+
+
+def is_ipv6_address(host: str) -> bool:
+ """
+ Returns True if the given host is an IPv6 address.
+ """
+
+ if not host:
+ return False
+
+ try:
+ ipaddress.IPv6Address(host)
+ return True
+ except ipaddress.AddressValueError:
+ return False
+
+
+class HostAndPort:
+ """
+ Definition of an address for a server to listen to.
+
+ Includes a `parse` method to convert from `str`, allowing for default fallbacks, as well as
+ some helper methods to help tests - particularly testing for equality and a hash function
+ so that `HostAndPort` instances can be used as keys to dictionaries.
+ """
+
+ host: str
+ port: int
+
+ def __init__(self, host: str, port: int):
+ self.host = host
+ self.port = port
+
+ @classmethod
+ def parse(
+ cls,
+ input: str,
+ default_host: str,
+ default_port: int,
+ ) -> "HostAndPort":
+ """
+ Parse a `HostAndPort` from strings like:
+ - 0.0.0.0:4566 -> host=0.0.0.0, port=4566
+ - 0.0.0.0 -> host=0.0.0.0, port=`default_port`
+ - :4566 -> host=`default_host`, port=4566
+ - [::]:4566 -> host=[::], port=4566
+ - [::1] -> host=[::1], port=`default_port`
+ """
+ host, port = default_host, default_port
+
+ # recognize IPv6 addresses (+ port)
+ if input.startswith("["):
+ ipv6_pattern = re.compile(r"^\[(?P<host>[^]]+)\](:(?P<port>\d+))?$")
+ match = ipv6_pattern.match(input)
+
+ if match:
+ host = match.group("host")
+ if not is_ipv6_address(host):
+ raise ValueError(
+ f"input looks like an IPv6 address (is enclosed in square brackets), but is not valid: {host}"
+ )
+ port_s = match.group("port")
+ if port_s:
+ port = cls._validate_port(port_s)
+ else:
+ raise ValueError(
+ f'input looks like an IPv6 address, but is invalid. Should be formatted "[ip]:port": {input}'
+ )
+
+ # recognize IPv4 address + port
+ elif ":" in input:
+ hostname, port_s = input.split(":", 1)
+ if hostname.strip():
+ host = hostname.strip()
+ port = cls._validate_port(port_s)
+ else:
+ if input.strip():
+ host = input.strip()
+
+ # validation
+ if port < 0 or port >= 2**16:
+ raise ValueError("port out of range")
+
+ return cls(host=host, port=port)
+
+ @classmethod
+ def _validate_port(cls, port_s: str) -> int:
+ try:
+ port = int(port_s)
+ except ValueError as e:
+ raise ValueError(f"specified port {port_s} not a number") from e
+
+ return port
+
+ def _get_unprivileged_port_range_start(self) -> int:
+ try:
+ with open(
+ "/proc/sys/net/ipv4/ip_unprivileged_port_start", "rt"
+ ) as unprivileged_port_start:
+ port = unprivileged_port_start.read()
+ return int(port.strip())
+ except Exception:
+ return 1024
+
+ def is_unprivileged(self) -> bool:
+ return self.port >= self._get_unprivileged_port_range_start()
+
+ def host_and_port(self) -> str:
+ formatted_host = f"[{self.host}]" if is_ipv6_address(self.host) else self.host
+ return f"{formatted_host}:{self.port}" if self.port is not None else formatted_host
+
+ def __hash__(self) -> int:
+ return hash((self.host, self.port))
+
+ # easier tests
+ def __eq__(self, other: "str | HostAndPort") -> bool:
+ if isinstance(other, self.__class__):
+ return self.host == other.host and self.port == other.port
+ elif isinstance(other, str):
+ return str(self) == other
+ else:
+ raise TypeError(f"cannot compare {self.__class__} to {other.__class__}")
+
+ def __str__(self) -> str:
+ return self.host_and_port()
+
+ def __repr__(self) -> str:
+ return f"HostAndPort(host={self.host}, port={self.port})"
+
+
+class UniqueHostAndPortList(List[HostAndPort]):
+    """
+    Container type that ensures that ports added to the list are unique based
+    on these rules:
+    - :: "trumps" any other binding on the same port, including both IPv6 and IPv4
+      addresses. All other bindings for this port are removed, since :: already
+      covers all interfaces. For example, adding 127.0.0.1:4566, [::1]:4566,
+      and [::]:4566 would result in only [::]:4566 being preserved.
+    - 0.0.0.0 "trumps" any other binding on IPv4 addresses only. IPv6 addresses
+      are not removed.
+    - Identical hosts and ports are de-duped
+    """
+
+    def __init__(self, iterable: Union[List[HostAndPort], None] = None):
+        super().__init__(iterable or [])
+        self._ensure_unique()
+
+    def _ensure_unique(self):
+        """
+        Ensure that all bindings on the same port are de-duped, applying the
+        precedence rules documented on the class.
+        """
+        if len(self) <= 1:
+            return
+
+        unique: List[HostAndPort] = list()
+
+        # Build a dictionary of hosts by port
+        hosts_by_port: Dict[int, List[str]] = defaultdict(list)
+        for item in self:
+            hosts_by_port[item.port].append(item.host)
+
+        # For any given port, dedupe the hosts
+        for port, hosts in hosts_by_port.items():
+            deduped_hosts = set(hosts)
+
+            # IPv6 all interfaces: this is the most general binding.
+            # Any others should be removed.
+            if "::" in deduped_hosts:
+                unique.append(HostAndPort(host="::", port=port))
+                continue
+            # IPv4 all interfaces: this is the next most general binding.
+            # Any others should be removed.
+            if "0.0.0.0" in deduped_hosts:
+                unique.append(HostAndPort(host="0.0.0.0", port=port))
+                continue
+
+            # All other bindings just need to be unique
+            unique.extend([HostAndPort(host=host, port=port) for host in deduped_hosts])
+
+        self.clear()
+        self.extend(unique)
+
+    def append(self, value: HostAndPort):
+        # re-run de-duplication after every append so the class invariant always holds
+        super().append(value)
+        self._ensure_unique()
+
+
+def populate_edge_configuration(
+    environment: Mapping[str, str],
+) -> Tuple[HostAndPort, UniqueHostAndPortList]:
+    """
+    Populate the LocalStack edge configuration from environment variables.
+
+    :param environment: mapping of environment variables (typically ``os.environ``),
+        of which ``LOCALSTACK_HOST`` and ``GATEWAY_LISTEN`` are consulted
+    :return: tuple of (localstack_host, gateway_listen) where the former is the single
+        external address and the latter the de-duplicated list of bind addresses
+    """
+    localstack_host_raw = environment.get("LOCALSTACK_HOST")
+    gateway_listen_raw = environment.get("GATEWAY_LISTEN")
+
+    # parse gateway listen from multiple comma-separated components
+    if gateway_listen_raw is not None:
+        gateway_listen = []
+        for address in gateway_listen_raw.split(","):
+            gateway_listen.append(
+                HostAndPort.parse(
+                    address.strip(),
+                    default_host=default_ip,
+                    default_port=constants.DEFAULT_PORT_EDGE,
+                )
+            )
+    else:
+        # use default if gateway listen is not defined
+        gateway_listen = [HostAndPort(host=default_ip, port=constants.DEFAULT_PORT_EDGE)]
+
+    # the actual value of the LOCALSTACK_HOST port now depends on what gateway listen actually listens to.
+    if localstack_host_raw is None:
+        localstack_host = HostAndPort(
+            host=constants.LOCALHOST_HOSTNAME, port=gateway_listen[0].port
+        )
+    else:
+        localstack_host = HostAndPort.parse(
+            localstack_host_raw,
+            default_host=constants.LOCALHOST_HOSTNAME,
+            default_port=gateway_listen[0].port,
+        )
+
+    # sanity checks: both values must have been resolved by one of the branches above
+    assert gateway_listen is not None
+    assert localstack_host is not None
+
+    return (
+        localstack_host,
+        UniqueHostAndPortList(gateway_listen),
+    )
+
+
+# How to access LocalStack
+(
+ # -- Cosmetic
+ LOCALSTACK_HOST,
+ # -- Edge configuration
+ # Main configuration of the listen address of the hypercorn proxy. Of the form
+ # :(,:port>)*
+ GATEWAY_LISTEN,
+) = populate_edge_configuration(os.environ)
+
+GATEWAY_WORKER_COUNT = int(os.environ.get("GATEWAY_WORKER_COUNT") or 1000)
+
+# the gateway server that should be used (supported: hypercorn, twisted dev: werkzeug)
+GATEWAY_SERVER = os.environ.get("GATEWAY_SERVER", "").strip() or "twisted"
+
+# IP of the docker bridge used to enable access between containers
+DOCKER_BRIDGE_IP = os.environ.get("DOCKER_BRIDGE_IP", "").strip()
+
+# Default timeout for Docker API calls sent by the Docker SDK client, in seconds.
+DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS = int(os.environ.get("DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS") or 60)
+
+# Default number of retries to connect to the Docker API by the Docker SDK client.
+DOCKER_SDK_DEFAULT_RETRIES = int(os.environ.get("DOCKER_SDK_DEFAULT_RETRIES") or 0)
+
+# whether to enable API-based updates of configuration variables at runtime
+ENABLE_CONFIG_UPDATES = is_env_true("ENABLE_CONFIG_UPDATES")
+
+# CORS settings
+DISABLE_CORS_HEADERS = is_env_true("DISABLE_CORS_HEADERS")
+DISABLE_CORS_CHECKS = is_env_true("DISABLE_CORS_CHECKS")
+DISABLE_CUSTOM_CORS_S3 = is_env_true("DISABLE_CUSTOM_CORS_S3")
+DISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true("DISABLE_CUSTOM_CORS_APIGATEWAY")
+EXTRA_CORS_ALLOWED_HEADERS = os.environ.get("EXTRA_CORS_ALLOWED_HEADERS", "").strip()
+EXTRA_CORS_EXPOSE_HEADERS = os.environ.get("EXTRA_CORS_EXPOSE_HEADERS", "").strip()
+EXTRA_CORS_ALLOWED_ORIGINS = os.environ.get("EXTRA_CORS_ALLOWED_ORIGINS", "").strip()
+DISABLE_PREFLIGHT_PROCESSING = is_env_true("DISABLE_PREFLIGHT_PROCESSING")
+
+# whether to disable publishing events to the API
+DISABLE_EVENTS = is_env_true("DISABLE_EVENTS")
+DEBUG_ANALYTICS = is_env_true("DEBUG_ANALYTICS")
+
+# whether to log fine-grained debugging information for the handler chain
+DEBUG_HANDLER_CHAIN = is_env_true("DEBUG_HANDLER_CHAIN")
+
+# whether to eagerly start services
+EAGER_SERVICE_LOADING = is_env_true("EAGER_SERVICE_LOADING")
+
+# whether to selectively load services in SERVICES
+STRICT_SERVICE_LOADING = is_env_not_false("STRICT_SERVICE_LOADING")
+
+# Whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions)
+SKIP_INFRA_DOWNLOADS = os.environ.get("SKIP_INFRA_DOWNLOADS", "").strip()
+
+# Whether to skip downloading our signed SSL cert.
+SKIP_SSL_CERT_DOWNLOAD = is_env_true("SKIP_SSL_CERT_DOWNLOAD")
+
+# Absolute path to a custom certificate (pem file)
+CUSTOM_SSL_CERT_PATH = os.environ.get("CUSTOM_SSL_CERT_PATH", "").strip()
+
+# Whether to delete the cached signed SSL certificate at startup
+REMOVE_SSL_CERT = is_env_true("REMOVE_SSL_CERT")
+
+# Allow non-standard AWS regions
+ALLOW_NONSTANDARD_REGIONS = is_env_true("ALLOW_NONSTANDARD_REGIONS")
+if ALLOW_NONSTANDARD_REGIONS:
+ os.environ["MOTO_ALLOW_NONEXISTENT_REGION"] = "true"
+
+# name of the main Docker container
+MAIN_CONTAINER_NAME = os.environ.get("MAIN_CONTAINER_NAME", "").strip() or "localstack-main"
+
+# the latest commit id of the repository when the docker image was created
+LOCALSTACK_BUILD_GIT_HASH = os.environ.get("LOCALSTACK_BUILD_GIT_HASH", "").strip() or None
+
+# the date on which the docker image was created
+LOCALSTACK_BUILD_DATE = os.environ.get("LOCALSTACK_BUILD_DATE", "").strip() or None
+
+# Equivalent to HTTP_PROXY, but only applicable for external connections
+OUTBOUND_HTTP_PROXY = os.environ.get("OUTBOUND_HTTP_PROXY", "")
+
+# Equivalent to HTTPS_PROXY, but only applicable for external connections
+OUTBOUND_HTTPS_PROXY = os.environ.get("OUTBOUND_HTTPS_PROXY", "")
+
+# Feature flag to enable validation of internal endpoint responses in the handler chain. For test use only.
+OPENAPI_VALIDATE_RESPONSE = is_env_true("OPENAPI_VALIDATE_RESPONSE")
+# Flag to enable the validation of the requests made to the LocalStack internal endpoints. Active by default.
+OPENAPI_VALIDATE_REQUEST = is_env_true("OPENAPI_VALIDATE_REQUEST")
+
+# whether to skip waiting for the infrastructure to shut down, or exit immediately
+FORCE_SHUTDOWN = is_env_not_false("FORCE_SHUTDOWN")
+
+# set variables no_proxy, i.e., run internal service calls directly
+no_proxy = ",".join([constants.LOCALHOST_HOSTNAME, LOCALHOST, LOCALHOST_IP, "[::1]"])
+if os.environ.get("no_proxy"):
+ os.environ["no_proxy"] += "," + no_proxy
+elif os.environ.get("NO_PROXY"):
+ os.environ["NO_PROXY"] += "," + no_proxy
+else:
+ os.environ["no_proxy"] = no_proxy
+
+# additional CLI commands, can be set by plugins
+CLI_COMMANDS = {}
+
+# determine IP of Docker bridge
+if not DOCKER_BRIDGE_IP:
+ DOCKER_BRIDGE_IP = "172.17.0.1"
+ if is_in_docker:
+ candidates = (DOCKER_BRIDGE_IP, "172.18.0.1")
+ for ip in candidates:
+ # TODO: remove from here - should not perform I/O operations in top-level config.py
+ if ping(ip):
+ DOCKER_BRIDGE_IP = ip
+ break
+
+# AWS account used to store internal resources such as Lambda archives or internal SQS queues.
+# It should not be modified by the user, or visible to him, except as through a presigned url with the
+# get-function call.
+INTERNAL_RESOURCE_ACCOUNT = os.environ.get("INTERNAL_RESOURCE_ACCOUNT") or "949334387222"
+
+# TODO: remove with 4.1.0
+# Determine which implementation to use for the event rule / event filtering engine used by multiple services:
+# EventBridge, EventBridge Pipes, Lambda Event Source Mapping
+# Options: python (default) | java (deprecated since 4.0.3)
+EVENT_RULE_ENGINE = os.environ.get("EVENT_RULE_ENGINE", "python").strip()
+
+# -----
+# SERVICE-SPECIFIC CONFIGS BELOW
+# -----
+
+# port ranges for external service instances (f.e. elasticsearch clusters, opensearch clusters,...)
+EXTERNAL_SERVICE_PORTS_START = int(
+ os.environ.get("EXTERNAL_SERVICE_PORTS_START")
+ or os.environ.get("SERVICE_INSTANCES_PORTS_START")
+ or 4510
+)
+EXTERNAL_SERVICE_PORTS_END = int(
+ os.environ.get("EXTERNAL_SERVICE_PORTS_END")
+ or os.environ.get("SERVICE_INSTANCES_PORTS_END")
+ or (EXTERNAL_SERVICE_PORTS_START + 50)
+)
+
+# The default container runtime to use
+CONTAINER_RUNTIME = os.environ.get("CONTAINER_RUNTIME", "").strip() or "docker"
+
+# PUBLIC v1: -Xmx512M (example) Currently not supported in new provider but possible via custom entrypoint.
+# Allow passing custom JVM options to Java Lambdas executed in Docker.
+LAMBDA_JAVA_OPTS = os.environ.get("LAMBDA_JAVA_OPTS", "").strip()
+
+# shard limit at which kinesis-mock will start throwing exceptions
+KINESIS_SHARD_LIMIT = os.environ.get("KINESIS_SHARD_LIMIT", "").strip() or "100"
+KINESIS_PERSISTENCE = is_env_not_false("KINESIS_PERSISTENCE")
+
+# on-demand stream count limit at which kinesis-mock will start throwing exceptions
+KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT = (
+ os.environ.get("KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT", "").strip() or "10"
+)
+
+# delay in kinesis-mock response when making changes to streams
+KINESIS_LATENCY = os.environ.get("KINESIS_LATENCY", "").strip() or "500"
+
+# Delay between data persistence (in seconds)
+KINESIS_MOCK_PERSIST_INTERVAL = os.environ.get("KINESIS_MOCK_PERSIST_INTERVAL", "").strip() or "5s"
+
+# Kinesis mock log level override when inconsistent with LS_LOG (e.g., when LS_LOG=debug)
+KINESIS_MOCK_LOG_LEVEL = os.environ.get("KINESIS_MOCK_LOG_LEVEL", "").strip()
+
+# randomly inject faults to Kinesis
+KINESIS_ERROR_PROBABILITY = float(os.environ.get("KINESIS_ERROR_PROBABILITY", "").strip() or 0.0)
+
+# SEMI-PUBLIC: "node" (default); not actively communicated
+# Select whether to use the node or scala build when running Kinesis Mock
+KINESIS_MOCK_PROVIDER_ENGINE = os.environ.get("KINESIS_MOCK_PROVIDER_ENGINE", "").strip() or "node"
+
+# set the maximum Java heap size corresponding to the '-Xmx' flag
+KINESIS_MOCK_MAXIMUM_HEAP_SIZE = (
+ os.environ.get("KINESIS_MOCK_MAXIMUM_HEAP_SIZE", "").strip() or "512m"
+)
+
+# set the initial Java heap size corresponding to the '-Xms' flag
+KINESIS_MOCK_INITIAL_HEAP_SIZE = (
+ os.environ.get("KINESIS_MOCK_INITIAL_HEAP_SIZE", "").strip() or "256m"
+)
+
+# randomly inject faults to DynamoDB
+DYNAMODB_ERROR_PROBABILITY = float(os.environ.get("DYNAMODB_ERROR_PROBABILITY", "").strip() or 0.0)
+DYNAMODB_READ_ERROR_PROBABILITY = float(
+ os.environ.get("DYNAMODB_READ_ERROR_PROBABILITY", "").strip() or 0.0
+)
+DYNAMODB_WRITE_ERROR_PROBABILITY = float(
+ os.environ.get("DYNAMODB_WRITE_ERROR_PROBABILITY", "").strip() or 0.0
+)
+
+# JAVA EE heap size for dynamodb
+DYNAMODB_HEAP_SIZE = os.environ.get("DYNAMODB_HEAP_SIZE", "").strip() or "256m"
+
+# single DB instance across multiple credentials and regions
+DYNAMODB_SHARE_DB = int(os.environ.get("DYNAMODB_SHARE_DB") or 0)
+
+# the port on which to expose dynamodblocal
+DYNAMODB_LOCAL_PORT = int(os.environ.get("DYNAMODB_LOCAL_PORT") or 0)
+
+# Enables the automatic removal of stale KV pairs based on TTL
+DYNAMODB_REMOVE_EXPIRED_ITEMS = is_env_true("DYNAMODB_REMOVE_EXPIRED_ITEMS")
+
+# Used to toggle PurgeInProgress exceptions when calling purge within 60 seconds
+SQS_DELAY_PURGE_RETRY = is_env_true("SQS_DELAY_PURGE_RETRY")
+
+# Used to toggle QueueDeletedRecently errors when re-creating a queue within 60 seconds of deleting it
+SQS_DELAY_RECENTLY_DELETED = is_env_true("SQS_DELAY_RECENTLY_DELETED")
+
+# Used to toggle MessageRetentionPeriod functionality in SQS queues
+SQS_ENABLE_MESSAGE_RETENTION_PERIOD = is_env_true("SQS_ENABLE_MESSAGE_RETENTION_PERIOD")
+
+# Strategy used when creating SQS queue urls. can be "off", "standard" (default), "domain", "path", or "dynamic"
+SQS_ENDPOINT_STRATEGY = os.environ.get("SQS_ENDPOINT_STRATEGY", "") or "standard"
+
+# Disable the check for MaxNumberOfMessage in SQS ReceiveMessage
+SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT = is_env_true("SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT")
+
+# Disable cloudwatch metrics for SQS
+SQS_DISABLE_CLOUDWATCH_METRICS = is_env_true("SQS_DISABLE_CLOUDWATCH_METRICS")
+
+# Interval for reporting "approximate" metrics to cloudwatch, default is 60 seconds
+SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL = int(
+ os.environ.get("SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL") or 60
+)
+
+# PUBLIC: Endpoint host under which LocalStack APIs are accessible from Lambda Docker containers.
+HOSTNAME_FROM_LAMBDA = os.environ.get("HOSTNAME_FROM_LAMBDA", "").strip()
+
+# PUBLIC: hot-reload (default v2), __local__ (default v1)
+# Magic S3 bucket name for Hot Reloading. The S3Key points to the source code on the local file system.
+BUCKET_MARKER_LOCAL = (
+ os.environ.get("BUCKET_MARKER_LOCAL", "").strip() or DEFAULT_BUCKET_MARKER_LOCAL
+)
+
+# PUBLIC: Opt-out to inject the environment variable AWS_ENDPOINT_URL for automatic configuration of AWS SDKs:
+# https://docs.aws.amazon.com/sdkref/latest/guide/feature-ss-endpoints.html
+LAMBDA_DISABLE_AWS_ENDPOINT_URL = is_env_true("LAMBDA_DISABLE_AWS_ENDPOINT_URL")
+
+# PUBLIC: bridge (Docker default)
+# Docker network driver for the Lambda and ECS containers. https://docs.docker.com/network/
+LAMBDA_DOCKER_NETWORK = os.environ.get("LAMBDA_DOCKER_NETWORK", "").strip()
+
+# PUBLIC v1: LocalStack DNS (default)
+# Custom DNS server for the container running your lambda function.
+LAMBDA_DOCKER_DNS = os.environ.get("LAMBDA_DOCKER_DNS", "").strip()
+
+# PUBLIC: -e KEY=VALUE -v host:container
+# Additional flags passed to Docker run|create commands.
+LAMBDA_DOCKER_FLAGS = os.environ.get("LAMBDA_DOCKER_FLAGS", "").strip()
+
+# PUBLIC: 0 (default)
+# Enable this flag to run cross-platform compatible lambda functions natively (i.e., Docker selects architecture) and
+# ignore the AWS architectures (i.e., x86_64, arm64) configured for the lambda function.
+LAMBDA_IGNORE_ARCHITECTURE = is_env_true("LAMBDA_IGNORE_ARCHITECTURE")
+
+# TODO: test and add to docs
+# EXPERIMENTAL: 0 (default)
+# prebuild images before execution? Increased cold start time on the tradeoff of increased time until lambda is ACTIVE
+LAMBDA_PREBUILD_IMAGES = is_env_true("LAMBDA_PREBUILD_IMAGES")
+
+# PUBLIC: docker (default), kubernetes (pro)
+# Where Lambdas will be executed.
+LAMBDA_RUNTIME_EXECUTOR = os.environ.get("LAMBDA_RUNTIME_EXECUTOR", CONTAINER_RUNTIME).strip()
+
+# PUBLIC: 20 (default)
+# How many seconds Lambda will wait for the runtime environment to start up.
+LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT = int(os.environ.get("LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT") or 20)
+
+# PUBLIC: base images for Lambda (default) https://docs.aws.amazon.com/lambda/latest/dg/runtimes-images.html
+# localstack/services/lambda_/invocation/lambda_models.py:IMAGE_MAPPING
+# Customize the Docker image of Lambda runtimes, either by:
+# a) pattern with placeholder, e.g. custom-repo/lambda-:2022
+# b) json dict mapping the to an image, e.g. {"python3.9": "custom-repo/lambda-py:thon3.9"}
+LAMBDA_RUNTIME_IMAGE_MAPPING = os.environ.get("LAMBDA_RUNTIME_IMAGE_MAPPING", "").strip()
+
+
+# PUBLIC: 0 (default)
+# Whether to disable usage of deprecated runtimes
+LAMBDA_RUNTIME_VALIDATION = int(os.environ.get("LAMBDA_RUNTIME_VALIDATION") or 0)
+
+# PUBLIC: 1 (default)
+# Whether to remove any Lambda Docker containers.
+LAMBDA_REMOVE_CONTAINERS = (
+ os.environ.get("LAMBDA_REMOVE_CONTAINERS", "").lower().strip() not in FALSE_STRINGS
+)
+
+# PUBLIC: 600000 (default 10min)
+# Time in milliseconds until lambda shuts down the execution environment after the last invocation has been processed.
+# Set to 0 to immediately shut down the execution environment after an invocation.
+LAMBDA_KEEPALIVE_MS = int(os.environ.get("LAMBDA_KEEPALIVE_MS", 600_000))
+
+# PUBLIC: 1000 (default)
+# The maximum number of events that functions can process simultaneously in the current Region.
+# See AWS service quotas: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html
+# Concurrency limits. Like on AWS these apply per account and region.
+LAMBDA_LIMITS_CONCURRENT_EXECUTIONS = int(
+ os.environ.get("LAMBDA_LIMITS_CONCURRENT_EXECUTIONS", 1_000)
+)
+# SEMI-PUBLIC: not actively communicated
+# per account/region: there must be at least unreserved concurrency.
+LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY = int(
+ os.environ.get("LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY", 100)
+)
+# SEMI-PUBLIC: not actively communicated
+LAMBDA_LIMITS_TOTAL_CODE_SIZE = int(os.environ.get("LAMBDA_LIMITS_TOTAL_CODE_SIZE", 80_530_636_800))
+# PUBLIC: documented after AWS changed validation around 2023-11
+LAMBDA_LIMITS_CODE_SIZE_ZIPPED = int(os.environ.get("LAMBDA_LIMITS_CODE_SIZE_ZIPPED", 52_428_800))
+# SEMI-PUBLIC: not actively communicated
+LAMBDA_LIMITS_CODE_SIZE_UNZIPPED = int(
+ os.environ.get("LAMBDA_LIMITS_CODE_SIZE_UNZIPPED", 262_144_000)
+)
+# PUBLIC: documented upon customer request
+LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE = int(
+ os.environ.get("LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE", 70_167_211)
+)
+# SEMI-PUBLIC: not actively communicated
+LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES = int(
+ os.environ.get("LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES", 4 * 1024)
+)
+# SEMI-PUBLIC: not actively communicated
+LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES = int(
+ os.environ.get(
+ "LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES", 6 * 1024 * 1024 + 100
+ ) # the 100 comes from the init defaults
+)
+
+# DEV: 0 (default unless in host mode on macOS) For LS developers only. Only applies to Docker mode.
+# Whether to explicitly expose a free TCP port in lambda containers when invoking functions in host mode for
+# systems that cannot reach the container via its IPv4. For example, macOS cannot reach Docker containers:
+# https://docs.docker.com/desktop/networking/#i-cannot-ping-my-containers
+LAMBDA_DEV_PORT_EXPOSE = (
+ # Enable this dev flag by default on macOS in host mode (i.e., non-Docker environment)
+ is_env_not_false("LAMBDA_DEV_PORT_EXPOSE")
+ if not is_in_docker and is_in_macos
+ else is_env_true("LAMBDA_DEV_PORT_EXPOSE")
+)
+
+# DEV: only applies to new lambda provider. All LAMBDA_INIT_* configuration are for LS developers only.
+# There are NO stability guarantees, and they may break at any time.
+
+# DEV: Release version of https://github.com/localstack/lambda-runtime-init overriding the current default
+LAMBDA_INIT_RELEASE_VERSION = os.environ.get("LAMBDA_INIT_RELEASE_VERSION")
+# DEV: 0 (default) Enable for mounting of RIE init binary and delve debugger
+LAMBDA_INIT_DEBUG = is_env_true("LAMBDA_INIT_DEBUG")
+# DEV: path to RIE init binary (e.g., var/rapid/init)
+LAMBDA_INIT_BIN_PATH = os.environ.get("LAMBDA_INIT_BIN_PATH")
+# DEV: path to entrypoint script (e.g., var/rapid/entrypoint.sh)
+LAMBDA_INIT_BOOTSTRAP_PATH = os.environ.get("LAMBDA_INIT_BOOTSTRAP_PATH")
+# DEV: path to delve debugger (e.g., var/rapid/dlv)
+LAMBDA_INIT_DELVE_PATH = os.environ.get("LAMBDA_INIT_DELVE_PATH")
+# DEV: Go Delve debug port
+LAMBDA_INIT_DELVE_PORT = int(os.environ.get("LAMBDA_INIT_DELVE_PORT") or 40000)
+# DEV: Time to wait after every invoke as a workaround to fix a race condition in persistence tests
+LAMBDA_INIT_POST_INVOKE_WAIT_MS = os.environ.get("LAMBDA_INIT_POST_INVOKE_WAIT_MS")
+# DEV: sbx_user1051 (default when not provided) Alternative system user or empty string to skip dropping privileges.
+LAMBDA_INIT_USER = os.environ.get("LAMBDA_INIT_USER")
+
+# INTERNAL: 1 (default)
+# The duration (in seconds) to wait between each poll call to an event source.
+LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC = float(
+ os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC") or 1
+)
+
+# INTERNAL: 60 (default)
+# Maximum duration (in seconds) to wait between retries when an event source poll fails.
+LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC = float(
+ os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC") or 60
+)
+
+# INTERNAL: 10 (default)
+# Maximum duration (in seconds) to wait between polls when an event source returns empty results.
+LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC = float(
+ os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC") or 10
+)
+
+# Specifies the path to the mock configuration file for Step Functions, commonly named MockConfigFile.json.
+SFN_MOCK_CONFIG = os.environ.get("SFN_MOCK_CONFIG", "").strip()
+
+# path prefix for windows volume mounting
+WINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get("WINDOWS_DOCKER_MOUNT_PREFIX", "/host_mnt")
+
+# whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved)
+S3_SKIP_SIGNATURE_VALIDATION = is_env_not_false("S3_SKIP_SIGNATURE_VALIDATION")
+# whether to skip S3 validation of provided KMS key
+S3_SKIP_KMS_KEY_VALIDATION = is_env_not_false("S3_SKIP_KMS_KEY_VALIDATION")
+
+# PUBLIC: 2000 (default)
+# Allows increasing the default char limit for truncation of lambda log lines when printed in the console.
+# This does not affect the logs processing in CloudWatch.
+LAMBDA_TRUNCATE_STDOUT = int(os.getenv("LAMBDA_TRUNCATE_STDOUT") or 2000)
+
+# INTERNAL: 60 (default matching AWS) only applies to new lambda provider
+# Base delay in seconds for async retries. Further retries use: NUM_ATTEMPTS * LAMBDA_RETRY_BASE_DELAY_SECONDS
+# 300 (5min) is the maximum because NUM_ATTEMPTS can be at most 3 and SQS has a message timer limit of 15 min.
+# For example:
+# 1x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between initial invocation and first retry
+# 2x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between the first retry and the second retry
+# 3x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between the second retry and the third retry
+LAMBDA_RETRY_BASE_DELAY_SECONDS = int(os.getenv("LAMBDA_RETRY_BASE_DELAY") or 60)
+
+# PUBLIC: 0 (default)
+# Set to 1 to create lambda functions synchronously (not recommended).
+# Whether Lambda.CreateFunction will block until the function is in a terminal state (Active or Failed).
+# This technically breaks behavior parity but is provided as a simplification over the default AWS behavior and
+# to match the behavior of the old lambda provider.
+LAMBDA_SYNCHRONOUS_CREATE = is_env_true("LAMBDA_SYNCHRONOUS_CREATE")
+
+# URL to a custom OpenSearch/Elasticsearch backend cluster. If this is set to a valid URL, then localstack will not
+# create OpenSearch/Elasticsearch cluster instances, but instead forward all domains to the given backend.
+OPENSEARCH_CUSTOM_BACKEND = os.environ.get("OPENSEARCH_CUSTOM_BACKEND", "").strip()
+
+# Strategy used when creating OpenSearch/Elasticsearch domain endpoints routed through the edge proxy
+# valid values: domain | path | port (off)
+OPENSEARCH_ENDPOINT_STRATEGY = (
+ os.environ.get("OPENSEARCH_ENDPOINT_STRATEGY", "").strip() or "domain"
+)
+if OPENSEARCH_ENDPOINT_STRATEGY == "off":
+ OPENSEARCH_ENDPOINT_STRATEGY = "port"
+
+# Whether to start one cluster per domain (default), or multiplex opensearch domains to a single cluster
+OPENSEARCH_MULTI_CLUSTER = is_env_not_false("OPENSEARCH_MULTI_CLUSTER")
+
+# Whether to really publish to GCM while using SNS Platform Application (needs credentials)
+LEGACY_SNS_GCM_PUBLISHING = is_env_true("LEGACY_SNS_GCM_PUBLISHING")
+
+SNS_SES_SENDER_ADDRESS = os.environ.get("SNS_SES_SENDER_ADDRESS", "").strip()
+
+SNS_CERT_URL_HOST = os.environ.get("SNS_CERT_URL_HOST", "").strip()
+
+# Whether the Next Gen APIGW invocation logic is enabled (on by default)
+APIGW_NEXT_GEN_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_APIGATEWAY", "") in ("next_gen", "")
+
+# Whether the DynamoDBStreams native provider is enabled
+DDB_STREAMS_PROVIDER_V2 = os.environ.get("PROVIDER_OVERRIDE_DYNAMODBSTREAMS", "") == "v2"
+_override_dynamodb_v2 = os.environ.get("PROVIDER_OVERRIDE_DYNAMODB", "")
+if DDB_STREAMS_PROVIDER_V2:
+ # in order to not have conflicts between the 2 implementations, as they are tightly coupled, we need to set DDB
+ # to be v2 as well
+ if not _override_dynamodb_v2:
+ os.environ["PROVIDER_OVERRIDE_DYNAMODB"] = "v2"
+elif _override_dynamodb_v2 == "v2":
+ os.environ["PROVIDER_OVERRIDE_DYNAMODBSTREAMS"] = "v2"
+ DDB_STREAMS_PROVIDER_V2 = True
+
+# TODO remove fallback to LAMBDA_DOCKER_NETWORK with next minor version
+MAIN_DOCKER_NETWORK = os.environ.get("MAIN_DOCKER_NETWORK", "") or LAMBDA_DOCKER_NETWORK
+
+# Whether to return and parse access key ids starting with an "A", like on AWS
+PARITY_AWS_ACCESS_KEY_ID = is_env_true("PARITY_AWS_ACCESS_KEY_ID")
+
+# Show exceptions for CloudFormation deploy errors
+CFN_VERBOSE_ERRORS = is_env_true("CFN_VERBOSE_ERRORS")
+
+# The CFN_STRING_REPLACEMENT_DENY_LIST env variable is a comma separated list of strings that are not allowed to be
+# replaced in CloudFormation templates (e.g. AWS URLs that are usually edited by Localstack to point to itself if found
+# in a CFN template). They are extracted to a list of strings if the env variable is set.
+CFN_STRING_REPLACEMENT_DENY_LIST = [
+ x for x in os.environ.get("CFN_STRING_REPLACEMENT_DENY_LIST", "").split(",") if x
+]
+
+# Set the timeout to deploy each individual CloudFormation resource
+CFN_PER_RESOURCE_TIMEOUT = int(os.environ.get("CFN_PER_RESOURCE_TIMEOUT") or 300)
+
+# How localstack will react to encountering unsupported resource types.
+# By default unsupported resource types will be ignored.
+# EXPERIMENTAL
+CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES = is_env_not_false("CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES")
+
+# bind address of local DNS server
+DNS_ADDRESS = os.environ.get("DNS_ADDRESS") or "0.0.0.0"
+# port of the local DNS server
+DNS_PORT = int(os.environ.get("DNS_PORT", "53"))
+
+# Comma-separated list of regex patterns for DNS names to resolve locally.
+# Any DNS name not matched against any of the patterns on this whitelist
+# will resolve it to the real DNS entry, rather than the local one.
+DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM = (
+ os.environ.get("DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM") or ""
+).strip()
+DNS_LOCAL_NAME_PATTERNS = (os.environ.get("DNS_LOCAL_NAME_PATTERNS") or "").strip() # deprecated
+
+# IP address that AWS endpoints should resolve to in our local DNS server. By default,
+# hostnames resolve to 127.0.0.1, which allows to use the LocalStack APIs transparently
+# from the host machine. If your code is running in Docker, this should be configured
+# to resolve to the Docker bridge network address, e.g., DNS_RESOLVE_IP=172.17.0.1
+DNS_RESOLVE_IP = os.environ.get("DNS_RESOLVE_IP") or LOCALHOST_IP
+
+# fallback DNS server to send upstream requests to
+DNS_SERVER = os.environ.get("DNS_SERVER")
+DNS_VERIFICATION_DOMAIN = os.environ.get("DNS_VERIFICATION_DOMAIN") or "localstack.cloud"
+
+
+def use_custom_dns():
+    """Whether the local DNS server is enabled (disabled by setting DNS_ADDRESS to a falsy string)."""
+    return str(DNS_ADDRESS) not in FALSE_STRINGS
+
+
+# s3 virtual host name
+S3_VIRTUAL_HOSTNAME = "s3.%s" % LOCALSTACK_HOST.host
+S3_STATIC_WEBSITE_HOSTNAME = "s3-website.%s" % LOCALSTACK_HOST.host
+
+BOTO_WAITER_DELAY = int(os.environ.get("BOTO_WAITER_DELAY") or "1")
+BOTO_WAITER_MAX_ATTEMPTS = int(os.environ.get("BOTO_WAITER_MAX_ATTEMPTS") or "120")
+DISABLE_CUSTOM_BOTO_WAITER_CONFIG = is_env_true("DISABLE_CUSTOM_BOTO_WAITER_CONFIG")
+
+# defaults to false
+# if `DISABLE_BOTO_RETRIES=1` is set, all our created boto clients will have retries disabled
+DISABLE_BOTO_RETRIES = is_env_true("DISABLE_BOTO_RETRIES")
+
+DISTRIBUTED_MODE = is_env_true("DISTRIBUTED_MODE")
+
+# This flag enables `connect_to` to be in-memory only and not do networking calls
+IN_MEMORY_CLIENT = is_env_true("IN_MEMORY_CLIENT")
+
+# This flag enables all responses from LocalStack to contain a `x-localstack` HTTP header.
+LOCALSTACK_RESPONSE_HEADER_ENABLED = is_env_not_false("LOCALSTACK_RESPONSE_HEADER_ENABLED")
+
+# List of environment variable names used for configuration that are passed from the host into the LocalStack container.
+# => Synchronize this list with the above and the configuration docs:
+# https://docs.localstack.cloud/references/configuration/
+# => Sort this list alphabetically
+# => Add deprecated environment variables to deprecations.py and add a comment in this list
+# => Move removed legacy variables to the section grouped by release (still relevant for deprecation warnings)
+# => Do *not* include any internal developer configurations that apply to host-mode only in this list.
+CONFIG_ENV_VARS = [
+ "ALLOW_NONSTANDARD_REGIONS",
+ "BOTO_WAITER_DELAY",
+ "BOTO_WAITER_MAX_ATTEMPTS",
+ "BUCKET_MARKER_LOCAL",
+ "CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES",
+ "CFN_PER_RESOURCE_TIMEOUT",
+ "CFN_STRING_REPLACEMENT_DENY_LIST",
+ "CFN_VERBOSE_ERRORS",
+ "CI",
+ "CONTAINER_RUNTIME",
+ "CUSTOM_SSL_CERT_PATH",
+ "DEBUG",
+ "DEBUG_HANDLER_CHAIN",
+ "DEVELOP",
+ "DEVELOP_PORT",
+ "DISABLE_BOTO_RETRIES",
+ "DISABLE_CORS_CHECKS",
+ "DISABLE_CORS_HEADERS",
+ "DISABLE_CUSTOM_BOTO_WAITER_CONFIG",
+ "DISABLE_CUSTOM_CORS_APIGATEWAY",
+ "DISABLE_CUSTOM_CORS_S3",
+ "DISABLE_EVENTS",
+ "DISTRIBUTED_MODE",
+ "DNS_ADDRESS",
+ "DNS_PORT",
+ "DNS_LOCAL_NAME_PATTERNS",
+ "DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM",
+ "DNS_RESOLVE_IP",
+ "DNS_SERVER",
+ "DNS_VERIFICATION_DOMAIN",
+ "DOCKER_BRIDGE_IP",
+ "DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS",
+ "DYNAMODB_ERROR_PROBABILITY",
+ "DYNAMODB_HEAP_SIZE",
+ "DYNAMODB_IN_MEMORY",
+ "DYNAMODB_LOCAL_PORT",
+ "DYNAMODB_SHARE_DB",
+ "DYNAMODB_READ_ERROR_PROBABILITY",
+ "DYNAMODB_REMOVE_EXPIRED_ITEMS",
+ "DYNAMODB_WRITE_ERROR_PROBABILITY",
+ "EAGER_SERVICE_LOADING",
+ "ENABLE_CONFIG_UPDATES",
+ "EVENT_RULE_ENGINE",
+ "EXTRA_CORS_ALLOWED_HEADERS",
+ "EXTRA_CORS_ALLOWED_ORIGINS",
+ "EXTRA_CORS_EXPOSE_HEADERS",
+ "GATEWAY_LISTEN",
+ "GATEWAY_SERVER",
+ "GATEWAY_WORKER_THREAD_COUNT",
+ "HOSTNAME",
+ "HOSTNAME_FROM_LAMBDA",
+ "IN_MEMORY_CLIENT",
+ "KINESIS_ERROR_PROBABILITY",
+ "KINESIS_MOCK_PERSIST_INTERVAL",
+ "KINESIS_MOCK_LOG_LEVEL",
+ "KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT",
+ "KINESIS_PERSISTENCE",
+ "LAMBDA_DEBUG_MODE",
+ "LAMBDA_DEBUG_MODE_CONFIG",
+ "LAMBDA_DISABLE_AWS_ENDPOINT_URL",
+ "LAMBDA_DOCKER_DNS",
+ "LAMBDA_DOCKER_FLAGS",
+ "LAMBDA_DOCKER_NETWORK",
+ "LAMBDA_EVENTS_INTERNAL_SQS",
+ "LAMBDA_EVENT_SOURCE_MAPPING",
+ "LAMBDA_IGNORE_ARCHITECTURE",
+ "LAMBDA_INIT_DEBUG",
+ "LAMBDA_INIT_BIN_PATH",
+ "LAMBDA_INIT_BOOTSTRAP_PATH",
+ "LAMBDA_INIT_DELVE_PATH",
+ "LAMBDA_INIT_DELVE_PORT",
+ "LAMBDA_INIT_POST_INVOKE_WAIT_MS",
+ "LAMBDA_INIT_USER",
+ "LAMBDA_INIT_RELEASE_VERSION",
+ "LAMBDA_KEEPALIVE_MS",
+ "LAMBDA_LIMITS_CONCURRENT_EXECUTIONS",
+ "LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY",
+ "LAMBDA_LIMITS_TOTAL_CODE_SIZE",
+ "LAMBDA_LIMITS_CODE_SIZE_ZIPPED",
+ "LAMBDA_LIMITS_CODE_SIZE_UNZIPPED",
+ "LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE",
+ "LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES",
+ "LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES",
+ "LAMBDA_PREBUILD_IMAGES",
+ "LAMBDA_RUNTIME_IMAGE_MAPPING",
+ "LAMBDA_REMOVE_CONTAINERS",
+ "LAMBDA_RETRY_BASE_DELAY_SECONDS",
+ "LAMBDA_RUNTIME_EXECUTOR",
+ "LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT",
+ "LAMBDA_RUNTIME_VALIDATION",
+ "LAMBDA_SYNCHRONOUS_CREATE",
+ "LAMBDA_SQS_EVENT_SOURCE_MAPPING_INTERVAL",
+ "LAMBDA_TRUNCATE_STDOUT",
+ "LEGACY_DOCKER_CLIENT",
+ "LEGACY_SNS_GCM_PUBLISHING",
+ "LOCALSTACK_API_KEY",
+ "LOCALSTACK_AUTH_TOKEN",
+ "LOCALSTACK_HOST",
+ "LOCALSTACK_RESPONSE_HEADER_ENABLED",
+ "LOG_LICENSE_ISSUES",
+ "LS_LOG",
+ "MAIN_CONTAINER_NAME",
+ "MAIN_DOCKER_NETWORK",
+ "OPENAPI_VALIDATE_REQUEST",
+ "OPENAPI_VALIDATE_RESPONSE",
+ "OPENSEARCH_ENDPOINT_STRATEGY",
+ "OUTBOUND_HTTP_PROXY",
+ "OUTBOUND_HTTPS_PROXY",
+ "PARITY_AWS_ACCESS_KEY_ID",
+ "PERSISTENCE",
+ "PORTS_CHECK_DOCKER_IMAGE",
+ "REQUESTS_CA_BUNDLE",
+ "REMOVE_SSL_CERT",
+ "S3_SKIP_SIGNATURE_VALIDATION",
+ "S3_SKIP_KMS_KEY_VALIDATION",
+ "SERVICES",
+ "SKIP_INFRA_DOWNLOADS",
+ "SKIP_SSL_CERT_DOWNLOAD",
+ "SNAPSHOT_LOAD_STRATEGY",
+ "SNAPSHOT_SAVE_STRATEGY",
+ "SNAPSHOT_FLUSH_INTERVAL",
+ "SNS_SES_SENDER_ADDRESS",
+ "SQS_DELAY_PURGE_RETRY",
+ "SQS_DELAY_RECENTLY_DELETED",
+ "SQS_ENABLE_MESSAGE_RETENTION_PERIOD",
+ "SQS_ENDPOINT_STRATEGY",
+ "SQS_DISABLE_CLOUDWATCH_METRICS",
+ "SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL",
+ "STRICT_SERVICE_LOADING",
+ "TF_COMPAT_MODE",
+ "USE_SSL",
+ "WAIT_FOR_DEBUGGER",
+ "WINDOWS_DOCKER_MOUNT_PREFIX",
+ # Removed legacy variables in 2.0.0
+ # DATA_DIR => do *not* include in this list, as it is treated separately. # deprecated since 1.0.0
+ "LEGACY_DIRECTORIES", # deprecated since 1.0.0
+ "SYNCHRONOUS_API_GATEWAY_EVENTS", # deprecated since 1.3.0
+ "SYNCHRONOUS_DYNAMODB_EVENTS", # deprecated since 1.3.0
+ "SYNCHRONOUS_SNS_EVENTS", # deprecated since 1.3.0
+ "SYNCHRONOUS_SQS_EVENTS", # deprecated since 1.3.0
+ # Removed legacy variables in 3.0.0
+ "DEFAULT_REGION", # deprecated since 0.12.7
+ "EDGE_BIND_HOST", # deprecated since 2.0.0
+ "EDGE_FORWARD_URL", # deprecated since 1.4.0
+ "EDGE_PORT", # deprecated since 2.0.0
+ "EDGE_PORT_HTTP", # deprecated since 2.0.0
+ "ES_CUSTOM_BACKEND", # deprecated since 0.14.0
+ "ES_ENDPOINT_STRATEGY", # deprecated since 0.14.0
+ "ES_MULTI_CLUSTER", # deprecated since 0.14.0
+ "HOSTNAME_EXTERNAL", # deprecated since 2.0.0
+ "KINESIS_INITIALIZE_STREAMS", # deprecated since 1.4.0
+ "KINESIS_PROVIDER", # deprecated since 1.3.0
+ "KMS_PROVIDER", # deprecated since 1.4.0
+ "LAMBDA_XRAY_INIT", # deprecated since 2.0.0
+ "LAMBDA_CODE_EXTRACT_TIME", # deprecated since 2.0.0
+ "LAMBDA_CONTAINER_REGISTRY", # deprecated since 2.0.0
+ "LAMBDA_EXECUTOR", # deprecated since 2.0.0
+ "LAMBDA_FALLBACK_URL", # deprecated since 2.0.0
+ "LAMBDA_FORWARD_URL", # deprecated since 2.0.0
+ "LAMBDA_JAVA_OPTS", # currently only supported in old Lambda provider but not officially deprecated
+ "LAMBDA_REMOTE_DOCKER", # deprecated since 2.0.0
+ "LAMBDA_STAY_OPEN_MODE", # deprecated since 2.0.0
+ "LEGACY_EDGE_PROXY", # deprecated since 1.0.0
+ "LOCALSTACK_HOSTNAME", # deprecated since 2.0.0
+ "SQS_PORT_EXTERNAL", # deprecated only in docs since 2022-07-13
+ "SYNCHRONOUS_KINESIS_EVENTS", # deprecated since 1.3.0
+ "USE_SINGLE_REGION", # deprecated since 0.12.7
+ "MOCK_UNIMPLEMENTED", # deprecated since 1.3.0
+]
+
+
+def is_local_test_mode() -> bool:
+ """Returns True if we are running in the context of our local integration tests."""
+ return is_env_true(ENV_INTERNAL_TEST_RUN)
+
+
+def is_collect_metrics_mode() -> bool:
+ """Returns True if metric collection is enabled."""
+ return is_env_true(ENV_INTERNAL_TEST_COLLECT_METRIC)
+
+
+def collect_config_items() -> List[Tuple[str, Any]]:
+ """Returns a list of key-value tuples of LocalStack configuration values."""
+ none = object() # sentinel object
+
+ # collect which keys to print
+ keys = []
+ keys.extend(CONFIG_ENV_VARS)
+ keys.append("DATA_DIR")
+ keys.sort()
+
+ values = globals()
+
+ result = []
+ for k in keys:
+ v = values.get(k, none)
+ if v is none:
+ continue
+ result.append((k, v))
+ result.sort()
+ return result
+
+
+def populate_config_env_var_names():
+ global CONFIG_ENV_VARS
+
+ CONFIG_ENV_VARS += [
+ key
+ for key in [key.upper() for key in os.environ]
+ if (key.startswith("LOCALSTACK_") or key.startswith("PROVIDER_OVERRIDE_"))
+ # explicitly exclude LOCALSTACK_CLI (it's prefixed with "LOCALSTACK_",
+ # but is only used in the CLI (should not be forwarded to the container)
+ and key != "LOCALSTACK_CLI"
+ ]
+
+ # create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOST)
+ CONFIG_ENV_VARS += [
+ "LOCALSTACK_" + v for v in CONFIG_ENV_VARS if not v.startswith("LOCALSTACK_")
+ ]
+
+ CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS))
+
+
+# populate env var names to be passed to the container
+populate_config_env_var_names()
+
+
+# helpers to build urls
+def get_protocol() -> str:
+ return "https" if USE_SSL else "http"
+
+
+def external_service_url(
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ protocol: Optional[str] = None,
+ subdomains: Optional[str] = None,
+) -> str:
+ """Returns a service URL (e.g., SQS queue URL) to an external client (e.g., boto3) potentially running on another
+ machine than LocalStack. The configurations LOCALSTACK_HOST and USE_SSL can customize these returned URLs.
+ The optional parameters can be used to customize the defaults.
+ Examples with default configuration:
+ * external_service_url() == http://localhost.localstack.cloud:4566
+ * external_service_url(subdomains="s3") == http://s3.localhost.localstack.cloud:4566
+ """
+ protocol = protocol or get_protocol()
+ subdomains = f"{subdomains}." if subdomains else ""
+ host = host or LOCALSTACK_HOST.host
+ port = port or LOCALSTACK_HOST.port
+ return f"{protocol}://{subdomains}{host}:{port}"
+
+
+def internal_service_url(
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ protocol: Optional[str] = None,
+ subdomains: Optional[str] = None,
+) -> str:
+ """Returns a service URL for internal use within LocalStack (i.e., same host).
+ The configuration USE_SSL can customize these returned URLs but LOCALSTACK_HOST has no effect.
+ The optional parameters can be used to customize the defaults.
+ Examples with default configuration:
+ * internal_service_url() == http://localhost:4566
+ * internal_service_url(port=8080) == http://localhost:8080
+ """
+ protocol = protocol or get_protocol()
+ subdomains = f"{subdomains}." if subdomains else ""
+ host = host or LOCALHOST
+ port = port or GATEWAY_LISTEN[0].port
+ return f"{protocol}://{subdomains}{host}:{port}"
+
+
+# DEPRECATED: old helpers for building URLs
+
+
+def service_url(service_key, host=None, port=None):
+ """@deprecated: Use `internal_service_url()` instead. We assume that most usages are internal
+ but really need to check and update each usage accordingly.
+ """
+ warnings.warn(
+ """@deprecated: Use `internal_service_url()` instead. We assume that most usages are
+ internal but really need to check and update each usage accordingly.""",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return internal_service_url(host=host, port=port)
+
+
+def service_port(service_key: str, external: bool = False) -> int:
+ """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for
+ internal use."""
+ warnings.warn(
+ "Deprecated: use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for "
+ "internal use.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if external:
+ return LOCALSTACK_HOST.port
+ return GATEWAY_LISTEN[0].port
+
+
+def get_edge_port_http():
+ """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for
+ internal use. This function is not needed anymore because we don't separate between HTTP
+ and HTTP ports anymore since LocalStack listens to both ports."""
+ warnings.warn(
+ """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port`
+ for internal use. This function is also not needed anymore because we don't separate
+ between HTTP and HTTP ports anymore since LocalStack listens to both.""",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return GATEWAY_LISTEN[0].port
+
+
+def get_edge_url(localstack_hostname=None, protocol=None):
+ """@deprecated: Use `internal_service_url()` instead.
+ We assume that most usages are internal but really need to check and update each usage accordingly.
+ """
+ warnings.warn(
+ """@deprecated: Use `internal_service_url()` instead.
+ We assume that most usages are internal but really need to check and update each usage accordingly.
+ """,
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return internal_service_url(host=localstack_hostname, protocol=protocol)
+
+
+class ServiceProviderConfig(Mapping[str, str]):
+ _provider_config: Dict[str, str]
+ default_value: str
+ override_prefix: str = "PROVIDER_OVERRIDE_"
+
+ def __init__(self, default_value: str):
+ self._provider_config = {}
+ self.default_value = default_value
+
+ def load_from_environment(self, env: Mapping[str, str] = None):
+ if env is None:
+ env = os.environ
+ for key, value in env.items():
+ if key.startswith(self.override_prefix) and value:
+ self.set_provider(key[len(self.override_prefix) :].lower().replace("_", "-"), value)
+
+ def get_provider(self, service: str) -> str:
+ return self._provider_config.get(service, self.default_value)
+
+ def set_provider_if_not_exists(self, service: str, provider: str) -> None:
+ if service not in self._provider_config:
+ self._provider_config[service] = provider
+
+ def set_provider(self, service: str, provider: str):
+ self._provider_config[service] = provider
+
+ def bulk_set_provider_if_not_exists(self, services: List[str], provider: str):
+ for service in services:
+ self.set_provider_if_not_exists(service, provider)
+
+ def __getitem__(self, item):
+ return self.get_provider(item)
+
+ def __setitem__(self, key, value):
+ self.set_provider(key, value)
+
+ def __len__(self):
+ return len(self._provider_config)
+
+ def __iter__(self):
+ return self._provider_config.__iter__()
+
+
+SERVICE_PROVIDER_CONFIG = ServiceProviderConfig("default")
+
+SERVICE_PROVIDER_CONFIG.load_from_environment()
+
+
+def init_directories() -> Directories:
+ if is_in_docker:
+ return Directories.for_container()
+ else:
+ if is_env_true("LOCALSTACK_CLI"):
+ return Directories.for_cli()
+
+ return Directories.for_host()
+
+
+# initialize directories
+dirs: Directories
+dirs = init_directories()
diff --git a/localstack-core/localstack/constants.py b/localstack-core/localstack/constants.py
new file mode 100644
index 0000000000000..c8833e557fced
--- /dev/null
+++ b/localstack-core/localstack/constants.py
@@ -0,0 +1,188 @@
+import os
+
+from localstack.version import __version__
+
+VERSION = __version__
+
+# HTTP headers used to forward proxy request URLs
+HEADER_LOCALSTACK_EDGE_URL = "x-localstack-edge"
+HEADER_LOCALSTACK_REQUEST_URL = "x-localstack-request-url"
+# HTTP header optionally added to LocalStack responses
+HEADER_LOCALSTACK_IDENTIFIER = "x-localstack"
+# custom LocalStack authorization header, only used in the ext (Pro) codebase
+HEADER_LOCALSTACK_AUTHORIZATION = "x-localstack-authorization"
+HEADER_LOCALSTACK_TARGET = "x-localstack-target"
+HEADER_AMZN_ERROR_TYPE = "X-Amzn-Errortype"
+
+# backend service ports, for services that are behind a proxy (counting down from 4566)
+DEFAULT_PORT_EDGE = 4566
+
+# host name for localhost
+LOCALHOST = "localhost"
+LOCALHOST_IP = "127.0.0.1"
+LOCALHOST_HOSTNAME = "localhost.localstack.cloud"
+
+# User-agent string used in outgoing HTTP requests made by LocalStack
+USER_AGENT_STRING = f"localstack/{VERSION}"
+
+# version of the Maven dependency with Java utility code
+LOCALSTACK_MAVEN_VERSION = "0.2.21"
+MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
+
+# URL of localstack's artifacts repository on GitHub
+ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
+
+# Artifacts endpoint
+ASSETS_ENDPOINT = "https://assets.localstack.cloud"
+
+# Hugging Face endpoint for localstack
+HUGGING_FACE_ENDPOINT = "https://huggingface.co/localstack"
+
+# host to bind to when starting the services
+BIND_HOST = "0.0.0.0"
+
+# root code folder
+MODULE_MAIN_PATH = os.path.dirname(os.path.realpath(__file__))
+# TODO rename to "ROOT_FOLDER"!
+LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(MODULE_MAIN_PATH, ".."))
+
+# virtualenv folder
+LOCALSTACK_VENV_FOLDER: str = os.environ.get("VIRTUAL_ENV")
+if not LOCALSTACK_VENV_FOLDER:
+ # fallback to the previous logic
+ LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, ".venv")
+ if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
+ # assuming this package lives here: /lib/pythonX.X/site-packages/localstack/
+ LOCALSTACK_VENV_FOLDER = os.path.realpath(
+ os.path.join(LOCALSTACK_ROOT_FOLDER, "..", "..", "..")
+ )
+
+# default volume directory containing shared data
+DEFAULT_VOLUME_DIR = "/var/lib/localstack"
+
+# API Gateway path to indicate a user request sent to the gateway
+PATH_USER_REQUEST = "_user_request_"
+
+# name of LocalStack Docker image
+DOCKER_IMAGE_NAME = "localstack/localstack"
+DOCKER_IMAGE_NAME_PRO = "localstack/localstack-pro"
+DOCKER_IMAGE_NAME_FULL = "localstack/localstack-full"
+
+# backdoor API path used to retrieve or update config variables
+CONFIG_UPDATE_PATH = "/?_config_"
+
+# API path for localstack internal resources
+INTERNAL_RESOURCE_PATH = "/_localstack"
+
+# environment variable name to tag local test runs
+ENV_INTERNAL_TEST_RUN = "LOCALSTACK_INTERNAL_TEST_RUN"
+
+# environment variable name to tag collect metrics during a test run
+ENV_INTERNAL_TEST_COLLECT_METRIC = "LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC"
+
+# environment variable that flags whether pro was activated. do not use it for security purposes!
+ENV_PRO_ACTIVATED = "PRO_ACTIVATED"
+
+# content types / encodings
+HEADER_CONTENT_TYPE = "Content-Type"
+TEXT_XML = "text/xml"
+APPLICATION_AMZ_JSON_1_0 = "application/x-amz-json-1.0"
+APPLICATION_AMZ_JSON_1_1 = "application/x-amz-json-1.1"
+APPLICATION_AMZ_CBOR_1_1 = "application/x-amz-cbor-1.1"
+APPLICATION_CBOR = "application/cbor"
+APPLICATION_JSON = "application/json"
+APPLICATION_XML = "application/xml"
+APPLICATION_OCTET_STREAM = "application/octet-stream"
+APPLICATION_X_WWW_FORM_URLENCODED = "application/x-www-form-urlencoded"
+HEADER_ACCEPT_ENCODING = "Accept-Encoding"
+
+# strings to indicate truthy/falsy values
+TRUE_STRINGS = ("1", "true", "True")
+FALSE_STRINGS = ("0", "false", "False")
+# strings with valid log levels for LS_LOG
+LOG_LEVELS = ("trace-internal", "trace", "debug", "info", "warn", "error", "warning")
+
+# the version of elasticsearch that is pre-seeded into the base image (sync with Dockerfile.base)
+ELASTICSEARCH_DEFAULT_VERSION = "Elasticsearch_7.10"
+# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
+ELASTICSEARCH_PLUGIN_LIST = [
+ "analysis-icu",
+ "ingest-attachment",
+ "analysis-kuromoji",
+ "mapper-murmur3",
+ "mapper-size",
+ "analysis-phonetic",
+ "analysis-smartcn",
+ "analysis-stempel",
+ "analysis-ukrainian",
+]
+# Default ES modules to exclude (save apprx 66MB in the final image)
+ELASTICSEARCH_DELETE_MODULES = ["ingest-geoip"]
+
+# the version of opensearch which is used by default
+OPENSEARCH_DEFAULT_VERSION = "OpenSearch_2.11"
+
+# See https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-plugins.html
+OPENSEARCH_PLUGIN_LIST = [
+ "ingest-attachment",
+ "analysis-kuromoji",
+]
+
+# API endpoint for analytics events
+API_ENDPOINT = os.environ.get("API_ENDPOINT") or "https://api.localstack.cloud/v1"
+# new analytics API endpoint
+ANALYTICS_API = os.environ.get("ANALYTICS_API") or "https://analytics.localstack.cloud/v1"
+
+# environment variable to indicate this process should run the localstack infrastructure
+LOCALSTACK_INFRA_PROCESS = "LOCALSTACK_INFRA_PROCESS"
+
+# AWS region us-east-1
+AWS_REGION_US_EAST_1 = "us-east-1"
+
+# environment variable to override max pool connections
+try:
+ MAX_POOL_CONNECTIONS = int(os.environ["MAX_POOL_CONNECTIONS"])
+except Exception:
+ MAX_POOL_CONNECTIONS = 150
+
+# Fallback Account ID if not available in the client request
+DEFAULT_AWS_ACCOUNT_ID = "000000000000"
+
+# Credentials used for internal calls
+INTERNAL_AWS_ACCESS_KEY_ID = "__internal_call__"
+INTERNAL_AWS_SECRET_ACCESS_KEY = "__internal_call__"
+
+# trace log levels (excluding/including internal API calls), configurable via $LS_LOG
+LS_LOG_TRACE = "trace"
+LS_LOG_TRACE_INTERNAL = "trace-internal"
+TRACE_LOG_LEVELS = [LS_LOG_TRACE, LS_LOG_TRACE_INTERNAL]
+
+# list of official docker images
+OFFICIAL_IMAGES = [
+ "localstack/localstack",
+ "localstack/localstack-pro",
+]
+
+# port for debug py
+DEFAULT_DEVELOP_PORT = 5678
+
+# Default bucket name of the s3 bucket used for local lambda development
+# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
+DEFAULT_BUCKET_MARKER_LOCAL = "hot-reload"
+LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"
+
+# user that starts the opensearch process if the current user is root
+OS_USER_OPENSEARCH = "localstack"
+
+# output string that indicates that the stack is ready
+READY_MARKER_OUTPUT = "Ready."
+
+# Regex for `Credential` field in the Authorization header in AWS signature version v4
+# The format is as follows:
+# Credential=<access-key-id>/<date>/<region>/<service>/aws4_request
+# eg.
+# Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
+AUTH_CREDENTIAL_REGEX = r"Credential=(?P<access_key_id>[a-zA-Z0-9-_.]{1,})/(?P<date>\d{8})/(?P<region_name>[a-z0-9-]{1,})/(?P<service_name>[a-z0-9]{1,})/"
+
+# Custom resource tag to override the generated resource ID.
+TAG_KEY_CUSTOM_ID = "_custom_id_"
diff --git a/localstack-core/localstack/deprecations.py b/localstack-core/localstack/deprecations.py
new file mode 100644
index 0000000000000..1690ca227d878
--- /dev/null
+++ b/localstack-core/localstack/deprecations.py
@@ -0,0 +1,410 @@
+# A simple module to track deprecations over time / versions, and some simple functions guiding the affected users.
+import logging
+import os
+from dataclasses import dataclass
+from typing import Callable, List, Optional
+
+from localstack.utils.analytics import log
+
+LOG = logging.getLogger(__name__)
+
+
+@dataclass
+class EnvVarDeprecation:
+ """
+ Simple class defining a deprecation of an environment variable config.
+ It helps keeping track of deprecations over time.
+ """
+
+ env_var: str
+ deprecation_version: str
+ deprecation_path: str = None
+
+ @property
+ def is_affected(self) -> bool:
+ """
+ Checks whether an environment is affected.
+ :return: true if the environment is affected / is using a deprecated config
+ """
+ return os.environ.get(self.env_var) is not None
+
+
+#
+# List of deprecations
+#
+# Please make sure this is in-sync with https://docs.localstack.cloud/references/configuration/
+#
+DEPRECATIONS = [
+ # Since 0.11.3 - HTTP / HTTPS multiplexing
+ EnvVarDeprecation(
+ "USE_SSL",
+ "0.11.3",
+ "Each endpoint now supports multiplexing HTTP/HTTPS traffic over the same port. Please remove this environment variable.", # noqa
+ ),
+ # Since 0.12.8 - PORT_WEB_UI was removed
+ EnvVarDeprecation(
+ "PORT_WEB_UI",
+ "0.12.8",
+ "PORT_WEB_UI has been removed, and is not available anymore. Please remove this environment variable.",
+ ),
+ # Deprecated in 0.12.7, removed in 3.0.0
+ EnvVarDeprecation(
+ "USE_SINGLE_REGION",
+ "0.12.7",
+ "LocalStack now has full multi-region support. This option has no effect. Please remove it from your configuration.", # noqa
+ ),
+ # Deprecated in 0.12.7, removed in 3.0.0
+ EnvVarDeprecation(
+ "DEFAULT_REGION",
+ "0.12.7",
+ "LocalStack now has full multi-region support. This option has no effect. Please remove it from your configuration.", # noqa
+ ),
+ # Since 1.0.0 - New Persistence and file system
+ EnvVarDeprecation(
+ "DATA_DIR",
+ "1.0.0",
+ "Please use PERSISTENCE instead. The state will be stored in your LocalStack volume in the state/ directory.",
+ ),
+ EnvVarDeprecation(
+ "HOST_TMP_FOLDER",
+ "1.0.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "LEGACY_DIRECTORIES",
+ "1.0.0",
+ "This option has no effect anymore. Please migrate to the new filesystem layout (introduced with v1.0).",
+ ),
+ EnvVarDeprecation(
+ "TMPDIR", "1.0.0", "Please migrate to the new filesystem layout (introduced with v1.0)."
+ ),
+ EnvVarDeprecation(
+ "PERSISTENCE_SINGLE_FILE",
+ "1.0.0",
+ "The legacy persistence mechanism is not supported anymore, please migrate to the advanced persistence mechanism of LocalStack Pro.", # noqa
+ ),
+ # Since 1.0.0 - New ASF Gateway
+ EnvVarDeprecation(
+ "LEGACY_EDGE_PROXY",
+ "1.0.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ # Since 1.1.0 - Kinesalite removed with 1.3, only kinesis-mock is used as kinesis provider / backend
+ EnvVarDeprecation(
+ "KINESIS_PROVIDER",
+ "1.1.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ # Since 1.1.0 - Init dir has been deprecated in favor of pluggable init hooks
+ EnvVarDeprecation(
+ "LEGACY_INIT_DIR",
+ "1.1.0",
+ "This option has no effect anymore. "
+ "Please use the pluggable initialization hooks in /etc/localhost/init/<stage>.d instead.",
+ ),
+ EnvVarDeprecation(
+ "INIT_SCRIPTS_PATH",
+ "1.1.0",
+ "This option has no effect anymore. "
+ "Please use the pluggable initialization hooks in /etc/localhost/init/<stage>.d instead.",
+ ),
+ # Since 1.3.0 - Synchronous events break AWS parity
+ EnvVarDeprecation(
+ "SYNCHRONOUS_SNS_EVENTS",
+ "1.3.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "SYNCHRONOUS_SQS_EVENTS",
+ "1.3.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "SYNCHRONOUS_API_GATEWAY_EVENTS",
+ "1.3.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "SYNCHRONOUS_KINESIS_EVENTS",
+ "1.3.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "SYNCHRONOUS_DYNAMODB_EVENTS",
+ "1.3.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ # Since 1.3.0 - All non-pre-seeded infra is downloaded asynchronously
+ EnvVarDeprecation(
+ "SKIP_INFRA_DOWNLOADS",
+ "1.3.0",
+ "Infra downloads are triggered on-demand now. Please remove this environment variable.",
+ ),
+ # Since 1.3.0 - Mocking for unimplemented operations will be removed
+ EnvVarDeprecation(
+ "MOCK_UNIMPLEMENTED",
+ "1.3.0",
+ "This feature is not supported anymore. Please remove this environment variable.",
+ ),
+ # Since 1.4.0 - The Edge Forwarding is only used for legacy HTTPS proxying and will be removed
+ EnvVarDeprecation(
+ "EDGE_FORWARD_URL",
+ "1.4.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ # Deprecated in 1.4.0, removed in 3.0.0
+ EnvVarDeprecation(
+ "KMS_PROVIDER",
+ "1.4.0",
+ "This option has no effect. Please remove it from your configuration.",
+ ),
+ # Since 2.0.0 - HOSTNAME_EXTERNAL will be replaced with LOCALSTACK_HOST
+ EnvVarDeprecation(
+ "HOSTNAME_EXTERNAL",
+ "2.0.0",
+ "This configuration will be migrated to LOCALSTACK_HOST",
+ ),
+ # Since 2.0.0 - LOCALSTACK_HOSTNAME will be replaced with LOCALSTACK_HOST
+ EnvVarDeprecation(
+ "LOCALSTACK_HOSTNAME",
+ "2.0.0",
+ "This configuration will be migrated to LOCALSTACK_HOST",
+ ),
+ # Since 2.0.0 - redefined as GATEWAY_LISTEN
+ EnvVarDeprecation(
+ "EDGE_BIND_HOST",
+ "2.0.0",
+ "This configuration will be migrated to GATEWAY_LISTEN",
+ ),
+ # Since 2.0.0 - redefined as GATEWAY_LISTEN
+ EnvVarDeprecation(
+ "EDGE_PORT",
+ "2.0.0",
+ "This configuration will be migrated to GATEWAY_LISTEN",
+ ),
+ # Since 2.0.0 - redefined as GATEWAY_LISTEN
+ EnvVarDeprecation(
+ "EDGE_PORT_HTTP",
+ "2.0.0",
+ "This configuration will be migrated to GATEWAY_LISTEN",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_EXECUTOR",
+ "2.0.0",
+ "This configuration is obsolete with the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2\n"
+ "Please mount the Docker socket /var/run/docker.sock as a volume when starting LocalStack.",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_STAY_OPEN_MODE",
+ "2.0.0",
+ "Stay open mode is the default behavior in the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_REMOTE_DOCKER",
+ "2.0.0",
+ "The new lambda provider copies zip files by default and automatically configures hot reloading "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_CODE_EXTRACT_TIME",
+ "2.0.0",
+ "Function creation now happens asynchronously in the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_CONTAINER_REGISTRY",
+ "2.0.0",
+ "The new lambda provider uses LAMBDA_RUNTIME_IMAGE_MAPPING instead "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_FALLBACK_URL",
+ "2.0.0",
+ "This feature is not supported in the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_FORWARD_URL",
+ "2.0.0",
+ "This feature is not supported in the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_XRAY_INIT",
+ "2.0.0",
+ "The X-Ray daemon is always initialized in the new lambda provider "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "KINESIS_INITIALIZE_STREAMS",
+ "1.4.0",
+ "This option has no effect anymore. Please use the AWS client and init hooks instead.",
+ ),
+ EnvVarDeprecation(
+ "SQS_PORT_EXTERNAL",
+ "1.0.0",
+ "This option has no effect anymore. Please use LOCALSTACK_HOST instead.",
+ ),
+ EnvVarDeprecation(
+ "PROVIDER_OVERRIDE_LAMBDA",
+ "3.0.0",
+ "This option is ignored because the legacy Lambda provider (v1) has been removed since 3.0.0. "
+ "Please remove PROVIDER_OVERRIDE_LAMBDA and migrate to our new Lambda provider (v2): "
+ "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2",
+ ),
+ EnvVarDeprecation(
+ "ES_CUSTOM_BACKEND",
+ "0.14.0",
+ "This option has no effect anymore. Please use OPENSEARCH_CUSTOM_BACKEND instead.",
+ ),
+ EnvVarDeprecation(
+ "ES_MULTI_CLUSTER",
+ "0.14.0",
+ "This option has no effect anymore. Please use OPENSEARCH_MULTI_CLUSTER instead.",
+ ),
+ EnvVarDeprecation(
+ "ES_ENDPOINT_STRATEGY",
+ "0.14.0",
+ "This option has no effect anymore. Please use OPENSEARCH_ENDPOINT_STRATEGY instead.",
+ ),
+ EnvVarDeprecation(
+ "PERSIST_ALL",
+ "2.3.2",
+ "LocalStack treats backends and assets the same with respect to persistence. Please remove PERSIST_ALL.",
+ ),
+ EnvVarDeprecation(
+ "DNS_LOCAL_NAME_PATTERNS",
+ "3.0.0",
+ "This option was confusingly named. Please use DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM "
+ "instead.",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_EVENTS_INTERNAL_SQS",
+ "4.0.0",
+ "This option is ignored because the LocalStack SQS dependency for event invokes has been removed since 4.0.0"
+ " in favor of a lightweight Lambda-internal SQS implementation.",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_EVENT_SOURCE_MAPPING",
+ "4.0.0",
+ "This option has no effect anymore. Please remove this environment variable.",
+ ),
+ EnvVarDeprecation(
+ "LAMBDA_SQS_EVENT_SOURCE_MAPPING_INTERVAL_SEC",
+ "4.0.0",
+ "This option is not supported by the new Lambda Event Source Mapping v2 implementation."
+ " Please create a GitHub issue if you experience any performance challenges.",
+ ),
+ EnvVarDeprecation(
+ "PROVIDER_OVERRIDE_STEPFUNCTIONS",
+ "4.0.0",
+ "This option is ignored because the legacy StepFunctions provider (v1) has been removed since 4.0.0."
+ " Please remove PROVIDER_OVERRIDE_STEPFUNCTIONS.",
+ ),
+ EnvVarDeprecation(
+ "EVENT_RULE_ENGINE",
+ "4.0.3",
+ "This option is ignored because the Java-based event ruler has been removed since 4.1.0."
+ " Our latest Python-native implementation introduced in 4.0.3"
+ " is faster, achieves great AWS parity, and fixes compatibility issues with the StepFunctions JSONata feature."
+ " Please remove EVENT_RULE_ENGINE.",
+ ),
+ EnvVarDeprecation(
+ "STEPFUNCTIONS_LAMBDA_ENDPOINT",
+ "4.0.0",
+ "This is only supported for the legacy provider. URL to use as the Lambda service endpoint in Step Functions. "
+ "By default this is the LocalStack Lambda endpoint. Use default to select the original AWS Lambda endpoint.",
+ ),
+ EnvVarDeprecation(
+ "LOCAL_PORT_STEPFUNCTIONS",
+ "4.0.0",
+ "This is only supported for the legacy provider."
+ "It defines the local port to which Step Functions traffic is redirected."
+ "By default, LocalStack routes Step Functions traffic to its internal runtime. "
+ "Use this variable only if you need to redirect traffic to a different local Step Functions runtime.",
+ ),
+]
+
+
+def collect_affected_deprecations(
+ deprecations: Optional[List[EnvVarDeprecation]] = None,
+) -> List[EnvVarDeprecation]:
+ """
+ Collects all deprecations which are used in the OS environ.
+ :param deprecations: List of deprecations to check. Uses DEPRECATIONS list by default.
+ :return: List of deprecations which are used in the current environment
+ """
+ if deprecations is None:
+ deprecations = DEPRECATIONS
+ return [deprecation for deprecation in deprecations if deprecation.is_affected]
+
+
+def log_env_warning(deprecations: List[EnvVarDeprecation]) -> None:
+ """
+ Logs warnings for the given deprecations.
+ :param deprecations: list of affected deprecations to show a warning for
+ """
+ # NOTE: replaced a stale duplicate docstring that documented parameters
+ # (env_var, deprecation_version) which do not exist on this function; it
+ # was left over from an earlier single-variable variant. The function logs
+ # one warning per affected deprecation and then emits a single analytics
+ # event collecting the names of all deprecated env vars in use.
+ if deprecations:
+ env_vars = []
+
+ # Print warnings for the env vars and collect them (for the analytics event)
+ for deprecation in deprecations:
+ LOG.warning(
+ "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! %s",
+ deprecation.env_var,
+ deprecation.deprecation_version,
+ deprecation.deprecation_path,
+ )
+ env_vars.append(deprecation.env_var)
+
+ # Log an event if deprecated env vars are used
+ log.event(event="deprecated_env_usage", payload={"deprecated_env_vars": env_vars})
+
+
+def log_deprecation_warnings(deprecations: Optional[List[EnvVarDeprecation]] = None) -> None:
+ affected_deprecations = collect_affected_deprecations(deprecations)
+ log_env_warning(affected_deprecations)
+
+ provider_override_events = os.environ.get("PROVIDER_OVERRIDE_EVENTS")
+ if provider_override_events and provider_override_events in ["v1", "legacy"]:
+ env_var_value = f"PROVIDER_OVERRIDE_EVENTS={provider_override_events}"
+ deprecation_version = "4.0.0"
+ deprecation_path = f"Remove {env_var_value} to use the new EventBridge implementation."
+ LOG.warning(
+ "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! %s",
+ env_var_value,
+ deprecation_version,
+ deprecation_path,
+ )
+
+
+def deprecated_endpoint(
+ endpoint: Callable, previous_path: str, deprecation_version: str, new_path: str
+) -> Callable:
+ """
+ Wrapper function which logs a warning (and a deprecation path) whenever a deprecated URL is invoked by the router.
+
+ :param endpoint: to wrap (log a warning whenever it is invoked)
+ :param previous_path: route path it is triggered by
+ :param deprecation_version: version of LocalStack with which this endpoint is deprecated
+ :param new_path: new route path which should be used instead
+ :return: wrapped function which can be registered for a route
+ """
+
+ def deprecated_wrapper(*args, **kwargs):
+ LOG.warning(
+ "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! Use %s instead.",
+ previous_path,
+ deprecation_version,
+ new_path,
+ )
+ return endpoint(*args, **kwargs)
+
+ return deprecated_wrapper
diff --git a/localstack/services/apigateway/__init__.py b/localstack-core/localstack/dev/__init__.py
similarity index 100%
rename from localstack/services/apigateway/__init__.py
rename to localstack-core/localstack/dev/__init__.py
diff --git a/localstack/services/awslambda/__init__.py b/localstack-core/localstack/dev/debugger/__init__.py
similarity index 100%
rename from localstack/services/awslambda/__init__.py
rename to localstack-core/localstack/dev/debugger/__init__.py
diff --git a/localstack-core/localstack/dev/debugger/plugins.py b/localstack-core/localstack/dev/debugger/plugins.py
new file mode 100644
index 0000000000000..aa1d163f57b85
--- /dev/null
+++ b/localstack-core/localstack/dev/debugger/plugins.py
@@ -0,0 +1,25 @@
+import logging
+
+from localstack import config, constants
+from localstack.runtime import hooks
+
+LOG = logging.getLogger(__name__)
+
+
+def enable_debugger():
+ from localstack.packages.debugpy import debugpy_package
+
+ debugpy_package.install()
+ import debugpy # noqa: T100
+
+ LOG.info("Starting debug server at: %s:%s", constants.BIND_HOST, config.DEVELOP_PORT)
+ debugpy.listen((constants.BIND_HOST, config.DEVELOP_PORT)) # noqa: T100
+
+ if config.WAIT_FOR_DEBUGGER:
+ debugpy.wait_for_client() # noqa: T100
+
+
+@hooks.on_infra_start()
+def conditionally_enable_debugger():
+ if config.DEVELOP:
+ enable_debugger()
diff --git a/localstack/services/cloudformation/__init__.py b/localstack-core/localstack/dev/kubernetes/__init__.py
similarity index 100%
rename from localstack/services/cloudformation/__init__.py
rename to localstack-core/localstack/dev/kubernetes/__init__.py
diff --git a/localstack-core/localstack/dev/kubernetes/__main__.py b/localstack-core/localstack/dev/kubernetes/__main__.py
new file mode 100644
index 0000000000000..8935027298ef0
--- /dev/null
+++ b/localstack-core/localstack/dev/kubernetes/__main__.py
@@ -0,0 +1,330 @@
+import dataclasses
+import os
+from typing import Literal
+
+import click
+import yaml
+
+
+@dataclasses.dataclass
+class MountPoint:
+ name: str
+ host_path: str
+ container_path: str
+ node_path: str
+ read_only: bool = True
+ volume_type: Literal["Directory", "File"] = "Directory"
+
+
+def generate_mount_points(
+ pro: bool = False, mount_moto: bool = False, mount_entrypoints: bool = False
+) -> list[MountPoint]:
+ mount_points = []
+ # host paths
+ root_path = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..")
+ localstack_code_path = os.path.join(root_path, "localstack-core", "localstack")
+ pro_path = os.path.join(root_path, "..", "localstack-ext")
+
+ # container paths
+ target_path = "/opt/code/localstack/"
+ venv_path = os.path.join(target_path, ".venv", "lib", "python3.11", "site-packages")
+
+ # Community code
+ if pro:
+ # Pro installs community code as a package, so it lives in the venv site-packages
+ mount_points.append(
+ MountPoint(
+ name="localstack",
+ host_path=os.path.normpath(localstack_code_path),
+ node_path="/code/localstack",
+ container_path=os.path.join(venv_path, "localstack"),
+ # Read only has to be false here, as we mount the pro code into this mount, as it is the entire namespace package
+ read_only=False,
+ )
+ )
+ else:
+ # Community does not install the localstack package in the venv, but has the code directly in `/opt/code/localstack`
+ mount_points.append(
+ MountPoint(
+ name="localstack",
+ host_path=os.path.normpath(localstack_code_path),
+ node_path="/code/localstack",
+ container_path=os.path.join(target_path, "localstack-core", "localstack"),
+ )
+ )
+
+ # Pro code
+ if pro:
+ pro_code_path = os.path.join(pro_path, "localstack-pro-core", "localstack", "pro", "core")
+ mount_points.append(
+ MountPoint(
+ name="localstack-pro",
+ host_path=os.path.normpath(pro_code_path),
+ node_path="/code/localstack-pro",
+ container_path=os.path.join(venv_path, "localstack", "pro", "core"),
+ )
+ )
+
+ # entrypoints
+ if mount_entrypoints:
+ if pro:
+ # Community entrypoints in pro image
+ # TODO actual package version detection
+ print(
+ "WARNING: Package version detection is not implemented."
+ "You need to adapt the version in the .egg-info paths to match the package version installed in the used localstack-pro image."
+ )
+ community_version = "4.1.1.dev14"
+ pro_version = "4.1.1.dev16"
+ egg_path = os.path.join(
+ root_path, "localstack-core", "localstack_core.egg-info/entry_points.txt"
+ )
+ mount_points.append(
+ MountPoint(
+ name="entry-points-community",
+ host_path=os.path.normpath(egg_path),
+ node_path="/code/entry-points-community",
+ container_path=os.path.join(
+ venv_path, f"localstack-{community_version}.egg-info", "entry_points.txt"
+ ),
+ volume_type="File",
+ )
+ )
+ # Pro entrypoints in pro image
+ egg_path = os.path.join(
+ pro_path, "localstack-pro-core", "localstack_ext.egg-info/entry_points.txt"
+ )
+ mount_points.append(
+ MountPoint(
+ name="entry-points-pro",
+ host_path=os.path.normpath(egg_path),
+ node_path="/code/entry-points-pro",
+ container_path=os.path.join(
+ venv_path, f"localstack_ext-{pro_version}.egg-info", "entry_points.txt"
+ ),
+ volume_type="File",
+ )
+ )
+ else:
+ # Community entrypoints in community repo
+ # In the community image, the code is not installed as package, so the paths are predictable
+ egg_path = os.path.join(
+ root_path, "localstack-core", "localstack_core.egg-info/entry_points.txt"
+ )
+ mount_points.append(
+ MountPoint(
+ name="entry-points-community",
+ host_path=os.path.normpath(egg_path),
+ node_path="/code/entry-points-community",
+ container_path=os.path.join(
+ target_path,
+ "localstack-core",
+ "localstack_core.egg-info",
+ "entry_points.txt",
+ ),
+ volume_type="File",
+ )
+ )
+
+ if mount_moto:
+ moto_path = os.path.join(root_path, "..", "moto", "moto")
+ mount_points.append(
+ MountPoint(
+ name="moto",
+ host_path=os.path.normpath(moto_path),
+ node_path="/code/moto",
+ container_path=os.path.join(venv_path, "moto"),
+ )
+ )
+ return mount_points
+
+
+def generate_k8s_cluster_config(mount_points: list[MountPoint], port: int = 4566):
+ volumes = [
+ {
+ "volume": f"{mount_point.host_path}:{mount_point.node_path}",
+ "nodeFilters": ["server:*", "agent:*"],
+ }
+ for mount_point in mount_points
+ ]
+
+ ports = [{"port": f"{port}:31566", "nodeFilters": ["server:0"]}]
+
+ config = {"apiVersion": "k3d.io/v1alpha5", "kind": "Simple", "volumes": volumes, "ports": ports}
+
+ return config
+
+
+def snake_to_kebab_case(string: str):
+ return string.lower().replace("_", "-")
+
+
+def generate_k8s_cluster_overrides(
+ mount_points: list[MountPoint], pro: bool = False, env: list[str] | None = None
+):
+ volumes = [
+ {
+ "name": mount_point.name,
+ "hostPath": {"path": mount_point.node_path, "type": mount_point.volume_type},
+ }
+ for mount_point in mount_points
+ ]
+
+ volume_mounts = [
+ {
+ "name": mount_point.name,
+ "readOnly": mount_point.read_only,
+ "mountPath": mount_point.container_path,
+ }
+ for mount_point in mount_points
+ ]
+
+ extra_env_vars = []
+ if env:
+ for env_variable in env:
+ lhs, _, rhs = env_variable.partition("=")
+ extra_env_vars.append(
+ {
+ "name": lhs,
+ "value": rhs,
+ }
+ )
+
+ if pro:
+ extra_env_vars += [
+ {
+ "name": "LOCALSTACK_AUTH_TOKEN",
+ "value": "test",
+ },
+ {
+ "name": "CONTAINER_RUNTIME",
+ "value": "kubernetes",
+ },
+ ]
+
+ image_repository = "localstack/localstack-pro" if pro else "localstack/localstack"
+
+ overrides = {
+ "debug": True,
+ "volumes": volumes,
+ "volumeMounts": volume_mounts,
+ "extraEnvVars": extra_env_vars,
+ "image": {"repository": image_repository},
+ "lambda": {"executor": "kubernetes"},
+ }
+
+ return overrides
+
+
+def write_file(content: dict, output_path: str, file_name: str):
+ path = os.path.join(output_path, file_name)
+ with open(path, "w") as f:
+ f.write(yaml.dump(content))
+ f.close()
+ print(f"Generated file at {path}")
+
+
+def print_file(content: dict, file_name: str):
+ print(f"Generated file:\t{file_name}")
+ print("=====================================")
+ print(yaml.dump(content))
+ print("=====================================")
+
+
+@click.command("run")
+@click.option(
+ "--pro", is_flag=True, default=None, help="Mount the localstack-pro code into the cluster."
+)
+@click.option(
+ "--mount-moto", is_flag=True, default=None, help="Mount the moto code into the cluster."
+)
+@click.option(
+ "--mount-entrypoints", is_flag=True, default=None, help="Mount the entrypoints into the pod."
+)
+@click.option(
+ "--write",
+ is_flag=True,
+ default=None,
+ help="Write the configuration and overrides to files.",
+)
+@click.option(
+ "--output-dir",
+ "-o",
+ type=click.Path(exists=True, file_okay=False, resolve_path=True),
+ help="Output directory for generated files.",
+)
+@click.option(
+ "--overrides-file",
+ "-of",
+ default=None,
+ help="Name of the overrides file (default: overrides.yml).",
+)
+@click.option(
+ "--config-file",
+ "-cf",
+ default=None,
+ help="Name of the configuration file (default: configuration.yml).",
+)
+@click.option(
+ "--env", "-e", default=None, help="Environment variable to set in the pod", multiple=True
+)
+@click.option(
+ "--port",
+ "-p",
+ default=4566,
+ help="Port to expose from the kubernetes node",
+ type=click.IntRange(0, 65535),
+)
+@click.argument("command", nargs=-1, required=False)
+def run(
+ pro: bool = None,
+ mount_moto: bool = False,
+ mount_entrypoints: bool = False,
+ write: bool = False,
+ output_dir=None,
+ overrides_file: str = None,
+ config_file: str = None,
+ command: str = None,
+ env: list[str] = None,
+ port: int = None,
+):
+ """
+ A tool for localstack developers to generate the kubernetes cluster configuration file and the overrides to mount the localstack code into the cluster.
+ """
+ mount_points = generate_mount_points(pro, mount_moto, mount_entrypoints)
+
+ config = generate_k8s_cluster_config(mount_points, port=port)
+
+ overrides = generate_k8s_cluster_overrides(mount_points, pro=pro, env=env)
+
+ output_dir = output_dir or os.getcwd()
+ overrides_file = overrides_file or "overrides.yml"
+ config_file = config_file or "configuration.yml"
+
+ if write:
+ write_file(config, output_dir, config_file)
+ write_file(overrides, output_dir, overrides_file)
+ else:
+ print_file(config, config_file)
+ print_file(overrides, overrides_file)
+
+ overrides_file_path = os.path.join(output_dir, overrides_file)
+ config_file_path = os.path.join(output_dir, config_file)
+
+ print("\nTo create a k3d cluster with the generated configuration, follow these steps:")
+ print("1. Run the following command to create the cluster:")
+ print(f"\n k3d cluster create --config {config_file_path}\n")
+
+ print("2. Once the cluster is created, start LocalStack with the generated overrides:")
+ print("\n helm repo add localstack https://localstack.github.io/helm-charts # (if required)")
+ print(
+ f"\n helm upgrade --install localstack localstack/localstack -f {overrides_file_path}\n"
+ )
+
+
+def main():
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/localstack/services/dynamodb/__init__.py b/localstack-core/localstack/dev/run/__init__.py
similarity index 100%
rename from localstack/services/dynamodb/__init__.py
rename to localstack-core/localstack/dev/run/__init__.py
diff --git a/localstack-core/localstack/dev/run/__main__.py b/localstack-core/localstack/dev/run/__main__.py
new file mode 100644
index 0000000000000..39ab236c9e3c2
--- /dev/null
+++ b/localstack-core/localstack/dev/run/__main__.py
@@ -0,0 +1,408 @@
+import dataclasses
+import os
+from typing import Iterable, Tuple
+
+import click
+from rich.rule import Rule
+
+from localstack import config
+from localstack.cli import console
+from localstack.runtime import hooks
+from localstack.utils.bootstrap import Container, ContainerConfigurators
+from localstack.utils.container_utils.container_client import (
+ ContainerConfiguration,
+ PortMappings,
+ VolumeMappings,
+)
+from localstack.utils.container_utils.docker_cmd_client import CmdDockerClient
+from localstack.utils.files import cache_dir
+from localstack.utils.run import run_interactive
+from localstack.utils.strings import short_uid
+
+from .configurators import (
+ ConfigEnvironmentConfigurator,
+ DependencyMountConfigurator,
+ EntryPointMountConfigurator,
+ ImageConfigurator,
+ PortConfigurator,
+ SourceVolumeMountConfigurator,
+)
+from .paths import HOST_PATH_MAPPINGS, HostPaths
+
+
+@click.command("run")
+@click.option(
+ "--image",
+ type=str,
+ required=False,
+ help="Overwrite the container image to be used (defaults to localstack/localstack or "
+ "localstack/localstack-pro).",
+)
+@click.option(
+ "--volume-dir",
+ type=click.Path(file_okay=False, dir_okay=True),
+ required=False,
+ help="The localstack volume on the host, default: ~/.cache/localstack/volume",
+)
+@click.option(
+ "--pro/--community",
+ is_flag=True,
+ default=None,
+ help="Whether to start localstack pro or community. If not set, it will guess from the current directory",
+)
+@click.option(
+ "--develop/--no-develop",
+ is_flag=True,
+ default=False,
+ help="Install debugpy and expose port 5678",
+)
+@click.option(
+ "--randomize",
+ is_flag=True,
+ default=False,
+ help="Randomize container name and ports to start multiple instances",
+)
+@click.option(
+ "--mount-source/--no-mount-source",
+ is_flag=True,
+ default=True,
+ help="Mount source files from localstack and localstack-ext. Use --local-packages for optional dependencies such as moto.",
+)
+@click.option(
+ "--mount-dependencies/--no-mount-dependencies",
+ is_flag=True,
+ default=False,
+ help="Whether to mount the dependencies of the current .venv directory into the container. Note this only works if the dependencies are compatible with the python and platform version from the venv and the container.",
+)
+@click.option(
+ "--mount-entrypoints/--no-mount-entrypoints",
+ is_flag=True,
+ default=False,
+ help="Mount entrypoints",
+)
+@click.option("--mount-docker-socket/--no-docker-socket", is_flag=True, default=True)
+@click.option(
+ "--env",
+ "-e",
+ help="Additional environment variables that are passed to the LocalStack container",
+ multiple=True,
+ required=False,
+)
+@click.option(
+ "--volume",
+ "-v",
+ help="Additional volume mounts that are passed to the LocalStack container",
+ multiple=True,
+ required=False,
+)
+@click.option(
+ "--publish",
+ "-p",
+ help="Additional ports that are published to the host",
+ multiple=True,
+ required=False,
+)
+@click.option(
+ "--entrypoint",
+ type=str,
+ required=False,
+ help="Additional entrypoint flag passed to docker",
+)
+@click.option(
+ "--network",
+ type=str,
+ required=False,
+ help="Docker network to start the container in",
+)
+@click.option(
+ "--local-packages",
+ "-l",
+ multiple=True,
+ required=False,
+ type=click.Choice(HOST_PATH_MAPPINGS.keys(), case_sensitive=False),
+ help="Mount specified packages into the container",
+)
+@click.argument("command", nargs=-1, required=False)
+def run(
+ image: str = None,
+ volume_dir: str = None,
+ pro: bool = None,
+ develop: bool = False,
+ randomize: bool = False,
+ mount_source: bool = True,
+ mount_dependencies: bool = False,
+ mount_entrypoints: bool = False,
+ mount_docker_socket: bool = True,
+ env: Tuple = (),
+ volume: Tuple = (),
+ publish: Tuple = (),
+ entrypoint: str = None,
+ network: str = None,
+ local_packages: list[str] | None = None,
+ command: str = None,
+):
+ """
+ A tool for localstack developers to start localstack containers. Run this in your localstack or
+ localstack-ext source tree to mount local source files or dependencies into the container.
+ Here are some examples::
+
+ \b
+ python -m localstack.dev.run
+ python -m localstack.dev.run -e DEBUG=1 -e LOCALSTACK_AUTH_TOKEN=test
+ python -m localstack.dev.run -- bash -c 'echo "hello"'
+
+ Explanations and more examples:
+
+ Start a normal localstack container. If you run this from the localstack-ext repo,
+ it will start localstack-pro::
+
+ python -m localstack.dev.run
+
+ If you start localstack-pro, you might also want to add the API KEY as environment variable::
+
+ python -m localstack.dev.run -e DEBUG=1 -e LOCALSTACK_AUTH_TOKEN=test
+
+ If your local changes are making modifications to plux plugins (e.g., adding new providers or hooks),
+ then you also want to mount the newly generated entry_point.txt files into the container::
+
+ python -m localstack.dev.run --mount-entrypoints
+
+ Start a new container with randomized gateway and service ports, and randomized container name::
+
+ python -m localstack.dev.run --randomize
+
+ You can also run custom commands::
+
+ python -m localstack.dev.run bash -c 'echo "hello"'
+
+ Or use custom entrypoints::
+
+ python -m localstack.dev.run --entrypoint /bin/bash -- echo "hello"
+
+ You can import and expose debugpy::
+
+ python -m localstack.dev.run --develop
+
+ You can also mount local dependencies (e.g., pytest and other test dependencies, and then use that
+ in the container)::
+
+ \b
+ python -m localstack.dev.run --mount-dependencies \\
+ -v $PWD/tests:/opt/code/localstack/tests \\
+ -- .venv/bin/python -m pytest tests/unit/http_/
+
+ The script generally assumes that you are executing in either localstack or localstack-ext source
+ repositories that are organized like this::
+
+ \b
+ somedir <- your workspace directory
+ βββ localstack <- execute script in here
+ β βββ ...
+ β βββ localstack-core
+ β β βββ localstack <- will be mounted into the container
+ β β βββ localstack_core.egg-info
+ β βββ pyproject.toml
+ β βββ tests
+ β βββ ...
+ βββ localstack-ext <- or execute script in here
+ β βββ ...
+ β βββ localstack-pro-core
+ β β βββ localstack
+ β β β βββ pro
+ β β β βββ core <- will be mounted into the container
+ β β βββ localstack_ext.egg-info
+ β β βββ pyproject.toml
+ β β βββ tests
+ β βββ ...
+ βββ moto
+ β βββ AUTHORS.md
+ β βββ ...
+ β βββ moto <- will be mounted into the container
+ β βββ moto_ext.egg-info
+ β βββ pyproject.toml
+ β βββ tests
+ β βββ ...
+
+ You can choose which local source repositories are mounted in. For example, if `moto` and `rolo` are
+ both present, only mount `rolo` into the container.
+
+ \b
+ python -m localstack.dev.run --local-packages rolo
+
+ If both `rolo` and `moto` are available and both should be mounted, use the flag twice.
+
+ \b
+ python -m localstack.dev.run --local-packages rolo --local-packages moto
+ """
+ with console.status("Configuring") as status:
+ env_vars = parse_env_vars(env)
+ configure_licensing_credentials_environment(env_vars)
+
+ # run all prepare_host hooks
+ hooks.prepare_host.run()
+
+ # set the VOLUME_DIR config variable like in the CLI
+ if not os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip():
+ config.VOLUME_DIR = str(cache_dir() / "volume")
+
+ # setup important paths on the host
+ host_paths = HostPaths(
+ # we assume that python -m localstack.dev.run is always executed in the repo source
+ workspace_dir=os.path.abspath(os.path.join(os.getcwd(), "..")),
+ volume_dir=volume_dir or config.VOLUME_DIR,
+ )
+
+ # auto-set pro flag
+ if pro is None:
+ if os.getcwd().endswith("localstack-ext"):
+ pro = True
+ else:
+ pro = False
+
+ # setup base configuration
+ container_config = ContainerConfiguration(
+ image_name=image,
+ name=config.MAIN_CONTAINER_NAME if not randomize else f"localstack-{short_uid()}",
+ remove=True,
+ interactive=True,
+ tty=True,
+ env_vars=dict(),
+ volumes=VolumeMappings(),
+ ports=PortMappings(),
+ network=network,
+ )
+
+ # replicate pro startup
+ if pro:
+ try:
+ from localstack.pro.core.plugins import modify_gateway_listen_config
+
+ modify_gateway_listen_config(config)
+ except ImportError:
+ pass
+
+ # setup configurators
+ configurators = [
+ ImageConfigurator(pro, image),
+ PortConfigurator(randomize),
+ ConfigEnvironmentConfigurator(pro),
+ ContainerConfigurators.mount_localstack_volume(host_paths.volume_dir),
+ ContainerConfigurators.config_env_vars,
+ ]
+
+ # create stub container with configuration to apply
+ c = Container(container_config=container_config)
+
+ # apply existing hooks first that can later be overwritten
+ hooks.configure_localstack_container.run(c)
+
+ if command:
+ configurators.append(ContainerConfigurators.custom_command(list(command)))
+ if entrypoint:
+ container_config.entrypoint = entrypoint
+ if mount_docker_socket:
+ configurators.append(ContainerConfigurators.mount_docker_socket)
+ if mount_source:
+ configurators.append(
+ SourceVolumeMountConfigurator(
+ host_paths=host_paths,
+ pro=pro,
+ chosen_packages=local_packages,
+ )
+ )
+ if mount_entrypoints:
+ configurators.append(EntryPointMountConfigurator(host_paths=host_paths, pro=pro))
+ if mount_dependencies:
+ configurators.append(DependencyMountConfigurator(host_paths=host_paths))
+ if develop:
+ configurators.append(ContainerConfigurators.develop)
+
+ # make sure anything coming from CLI arguments has priority
+ configurators.extend(
+ [
+ ContainerConfigurators.volume_cli_params(volume),
+ ContainerConfigurators.port_cli_params(publish),
+ ContainerConfigurators.env_cli_params(env),
+ ]
+ )
+
+ # run configurators
+ for configurator in configurators:
+ configurator(container_config)
+ # print the config
+ print_config(container_config)
+
+ # run the container
+ docker = CmdDockerClient()
+ status.update("Creating container")
+ container_id = docker.create_container_from_config(container_config)
+
+ rule = Rule(f"Interactive session with {container_id[:12]} π»")
+ console.print(rule)
+ try:
+ cmd = [*docker._docker_cmd(), "start", "--interactive", "--attach", container_id]
+ run_interactive(cmd)
+ finally:
+ if container_config.remove:
+ try:
+ if docker.is_container_running(container_id):
+ docker.stop_container(container_id)
+ docker.remove_container(container_id)
+ except Exception:
+ pass
+
+
+def print_config(cfg: ContainerConfiguration):
+ d = dataclasses.asdict(cfg)
+
+ d["volumes"] = [v.to_str() for v in d["volumes"].mappings]
+ d["ports"] = [p for p in d["ports"].to_list() if p != "-p"]
+
+ for k in list(d.keys()):
+ if d[k] is None:
+ d.pop(k)
+
+ console.print(d)
+
+
+def parse_env_vars(params: Iterable[str] = None) -> dict[str, str]:
+ env = {}
+
+ if not params:
+ return env
+
+ for e in params:
+ if "=" in e:
+ k, v = e.split("=", maxsplit=1)
+ env[k] = v
+ else:
+ # there's currently no way in our abstraction to only pass the variable name (as
+ # you can do in docker) so we resolve the value here.
+ env[e] = os.getenv(e)
+
+ return env
+
+
+def configure_licensing_credentials_environment(env_vars: dict[str, str]):
+ """
+ If an API key or auth token is set in the parsed CLI parameters, then we also set them into the OS environment
+ unless they are already set. This is just convenience so you don't have to set them twice.
+
+ :param env_vars: the environment variables parsed from the CLI parameters
+ """
+ if os.environ.get("LOCALSTACK_API_KEY"):
+ return
+ if os.environ.get("LOCALSTACK_AUTH_TOKEN"):
+ return
+ if api_key := env_vars.get("LOCALSTACK_API_KEY"):
+ os.environ["LOCALSTACK_API_KEY"] = api_key
+ if api_key := env_vars.get("LOCALSTACK_AUTH_TOKEN"):
+ os.environ["LOCALSTACK_AUTH_TOKEN"] = api_key
+
+
+def main():
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/localstack-core/localstack/dev/run/configurators.py b/localstack-core/localstack/dev/run/configurators.py
new file mode 100644
index 0000000000000..4f1b9e3e29cde
--- /dev/null
+++ b/localstack-core/localstack/dev/run/configurators.py
@@ -0,0 +1,375 @@
+"""
+Several ContainerConfigurator implementations to set up a development version of a localstack container.
+"""
+
+import gzip
+import os
+from pathlib import Path, PurePosixPath
+from tempfile import gettempdir
+
+from localstack import config, constants
+from localstack.utils.bootstrap import ContainerConfigurators
+from localstack.utils.container_utils.container_client import (
+ BindMount,
+ ContainerClient,
+ ContainerConfiguration,
+ VolumeMappings,
+)
+from localstack.utils.docker_utils import DOCKER_CLIENT
+from localstack.utils.files import get_user_cache_dir
+from localstack.utils.run import run
+from localstack.utils.strings import md5
+
+from .paths import (
+ HOST_PATH_MAPPINGS,
+ CommunityContainerPaths,
+ ContainerPaths,
+ HostPaths,
+ ProContainerPaths,
+)
+
+
+class ConfigEnvironmentConfigurator:
+ """Configures the environment variables from the localstack and localstack-pro config."""
+
+ def __init__(self, pro: bool):
+ self.pro = pro
+
+ def __call__(self, cfg: ContainerConfiguration):
+ if cfg.env_vars is None:
+ cfg.env_vars = {}
+
+ if self.pro:
+ # importing localstack.pro.core.config extends the list of config vars
+ from localstack.pro.core import config as config_pro # noqa
+
+ ContainerConfigurators.config_env_vars(cfg)
+
+
+class PortConfigurator:
+ """
+ Configures the port mappings. Can be randomized to run multiple localstack instances.
+ """
+
+ def __init__(self, randomize: bool = True):
+ self.randomize = randomize
+
+ def __call__(self, cfg: ContainerConfiguration):
+ cfg.ports.bind_host = config.GATEWAY_LISTEN[0].host
+
+ if self.randomize:
+ ContainerConfigurators.random_gateway_port(cfg)
+ ContainerConfigurators.random_service_port_range()(cfg)
+ else:
+ ContainerConfigurators.gateway_listen(config.GATEWAY_LISTEN)(cfg)
+ ContainerConfigurators.service_port_range(cfg)
+
+
+class ImageConfigurator:
+ """
+ Sets the container image to use for the container (by default either localstack/localstack or
+ localstack/localstack-pro)
+ """
+
+ def __init__(self, pro: bool, image_name: str | None):
+ self.pro = pro
+ self.image_name = image_name
+
+ def __call__(self, cfg: ContainerConfiguration):
+ if self.image_name:
+ cfg.image_name = self.image_name
+ else:
+ if self.pro:
+ cfg.image_name = constants.DOCKER_IMAGE_NAME_PRO
+ else:
+ cfg.image_name = constants.DOCKER_IMAGE_NAME
+
+
+class CustomEntryPointConfigurator:
+ """
+ Creates a ``docker-entrypoint-<hash>.sh`` script from the given source and mounts it into the container.
+ It also configures the container to then use that entrypoint.
+ """
+
+ def __init__(self, script: str, tmp_dir: str = None):
+ self.script = script.lstrip(os.linesep)
+ self.container_paths = ProContainerPaths()
+ self.tmp_dir = tmp_dir
+
+ def __call__(self, cfg: ContainerConfiguration):
+ h = md5(self.script)
+ tempdir = gettempdir() if not self.tmp_dir else self.tmp_dir
+ file_name = f"docker-entrypoint-{h}.sh"
+
+ file = Path(tempdir, file_name)
+ if not file.exists():
+ # newline separator should be '\n' independent of the os, since the entrypoint is executed in the container
+ # encoding needs to be "utf-8" since scripts could include emojis
+ file.write_text(self.script, newline="\n", encoding="utf-8")
+ file.chmod(0o777)
+ cfg.volumes.add(BindMount(str(file), f"/tmp/{file.name}"))
+ cfg.entrypoint = f"/tmp/{file.name}"
+
+
+class SourceVolumeMountConfigurator:
+ """
+ Mounts source code of localstack, localstack_ext, and moto into the container. It does this by assuming
+ that there is a "workspace" directory in which the source repositories are checked out into.
+ Depending on whether we want to start the pro container, the source paths for localstack are different.
+ """
+
+ def __init__(
+ self,
+ *,
+ host_paths: HostPaths = None,
+ pro: bool = False,
+ chosen_packages: list[str] | None = None,
+ ):
+ self.host_paths = host_paths or HostPaths()
+ self.container_paths = ProContainerPaths() if pro else CommunityContainerPaths()
+ self.pro = pro
+ self.chosen_packages = chosen_packages or []
+
+ def __call__(self, cfg: ContainerConfiguration):
+ # localstack source code if available
+ source = self.host_paths.aws_community_package_dir
+ if source.exists():
+ cfg.volumes.add(
+ # read_only=False is a temporary workaround to make the mounting of the pro source work
+ # this can be reverted once we don't need the nested mounting anymore
+ BindMount(str(source), self.container_paths.localstack_source_dir, read_only=False)
+ )
+
+ # ext source code if available
+ if self.pro:
+ source = self.host_paths.aws_pro_package_dir
+ if source.exists():
+ cfg.volumes.add(
+ BindMount(
+ str(source), self.container_paths.localstack_pro_source_dir, read_only=True
+ )
+ )
+
+ # mount local code checkouts if possible
+ for package_name in self.chosen_packages:
+ # Unconditional lookup because the CLI rejects incorrect items
+ extractor = HOST_PATH_MAPPINGS[package_name]
+ self.try_mount_to_site_packages(cfg, extractor(self.host_paths))
+
+ # docker entrypoint
+ if self.pro:
+ source = self.host_paths.localstack_pro_project_dir / "bin" / "docker-entrypoint.sh"
+ else:
+ source = self.host_paths.localstack_project_dir / "bin" / "docker-entrypoint.sh"
+ if source.exists():
+ cfg.volumes.add(
+ BindMount(str(source), self.container_paths.docker_entrypoint, read_only=True)
+ )
+
+ def try_mount_to_site_packages(self, cfg: ContainerConfiguration, sources_path: Path):
+ """
+ Attempts to mount something like `~/workspace/plux/plugin` on the host into
+ ``.venv/.../site-packages/plugin``.
+
+ :param cfg:
+ :param sources_path:
+ :return:
+ """
+ if sources_path.exists():
+ cfg.volumes.add(
+ BindMount(
+ str(sources_path),
+ self.container_paths.dependency_source(sources_path.name),
+ read_only=True,
+ )
+ )
+
+
+class EntryPointMountConfigurator:
+ """
+ Mounts ``entry_points.txt`` files of localstack and dependencies into the venv in the container.
+
+ For example, when starting the pro container, the entrypoints of localstack-ext on the host would be in
+ ``~/workspace/localstack-ext/localstack-pro-core/localstack_ext.egg-info/entry_points.txt``
+ which needs to be mounted into the distribution info of the installed dependency within the container:
+ ``/opt/code/localstack/.venv/.../site-packages/localstack_ext-2.1.0.dev0.dist-info/entry_points.txt``.
+ """
+
+ entry_point_glob = (
+ "/opt/code/localstack/.venv/lib/python3.*/site-packages/*.dist-info/entry_points.txt"
+ )
+ localstack_community_entry_points = (
+ "/opt/code/localstack/localstack_core.egg-info/entry_points.txt"
+ )
+
+ def __init__(
+ self,
+ *,
+ host_paths: HostPaths = None,
+ container_paths: ContainerPaths = None,
+ pro: bool = False,
+ ):
+ self.host_paths = host_paths or HostPaths()
+ self.pro = pro
+ self.container_paths = container_paths or None
+
+ def __call__(self, cfg: ContainerConfiguration):
+ # special case for community code
+ if not self.pro:
+ host_path = self.host_paths.aws_community_package_dir
+ if host_path.exists():
+ cfg.volumes.append(
+ BindMount(
+ str(host_path), self.localstack_community_entry_points, read_only=True
+ )
+ )
+
+ # locate all relevant entry_point.txt files within the container
+ pattern = self.entry_point_glob
+ files = _list_files_in_container_image(DOCKER_CLIENT, cfg.image_name)
+ paths = [PurePosixPath(f) for f in files]
+ paths = [p for p in paths if p.match(pattern)]
+
+ # then, check whether they exist in some form on the host within the workspace directory
+ for container_path in paths:
+ dep_path = container_path.parent.name.removesuffix(".dist-info")
+ dep, ver = dep_path.split("-")
+
+ if dep == "localstack_core":
+ host_path = (
+ self.host_paths.localstack_project_dir
+ / "localstack-core"
+ / "localstack_core.egg-info"
+ / "entry_points.txt"
+ )
+ if host_path.is_file():
+ cfg.volumes.add(
+ BindMount(
+ str(host_path),
+ str(container_path),
+ read_only=True,
+ )
+ )
+ continue
+ elif dep == "localstack_ext":
+ host_path = (
+ self.host_paths.localstack_pro_project_dir
+ / "localstack-pro-core"
+ / "localstack_ext.egg-info"
+ / "entry_points.txt"
+ )
+ if host_path.is_file():
+ cfg.volumes.add(
+ BindMount(
+ str(host_path),
+ str(container_path),
+ read_only=True,
+ )
+ )
+ continue
+ for host_path in self.host_paths.workspace_dir.glob(
+ f"*/{dep}.egg-info/entry_points.txt"
+ ):
+ cfg.volumes.add(BindMount(str(host_path), str(container_path), read_only=True))
+ break
+
+
+class DependencyMountConfigurator:
+ """
+ Mounts source folders from your host's .venv directory into the container's .venv.
+ """
+
+ dependency_glob = "/opt/code/localstack/.venv/lib/python3.*/site-packages/*"
+
+ # skip mounting dependencies with incompatible binaries (e.g., on macOS)
+ skipped_dependencies = ["cryptography", "psutil", "rpds"]
+
+ def __init__(
+ self,
+ *,
+ host_paths: HostPaths = None,
+ container_paths: ContainerPaths = None,
+ pro: bool = False,
+ ):
+ self.host_paths = host_paths or HostPaths()
+ self.pro = pro
+ self.container_paths = container_paths or (
+ ProContainerPaths() if pro else CommunityContainerPaths()
+ )
+
+ def __call__(self, cfg: ContainerConfiguration):
+ # locate all relevant dependency directories
+ pattern = self.dependency_glob
+ files = _list_files_in_container_image(DOCKER_CLIENT, cfg.image_name)
+ paths = [PurePosixPath(f) for f in files]
+ # builds an index of "jinja2: /opt/code/.../site-packages/jinja2"
+ container_path_index = {p.name: p for p in paths if p.match(pattern)}
+
+ # find dependencies from the host
+ for dep_path in self.host_paths.venv_dir.glob("lib/python3.*/site-packages/*"):
+ # filter out everything that heuristically cannot be a source path
+ if not self._can_be_source_path(dep_path):
+ continue
+ if dep_path.name.endswith(".dist-info"):
+ continue
+ if dep_path.name == "__pycache__":
+ continue
+
+ if dep_path.name in self.skipped_dependencies:
+ continue
+
+ if dep_path.name in container_path_index:
+ # find the target path in the index if it exists
+ target_path = str(container_path_index[dep_path.name])
+ else:
+ # if the given dependency is not in the container, then we mount it anyway
+ # FIXME: we should also mount the dist-info directory. perhaps this method should be
+ # re-written completely
+ target_path = self.container_paths.dependency_source(dep_path.name)
+
+ if self._has_mount(cfg.volumes, target_path):
+ continue
+
+ cfg.volumes.append(BindMount(str(dep_path), target_path))
+
+ def _can_be_source_path(self, path: Path) -> bool:
+ return path.is_dir() or (path.name.endswith(".py") and not path.name.startswith("__"))
+
+ def _has_mount(self, volumes: VolumeMappings, target_path: str) -> bool:
+ return True if volumes.find_target_mapping(target_path) else False
+
+
+def _list_files_in_container_image(container_client: ContainerClient, image_name: str) -> list[str]:
+ """
+ Uses ``docker export | tar -t`` to list all files in a given docker image. It caches the result based on
+ the image ID into a gzipped file in ``~/.cache/localstack-dev-cli`` to (significantly) speed up
+ subsequent calls.
+
+ :param container_client: the container client to use
+ :param image_name: the container image to analyze
+ :return: a list of file paths
+ """
+ if not image_name:
+ raise ValueError("missing image name")
+
+ image_id = container_client.inspect_image(image_name)["Id"]
+
+ cache_dir = get_user_cache_dir() / "localstack-dev-cli"
+ cache_dir.mkdir(exist_ok=True, parents=True)
+ cache_file = cache_dir / f"{image_id}.files.txt.gz"
+
+ if not cache_file.exists():
+ container_id = container_client.create_container(image_name=image_name)
+ try:
+ # docker export yields paths without prefixed slashes, so we add them here
+ # since the file is pretty big (~4MB for community, ~7MB for pro) we gzip it
+ cmd = "docker export %s | tar -t | awk '{ print \"/\" $0 }' | gzip > %s" % (
+ container_id,
+ cache_file,
+ )
+ run(cmd, shell=True)
+ finally:
+ container_client.remove_container(container_id)
+
+ with gzip.open(cache_file, mode="rt") as fd:
+ return fd.read().splitlines(keepends=False)
diff --git a/localstack-core/localstack/dev/run/paths.py b/localstack-core/localstack/dev/run/paths.py
new file mode 100644
index 0000000000000..b1fe9a95f24fd
--- /dev/null
+++ b/localstack-core/localstack/dev/run/paths.py
@@ -0,0 +1,94 @@
+"""Utilities to resolve important paths on the host and in the container."""
+
+import os
+from pathlib import Path
+from typing import Callable, Optional, Union
+
+
+class HostPaths:
+ workspace_dir: Path
+ """We assume all repositories live in a workspace directory, e.g., ``~/workspace/ls/localstack``,
+ ``~/workspace/ls/localstack-ext``, ..."""
+
+ localstack_project_dir: Path
+ localstack_pro_project_dir: Path
+ moto_project_dir: Path
+ postgresql_proxy: Path
+ rolo_dir: Path
+ volume_dir: Path
+ venv_dir: Path
+
+ def __init__(
+ self,
+ workspace_dir: Union[os.PathLike, str] = None,
+ volume_dir: Union[os.PathLike, str] = None,
+ venv_dir: Union[os.PathLike, str] = None,
+ ):
+ self.workspace_dir = Path(workspace_dir or os.path.abspath(os.path.join(os.getcwd(), "..")))
+ self.localstack_project_dir = self.workspace_dir / "localstack"
+ self.localstack_pro_project_dir = self.workspace_dir / "localstack-ext"
+ self.moto_project_dir = self.workspace_dir / "moto"
+ self.postgresql_proxy = self.workspace_dir / "postgresql-proxy"
+ self.rolo_dir = self.workspace_dir / "rolo"
+ self.volume_dir = Path(volume_dir or "/tmp/localstack")
+ self.venv_dir = Path(
+ venv_dir
+ or os.getenv("VIRTUAL_ENV")
+ or os.getenv("VENV_DIR")
+ or os.path.join(os.getcwd(), ".venv")
+ )
+
+ @property
+ def aws_community_package_dir(self) -> Path:
+ return self.localstack_project_dir / "localstack-core" / "localstack"
+
+ @property
+ def aws_pro_package_dir(self) -> Path:
+ return (
+ self.localstack_pro_project_dir / "localstack-pro-core" / "localstack" / "pro" / "core"
+ )
+
+
+# Type representing how to extract a specific path from a common root path, typically a lambda function
+PathMappingExtractor = Callable[[HostPaths], Path]
+
+# Declaration of which local packages can be mounted into the container, and their locations on the host
+HOST_PATH_MAPPINGS: dict[
+ str,
+ PathMappingExtractor,
+] = {
+ "moto": lambda paths: paths.moto_project_dir / "moto",
+ "postgresql_proxy": lambda paths: paths.postgresql_proxy / "postgresql_proxy",
+ "rolo": lambda paths: paths.rolo_dir / "rolo",
+ "plux": lambda paths: paths.workspace_dir / "plux" / "plugin",
+}
+
+
+class ContainerPaths:
+ """Important paths in the container"""
+
+ project_dir: str = "/opt/code/localstack"
+ site_packages_target_dir: str = "/opt/code/localstack/.venv/lib/python3.11/site-packages"
+ docker_entrypoint: str = "/usr/local/bin/docker-entrypoint.sh"
+ localstack_supervisor: str = "/usr/local/bin/localstack-supervisor"
+ localstack_source_dir: str
+ localstack_pro_source_dir: Optional[str]
+
+ def dependency_source(self, name: str) -> str:
+ """Returns path of the given source dependency in the site-packages directory."""
+ return self.site_packages_target_dir + f"/{name}"
+
+
+class CommunityContainerPaths(ContainerPaths):
+ """In the community image, code is copied into /opt/code/localstack/localstack-core/localstack"""
+
+ def __init__(self):
+ self.localstack_source_dir = f"{self.project_dir}/localstack-core/localstack"
+
+
+class ProContainerPaths(ContainerPaths):
+ """In the pro image, localstack and ext are installed into the venv as dependency"""
+
+ def __init__(self):
+ self.localstack_source_dir = self.dependency_source("localstack")
+ self.localstack_pro_source_dir = self.dependency_source("localstack") + "/pro/core"
diff --git a/localstack/services/dynamodbstreams/__init__.py b/localstack-core/localstack/dns/__init__.py
similarity index 100%
rename from localstack/services/dynamodbstreams/__init__.py
rename to localstack-core/localstack/dns/__init__.py
diff --git a/localstack-core/localstack/dns/models.py b/localstack-core/localstack/dns/models.py
new file mode 100644
index 0000000000000..6df70bf6e0d86
--- /dev/null
+++ b/localstack-core/localstack/dns/models.py
@@ -0,0 +1,175 @@
+import dataclasses
+from enum import Enum, auto
+from typing import Callable, Protocol
+
+
+class RecordType(Enum):
+ A = auto()
+ AAAA = auto()
+ CNAME = auto()
+ TXT = auto()
+ MX = auto()
+ SOA = auto()
+ NS = auto()
+ SRV = auto()
+
+
+@dataclasses.dataclass(frozen=True)
+class NameRecord:
+ """
+ Dataclass of a stored record
+ """
+
+ record_type: RecordType
+ record_id: str | None = None
+
+
+@dataclasses.dataclass(frozen=True)
+class _TargetRecordBase:
+ """
+ Dataclass of a stored record
+ """
+
+ target: str
+
+
+@dataclasses.dataclass(frozen=True)
+class TargetRecord(NameRecord, _TargetRecordBase):
+ pass
+
+
+@dataclasses.dataclass(frozen=True)
+class _SOARecordBase:
+ m_name: str
+ r_name: str
+
+
+@dataclasses.dataclass(frozen=True)
+class SOARecord(NameRecord, _SOARecordBase):
+ pass
+
+
+@dataclasses.dataclass(frozen=True)
+class AliasTarget:
+ target: str
+ alias_id: str | None = None
+ health_check: Callable[[], bool] | None = None
+
+
+@dataclasses.dataclass(frozen=True)
+class _DynamicRecordBase:
+ """
+ Dataclass of a record that is dynamically determined at query time to return the IP address
+ of the LocalStack container
+ """
+
+ record_type: RecordType
+
+
+@dataclasses.dataclass(frozen=True)
+class DynamicRecord(NameRecord, _DynamicRecordBase):
+ pass
+
+
+# TODO decide if we need the whole concept of multiple zones in our DNS implementation
+class DnsServerProtocol(Protocol):
+ def add_host(self, name: str, record: NameRecord) -> None:
+ """
+ Add a host resolution to the DNS server.
+ This will resolve the given host to the record provided, if it matches.
+
+ :param name: Name pattern to add resolution for. Can be arbitrary regex.
+ :param record: Record, consisting of a record type, an optional record id, and the attached data.
+ Has to be a subclass of a NameRecord, not a NameRecord itself to contain some data.
+ """
+ pass
+
+ def delete_host(self, name: str, record: NameRecord) -> None:
+ """
+ Deletes a host resolution from the DNS server.
+ Only the name, the record type, and optionally the given record id will be used to find entries to delete.
+ All matching entries will be deleted.
+
+        :param name: Name pattern, identical to the one registered with `add_host`
+        :param record: Record, ideally identical to the one registered with add_host but only record_type and
+ record_id have to match to find the record.
+
+ :raises ValueError: If no record that was previously registered with `add_host` was found which matches the provided record
+ """
+ pass
+
+ def add_host_pointing_to_localstack(self, name: str) -> None:
+ """
+ Add a dns name which should be pointing to LocalStack when resolved.
+
+ :param name: Name which should be pointing to LocalStack when resolved
+ """
+ pass
+
+ def delete_host_pointing_to_localstack(self, name: str) -> None:
+ """
+ Removes a dns name from pointing to LocalStack
+
+ :param name: Name to be removed
+ :raises ValueError: If the host pointing to LocalStack was not previously registered using `add_host_pointing_to_localstack`
+ """
+ pass
+
+ def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget) -> None:
+ """
+ Adds an alias to the DNS, with an optional healthcheck callback.
+ When a request which matches `source_name` comes in, the DNS will check the aliases, and if the healthcheck
+ (if provided) succeeds, the resolution result for the `target_name` will be returned instead.
+ If multiple aliases are registered for the same source_name record_type tuple, and no health checks interfere,
+ the server will process requests with the first added alias
+
+ :param source_name: Alias name
+ :param record_type: Record type of the alias
+ :param target: Target of the alias
+ """
+ pass
+
+ def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget) -> None:
+ """
+ Removes an alias from the DNS.
+ Only the name, the record type, and optionally the given alias id will be used to find entries to delete.
+ All matching entries will be deleted.
+
+ :param source_name: Alias name
+ :param record_type: Record type of the alias to remove
+ :param target: Target of the alias. Only relevant data for deletion will be its id.
+ :raises ValueError: If the alias was not previously registered using `add_alias`
+ """
+ pass
+
+ # TODO: support regex or wildcard?
+ # need to update when custom cloudpod destination is enabled
+ # has standard list of skips: localstack.services.dns_server.SKIP_PATTERNS
+ def add_skip(self, skip_pattern: str) -> None:
+ """
+ Add a skip pattern to the DNS server.
+
+        A skip pattern will prevent the DNS server from resolving a matching request against its internal zones or
+ aliases, and will directly contact an upstream DNS for resolution.
+
+ This is usually helpful if AWS endpoints are overwritten by internal entries, but we have to reach AWS for
+ some reason. (Often used for cloudpods or installers).
+
+ :param skip_pattern: Skip pattern to add. Can be a valid regex.
+ """
+ pass
+
+ def delete_skip(self, skip_pattern: str) -> None:
+ """
+ Removes a skip pattern from the DNS server.
+
+ :param skip_pattern: Skip pattern to remove
+ :raises ValueError: If the skip pattern was not previously registered using `add_skip`
+ """
+ pass
+
+ def clear(self):
+ """
+ Removes all runtime configurations.
+ """
+ pass
diff --git a/localstack-core/localstack/dns/plugins.py b/localstack-core/localstack/dns/plugins.py
new file mode 100644
index 0000000000000..05566573cfec8
--- /dev/null
+++ b/localstack-core/localstack/dns/plugins.py
@@ -0,0 +1,45 @@
+import logging
+
+from localstack import config
+from localstack.runtime import hooks
+
+LOG = logging.getLogger(__name__)
+
+# Note: Don't want to introduce a possible import order conflict by importing SERVICE_SHUTDOWN_PRIORITY
+# TODO: consider extracting these priorities into some static configuration
+DNS_SHUTDOWN_PRIORITY = -30
+"""Make sure the DNS server is shut down after the ON_AFTER_SERVICE_SHUTDOWN_HANDLERS, which in turn is after
+SERVICE_SHUTDOWN_PRIORITY. Currently this value needs to be less than -20"""
+
+
+@hooks.on_infra_start(priority=10)
+def start_dns_server():
+ try:
+ from localstack.dns import server
+
+ server.start_dns_server(port=config.DNS_PORT, asynchronous=True)
+ except Exception as e:
+ LOG.warning("Unable to start DNS: %s", e)
+
+
+@hooks.on_infra_start()
+def setup_dns_configuration_on_host():
+ try:
+ from localstack.dns import server
+
+ if server.is_server_running():
+ # Prepare network interfaces for DNS server for the infra.
+ server.setup_network_configuration()
+ except Exception as e:
+ LOG.warning("error setting up dns server: %s", e)
+
+
+@hooks.on_infra_shutdown(priority=DNS_SHUTDOWN_PRIORITY)
+def stop_server():
+ try:
+ from localstack.dns import server
+
+ server.revert_network_configuration()
+ server.stop_servers()
+ except Exception as e:
+ LOG.warning("Unable to stop DNS servers: %s", e)
diff --git a/localstack-core/localstack/dns/server.py b/localstack-core/localstack/dns/server.py
new file mode 100644
index 0000000000000..f32d81292c75e
--- /dev/null
+++ b/localstack-core/localstack/dns/server.py
@@ -0,0 +1,1003 @@
+import argparse
+import copy
+import logging
+import os
+import re
+import textwrap
+import threading
+from datetime import datetime
+from functools import cache
+from ipaddress import IPv4Address, IPv4Interface
+from pathlib import Path
+from socket import AddressFamily
+from typing import Iterable, Literal, Tuple
+
+import psutil
+from cachetools import TTLCache, cached
+from dnslib import (
+ AAAA,
+ CNAME,
+ MX,
+ NS,
+ QTYPE,
+ RCODE,
+ RD,
+ RDMAP,
+ RR,
+ SOA,
+ TXT,
+ A,
+ DNSHeader,
+ DNSLabel,
+ DNSQuestion,
+ DNSRecord,
+)
+from dnslib.server import DNSHandler, DNSServer
+from psutil._common import snicaddr
+
+import dns.flags
+import dns.message
+import dns.query
+from dns.exception import Timeout
+
+# Note: avoid adding additional imports here, to avoid import issues when running the CLI
+from localstack import config
+from localstack.constants import LOCALHOST_HOSTNAME, LOCALHOST_IP
+from localstack.dns.models import (
+ AliasTarget,
+ DnsServerProtocol,
+ DynamicRecord,
+ NameRecord,
+ RecordType,
+ SOARecord,
+ TargetRecord,
+)
+from localstack.services.edge import run_module_as_sudo
+from localstack.utils import iputils
+from localstack.utils.net import Port, port_can_be_bound
+from localstack.utils.platform import in_docker
+from localstack.utils.serving import Server
+from localstack.utils.strings import to_bytes, to_str
+from localstack.utils.sync import sleep_forever
+
+EPOCH = datetime(1970, 1, 1)
+SERIAL = int((datetime.utcnow() - EPOCH).total_seconds())
+
+DEFAULT_FALLBACK_DNS_SERVER = "8.8.8.8"
+FALLBACK_DNS_LOCK = threading.RLock()
+VERIFICATION_DOMAIN = config.DNS_VERIFICATION_DOMAIN
+
+RCODE_REFUSED = 5
+
+DNS_SERVER: "DnsServerProtocol" = None
+PREVIOUS_RESOLV_CONF_FILE: str | None = None
+
+REQUEST_TIMEOUT_SECS = 7
+
+TYPE_LOOKUP = {
+ A: QTYPE.A,
+ AAAA: QTYPE.AAAA,
+ CNAME: QTYPE.CNAME,
+ MX: QTYPE.MX,
+ NS: QTYPE.NS,
+ SOA: QTYPE.SOA,
+ TXT: QTYPE.TXT,
+}
+
+LOG = logging.getLogger(__name__)
+
+THREAD_LOCAL = threading.local()
+
+# Type of the value given by DNSHandler.client_address
+# in the form (ip, port) e.g. ("127.0.0.1", 58291)
+ClientAddress = Tuple[str, int]
+
+psutil_cache = TTLCache(maxsize=100, ttl=10)
+
+
+# TODO: update route53 provider to use this util
+def normalise_dns_name(name: DNSLabel | str) -> str:
+ name = str(name)
+ if not name.endswith("."):
+ return f"{name}."
+
+ return name
+
+
+@cached(cache=psutil_cache)
+def list_network_interface_details() -> dict[str, list[snicaddr]]:
+ return psutil.net_if_addrs()
+
+
+class Record:
+ def __init__(self, rdata_type, *args, **kwargs):
+ rtype = kwargs.get("rtype")
+ rname = kwargs.get("rname")
+ ttl = kwargs.get("ttl")
+
+ if isinstance(rdata_type, RD):
+ # actually an instance, not a type
+ self._rtype = TYPE_LOOKUP[rdata_type.__class__]
+ rdata = rdata_type
+ else:
+ self._rtype = TYPE_LOOKUP[rdata_type]
+ if rdata_type == SOA and len(args) == 2:
+ # add sensible times to SOA
+ args += (
+ (
+ SERIAL, # serial number
+ 60 * 60 * 1, # refresh
+ 60 * 60 * 3, # retry
+ 60 * 60 * 24, # expire
+ 60 * 60 * 1, # minimum
+ ),
+ )
+ rdata = rdata_type(*args)
+
+ if rtype:
+ self._rtype = rtype
+ self._rname = rname
+ self.kwargs = dict(rdata=rdata, ttl=self.sensible_ttl() if ttl is None else ttl, **kwargs)
+
+ def try_rr(self, q):
+ if q.qtype == QTYPE.ANY or q.qtype == self._rtype:
+ return self.as_rr(q.qname)
+
+ def as_rr(self, alt_rname):
+ return RR(rname=self._rname or alt_rname, rtype=self._rtype, **self.kwargs)
+
+ def sensible_ttl(self):
+ if self._rtype in (QTYPE.NS, QTYPE.SOA):
+ return 60 * 60 * 24
+ else:
+ return 300
+
+ @property
+ def is_soa(self):
+ return self._rtype == QTYPE.SOA
+
+ def __str__(self):
+ return f"{QTYPE[self._rtype]}({self.kwargs})"
+
+ def __repr__(self):
+ return self.__str__()
+
+
+class RecordConverter:
+ """
+ Handles returning the correct DNS record for the stored name_record.
+
+ Particularly, if the record is a DynamicRecord, then perform dynamic IP address lookup.
+ """
+
+ def __init__(self, request: DNSRecord, client_address: ClientAddress):
+ self.request = request
+ self.client_address = client_address
+
+ def to_record(self, name_record: NameRecord) -> Record:
+ """
+ :param name_record: Internal representation of the name entry
+ :return: Record type for the associated name record
+ """
+ match name_record:
+ case TargetRecord(target=target, record_type=record_type):
+ return Record(RDMAP.get(record_type.name), target)
+ case SOARecord(m_name=m_name, r_name=r_name, record_type=_):
+ return Record(SOA, m_name, r_name)
+ case DynamicRecord(record_type=record_type):
+ # Marker indicating that the target of the domain name lookup should be resolved
+ # dynamically at query time to the most suitable LocalStack container IP address
+ ip = self._determine_best_ip()
+ # TODO: be more dynamic with IPv6
+ if record_type == RecordType.AAAA:
+ ip = "::1"
+ return Record(RDMAP.get(record_type.name), ip)
+ case _:
+ raise NotImplementedError(f"Record type '{type(name_record)}' not implemented")
+
+ def _determine_best_ip(self) -> str:
+ client_ip, _ = self.client_address
+ # allow for overriding if required
+ if config.DNS_RESOLVE_IP != LOCALHOST_IP:
+ return config.DNS_RESOLVE_IP
+
+ # Look up best matching ip address for the client
+ interfaces = self._fetch_interfaces()
+ for interface in interfaces:
+ subnet = interface.network
+ ip_address = IPv4Address(client_ip)
+ if ip_address in subnet:
+ # check if the request has come from the gateway or not. If so
+ # assume the request has come from the host, and return
+ # 127.0.0.1
+ if config.is_in_docker and self._is_gateway(ip_address):
+ return LOCALHOST_IP
+
+ return str(interface.ip)
+
+ # no best solution found
+ LOG.warning(
+ "could not determine subnet-matched IP address for %s, falling back to %s",
+ self.request.q.qname,
+ LOCALHOST_IP,
+ )
+ return LOCALHOST_IP
+
+ @staticmethod
+ def _is_gateway(ip: IPv4Address) -> bool:
+ """
+        Look up the gateways that this container has, and return True if the
+ supplied ip address is in that list.
+ """
+ return ip == iputils.get_default_gateway()
+
+ @staticmethod
+ def _fetch_interfaces() -> Iterable[IPv4Interface]:
+ interfaces = list_network_interface_details()
+ for _, addresses in interfaces.items():
+ for address in addresses:
+ if address.family != AddressFamily.AF_INET:
+ # TODO: IPv6
+ continue
+
+ # argument is of the form e.g. 127.0.0.1/255.0.0.0
+ net = IPv4Interface(f"{address.address}/{address.netmask}")
+ yield net
+
+
+class NonLoggingHandler(DNSHandler):
+ """Subclass of DNSHandler that avoids logging to stdout on error"""
+
+ def handle(self, *args, **kwargs):
+ try:
+ THREAD_LOCAL.client_address = self.client_address
+ THREAD_LOCAL.server = self.server
+ THREAD_LOCAL.request = self.request
+ return super(NonLoggingHandler, self).handle(*args, **kwargs)
+ except Exception:
+ pass
+
+
+# List of unique non-subdomain prefixes (e.g., data-) from endpoint.hostPrefix in the botocore specs.
+# Subdomain-prefixes (e.g., api.) work properly unless DNS rebind protection blocks DNS resolution, but
+# these `-` dash-prefixes require special consideration.
+# IMPORTANT: Adding a new host prefix here requires deploying a public DNS entry to ensure proper DNS resolution for
+# such non-dot prefixed domains (e.g., data-localhost.localstack.cloud)
+# LIMITATION: As of 2025-05-26, only used prefixes are deployed to our public DNS, including `sync-` and `data-`
+HOST_PREFIXES_NO_SUBDOMAIN = [
+ "analytics-",
+ "control-storage-",
+ "data-",
+ "query-",
+ "runtime-",
+ "storage-",
+ "streaming-",
+ "sync-",
+ "tags-",
+ "workflows-",
+]
+HOST_PREFIX_NAME_PATTERNS = [
+ f"{host_prefix}{LOCALHOST_HOSTNAME}" for host_prefix in HOST_PREFIXES_NO_SUBDOMAIN
+]
+
+NAME_PATTERNS_POINTING_TO_LOCALSTACK = [
+ f".*{LOCALHOST_HOSTNAME}",
+ *HOST_PREFIX_NAME_PATTERNS,
+]
+
+
+def exclude_from_resolution(domain_regex: str):
+ """
+ Excludes the given domain pattern from being resolved to LocalStack.
+    Currently only works in docker, since in host mode the DNS server is started as a separate process
+ :param domain_regex: Domain regex string
+ """
+ if DNS_SERVER:
+ DNS_SERVER.add_skip(domain_regex)
+
+
+def revert_exclude_from_resolution(domain_regex: str):
+ """
+ Reverts the exclusion of the given domain pattern
+ :param domain_regex: Domain regex string
+ """
+ try:
+ if DNS_SERVER:
+ DNS_SERVER.delete_skip(domain_regex)
+ except ValueError:
+ pass
+
+
+def _should_delete_zone(record_to_delete: NameRecord, record_to_check: NameRecord):
+ """
+ Helper function to check if we should delete the record_to_check from the list we are iterating over
+ :param record_to_delete: Record which we got from the delete request
+ :param record_to_check: Record to be checked if it should be included in the records after delete
+ :return:
+ """
+ if record_to_delete == record_to_check:
+ return True
+ return (
+ record_to_delete.record_type == record_to_check.record_type
+ and record_to_delete.record_id == record_to_check.record_id
+ )
+
+
+def _should_delete_alias(alias_to_delete: AliasTarget, alias_to_check: AliasTarget):
+ """
+ Helper function to check if we should delete the alias_to_check from the list we are iterating over
+ :param alias_to_delete: Alias which we got from the delete request
+ :param alias_to_check: Alias to be checked if it should be included in the records after delete
+ :return:
+ """
+ return alias_to_delete.alias_id == alias_to_check.alias_id
+
+
+class NoopLogger:
+ """
+ Necessary helper class to avoid logging of any dns records by dnslib
+ """
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def log_pass(self, *args, **kwargs):
+ pass
+
+ def log_prefix(self, *args, **kwargs):
+ pass
+
+ def log_recv(self, *args, **kwargs):
+ pass
+
+ def log_send(self, *args, **kwargs):
+ pass
+
+ def log_request(self, *args, **kwargs):
+ pass
+
+ def log_reply(self, *args, **kwargs):
+ pass
+
+ def log_truncated(self, *args, **kwargs):
+ pass
+
+ def log_error(self, *args, **kwargs):
+ pass
+
+ def log_data(self, *args, **kwargs):
+ pass
+
+
+class Resolver(DnsServerProtocol):
+ # Upstream DNS server
+ upstream_dns: str
+ # List of patterns which will be skipped for local resolution and always forwarded to upstream
+ skip_patterns: list[str]
+ # Dict of zones: (domain name or pattern) -> list[dns records]
+ zones: dict[str, list[NameRecord]]
+ # Alias map (source_name, record_type) => target_name (target name then still has to be resolved!)
+ aliases: dict[tuple[DNSLabel, RecordType], list[AliasTarget]]
+ # Lock to prevent issues due to concurrent modifications
+ lock: threading.RLock
+
+ def __init__(self, upstream_dns: str):
+ self.upstream_dns = upstream_dns
+ self.skip_patterns = []
+ self.zones = {}
+ self.aliases = {}
+ self.lock = threading.RLock()
+
+    def resolve(self, request: DNSRecord, handler: DNSHandler) -> DNSRecord | None:
+        """
+        Resolve a given request, by either checking locally registered records, or forwarding to the defined
+        upstream DNS server.
+
+        :param request: DNS Request
+        :param handler: Unused.
+        :return: DNS Reply
+        """
+        reply = request.reply()
+        found = False
+
+        try:
+            if not self._skip_local_resolution(request):
+                found = self._resolve_name(request, reply, handler.client_address)
+        except Exception as e:
+            LOG.info("Unable to get DNS result: %s", e)
+
+        if found:
+            return reply
+
+        # If we did not find a matching record in our local zones, we forward to our upstream dns
+        try:
+            req_parsed = dns.message.from_wire(bytes(request.pack()))
+            r = dns.query.udp(req_parsed, self.upstream_dns, timeout=REQUEST_TIMEOUT_SECS)
+            result = self._map_response_dnspython_to_dnslib(r)
+            return result
+        except Exception as e:
+            LOG.info(
+                "Unable to get DNS result from upstream server %s for domain %s: %s",
+                self.upstream_dns,
+                str(request.q.qname),
+                e,
+            )
+
+        # if we cannot reach upstream dns, return SERVFAIL
+        if not reply.rr and reply.header.get_rcode() == RCODE.NOERROR:
+            # setting this return code will cause commands like 'host' to try the next nameserver
+            reply.header.set_rcode(RCODE.SERVFAIL)
+            return None
+
+        return reply
+
+ def _skip_local_resolution(self, request) -> bool:
+ """
+ Check whether we should skip local resolution for the given request, and directly contact upstream
+
+ :param request: DNS Request
+ :return: Whether the request local resolution should be skipped
+ """
+ request_name = to_str(str(request.q.qname))
+ for p in self.skip_patterns:
+ if re.match(p, request_name):
+ return True
+ return False
+
+ def _resolve_alias(
+ self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+ ) -> bool:
+ if request.q.qtype in (QTYPE.A, QTYPE.AAAA, QTYPE.CNAME):
+ key = (DNSLabel(to_bytes(request.q.qname)), RecordType[QTYPE[request.q.qtype]])
+ # check if we have aliases defined for our given qname/qtype pair
+ if aliases := self.aliases.get(key):
+ for alias in aliases:
+ # if there is no health check, or the healthcheck is successful, we will consider this alias
+ # take the first alias passing this check
+ if not alias.health_check or alias.health_check():
+ request_copy: DNSRecord = copy.deepcopy(request)
+ request_copy.q.qname = alias.target
+ # check if we can resolve the alias
+ found = self._resolve_name_from_zones(request_copy, reply, client_address)
+ if found:
+ LOG.debug(
+ "Found entry for AliasTarget '%s' ('%s')", request.q.qname, alias
+ )
+ # change the replaced rr-DNS names back to the original request
+ for rr in reply.rr:
+ rr.set_rname(request.q.qname)
+ else:
+ reply.header.set_rcode(RCODE.REFUSED)
+ return True
+ return False
+
+ def _resolve_name(
+ self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+ ) -> bool:
+ if alias_found := self._resolve_alias(request, reply, client_address):
+ LOG.debug("Alias found: %s", request.q.qname)
+ return alias_found
+ return self._resolve_name_from_zones(request, reply, client_address)
+
+ def _resolve_name_from_zones(
+ self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+ ) -> bool:
+ found = False
+
+ converter = RecordConverter(request, client_address)
+
+ # check for direct (not regex based) response
+ zone = self.zones.get(normalise_dns_name(request.q.qname))
+ if zone is not None:
+ for zone_records in zone:
+ rr = converter.to_record(zone_records).try_rr(request.q)
+ if rr:
+ found = True
+ reply.add_answer(rr)
+ else:
+ # no direct zone so look for an SOA record for a higher level zone
+ for zone_label, zone_records in self.zones.items():
+ # try regex match
+ pattern = re.sub(r"(^|[^.])\*", ".*", str(zone_label))
+ if re.match(pattern, str(request.q.qname)):
+ for record in zone_records:
+ rr = converter.to_record(record).try_rr(request.q)
+ if rr:
+ found = True
+ reply.add_answer(rr)
+ # try suffix match
+ elif request.q.qname.matchSuffix(to_bytes(zone_label)):
+ try:
+ soa_record = next(r for r in zone_records if converter.to_record(r).is_soa)
+ except StopIteration:
+ continue
+ else:
+ found = True
+ reply.add_answer(converter.to_record(soa_record).as_rr(zone_label))
+ break
+ return found
+
+ def _parse_section(self, section: str) -> list[RR]:
+ result = []
+ for line in section.split("\n"):
+ line = line.strip()
+ if line:
+ if line.startswith(";"):
+ # section ended, stop parsing
+ break
+ else:
+ result += RR.fromZone(line)
+ return result
+
+ def _map_response_dnspython_to_dnslib(self, response):
+ """Map response object from dnspython to dnslib (looks like we cannot
+ simply export/import the raw messages from the wire)"""
+ flags = dns.flags.to_text(response.flags)
+
+ def flag(f):
+ return 1 if f.upper() in flags else 0
+
+ questions = []
+ for q in response.question:
+ questions.append(DNSQuestion(qname=str(q.name), qtype=q.rdtype, qclass=q.rdclass))
+
+ result = DNSRecord(
+ DNSHeader(
+ qr=flag("qr"), aa=flag("aa"), ra=flag("ra"), id=response.id, rcode=response.rcode()
+ ),
+ q=questions[0],
+ )
+
+ # extract answers
+ answer_parts = str(response).partition(";ANSWER")
+ result.add_answer(*self._parse_section(answer_parts[2]))
+ # extract authority information
+ authority_parts = str(response).partition(";AUTHORITY")
+ result.add_auth(*self._parse_section(authority_parts[2]))
+ return result
+
+ def add_host(self, name: str, record: NameRecord):
+ LOG.debug("Adding host %s with record %s", name, record)
+ name = normalise_dns_name(name)
+ with self.lock:
+ self.zones.setdefault(name, [])
+ self.zones[name].append(record)
+
+    def delete_host(self, name: str, record: NameRecord):
+        LOG.debug("Deleting host %s with record %s", name, record)
+        name = normalise_dns_name(name)
+        with self.lock:
+            if not self.zones.get(name):
+                raise ValueError(f"Could not find entry {record} for name {name} in zones")
+            self.zones.setdefault(name, [])
+            current_zones = self.zones[name]
+            self.zones[name] = [
+                zone for zone in self.zones[name] if not _should_delete_zone(record, zone)
+            ]
+            if self.zones[name] == current_zones:
+                raise ValueError(f"Could not find entry {record} for name {name} in zones")
+            # if we deleted the last entry, clean up
+            if not self.zones[name]:
+                del self.zones[name]
+
+ def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+ LOG.debug("Adding alias %s with record type %s target %s", source_name, record_type, target)
+ label = (DNSLabel(to_bytes(source_name)), record_type)
+ with self.lock:
+ self.aliases.setdefault(label, [])
+ self.aliases[label].append(target)
+
+    def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+        LOG.debug(
+            "Deleting alias %s with record type %s",
+            source_name,
+            record_type,
+        )
+        label = (DNSLabel(to_bytes(source_name)), record_type)
+        with self.lock:
+            if not self.aliases.get(label):
+                raise ValueError(
+                    f"Could not find entry {target} "
+                    f"for name {source_name}, "
+                    f"record type {record_type} "
+                    f"in aliases"
+                )
+            self.aliases.setdefault(label, [])
+            current_aliases = self.aliases[label]
+            self.aliases[label] = [
+                alias for alias in self.aliases[label] if not _should_delete_alias(target, alias)
+            ]
+            if self.aliases[label] == current_aliases:
+                raise ValueError(
+                    f"Could not find entry {target} "
+                    f"for name {source_name}, "
+                    f"record type {record_type} "
+                    f"in aliases"
+                )
+            # if we deleted the last entry, clean up
+            if not self.aliases[label]:
+                del self.aliases[label]
+
+ def add_host_pointing_to_localstack(self, name: str):
+ LOG.debug("Adding host %s pointing to LocalStack", name)
+ self.add_host(name, DynamicRecord(record_type=RecordType.A))
+ if config.DNS_RESOLVE_IP == config.LOCALHOST_IP:
+ self.add_host(name, DynamicRecord(record_type=RecordType.AAAA))
+
+ def delete_host_pointing_to_localstack(self, name: str):
+ LOG.debug("Deleting host %s pointing to LocalStack", name)
+ self.delete_host(name, DynamicRecord(record_type=RecordType.A))
+ if config.DNS_RESOLVE_IP == config.LOCALHOST_IP:
+ self.delete_host(name, DynamicRecord(record_type=RecordType.AAAA))
+
+ def add_skip(self, skip_pattern: str):
+ LOG.debug("Adding skip pattern %s", skip_pattern)
+ self.skip_patterns.append(skip_pattern)
+
+ def delete_skip(self, skip_pattern: str):
+ LOG.debug("Deleting skip pattern %s", skip_pattern)
+ self.skip_patterns.remove(skip_pattern)
+
+ def clear(self):
+ LOG.debug("Clearing DNS zones")
+ self.skip_patterns.clear()
+ self.zones.clear()
+ self.aliases.clear()
+
+
+class DnsServer(Server, DnsServerProtocol):
+ servers: list[DNSServer]
+ resolver: Resolver | None
+
+ def __init__(
+ self,
+ port: int,
+ protocols: list[Literal["udp", "tcp"]],
+ upstream_dns: str,
+ host: str = "0.0.0.0",
+ ) -> None:
+ super().__init__(port, host)
+ self.resolver = Resolver(upstream_dns=upstream_dns)
+ self.protocols = protocols
+ self.servers = []
+ self.handler_class = NonLoggingHandler
+
+ def _get_servers(self) -> list[DNSServer]:
+ servers = []
+ for protocol in self.protocols:
+ # TODO add option to use normal logger instead of NoopLogger for verbose debug mode
+ servers.append(
+ DNSServer(
+ self.resolver,
+ handler=self.handler_class,
+ logger=NoopLogger(),
+ port=self.port,
+ address=self.host,
+ tcp=protocol == "tcp",
+ )
+ )
+ return servers
+
+ @property
+ def protocol(self):
+ return "udp"
+
+ def health(self):
+ """
+        Runs a health check on the server by sending a test DNS query and checking that an answer is returned.
+ """
+ try:
+ request = dns.message.make_query("localhost.localstack.cloud", "A")
+ answers = dns.query.udp(request, "127.0.0.1", port=self.port, timeout=0.5).answer
+ return len(answers) > 0
+ except Exception:
+ return False
+
+ def do_run(self):
+ self.servers = self._get_servers()
+ for server in self.servers:
+ server.start_thread()
+ LOG.debug("DNS Server started")
+ for server in self.servers:
+ server.thread.join()
+
+ def do_shutdown(self):
+ for server in self.servers:
+ server.stop()
+
+ def add_host(self, name: str, record: NameRecord):
+ self.resolver.add_host(name, record)
+
+ def delete_host(self, name: str, record: NameRecord):
+ self.resolver.delete_host(name, record)
+
+ def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+ self.resolver.add_alias(source_name, record_type, target)
+
+ def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+ self.resolver.delete_alias(source_name, record_type, target)
+
+ def add_host_pointing_to_localstack(self, name: str):
+ self.resolver.add_host_pointing_to_localstack(name)
+
+ def delete_host_pointing_to_localstack(self, name: str):
+ self.resolver.delete_host_pointing_to_localstack(name)
+
+ def add_skip(self, skip_pattern: str):
+ self.resolver.add_skip(skip_pattern)
+
+ def delete_skip(self, skip_pattern: str):
+ self.resolver.delete_skip(skip_pattern)
+
+ def clear(self):
+ self.resolver.clear()
+
+
+class SeparateProcessDNSServer(Server, DnsServerProtocol):
+    """
+    DNS server that runs ``localstack.dns.server`` as a separate sudo process, used
+    when binding the privileged port requires elevated permissions (host mode).
+    Note: record management methods from DnsServerProtocol are not bridged to the
+    child process from this class.
+    """
+
+    def __init__(
+        self,
+        port: int = 53,
+        host: str = "0.0.0.0",
+    ) -> None:
+        super().__init__(port, host)
+
+    @property
+    def protocol(self):
+        return "udp"
+
+    def health(self):
+        """
+        Runs a health check on the server by issuing a test DNS query (A record for
+        localhost.localstack.cloud) over UDP and checking that an answer is returned.
+        """
+        try:
+            request = dns.message.make_query("localhost.localstack.cloud", "A")
+            answers = dns.query.udp(request, "127.0.0.1", port=self.port, timeout=0.5).answer
+            return len(answers) > 0
+        except Exception:
+            return False
+
+    def do_start_thread(self):
+        # For host mode: forward all DNS_* config env vars to the child process
+        env_vars = {}
+        for env_var in config.CONFIG_ENV_VARS:
+            if env_var.startswith("DNS_"):
+                value = os.environ.get(env_var, None)
+                if value is not None:
+                    env_vars[env_var] = value
+
+        # note: running in a separate process breaks integration with Route53 (to be fixed for local dev mode!)
+        thread = run_module_as_sudo(
+            "localstack.dns.server",
+            asynchronous=True,
+            env_vars=env_vars,
+            arguments=["-p", str(self.port)],
+        )
+        return thread
+
+
+def get_fallback_dns_server():
+    """Return the configured upstream DNS server, or auto-detect one if not configured."""
+    return config.DNS_SERVER or get_available_dns_server()
+
+
+@cache
+def get_available_dns_server():
+ # TODO check if more loop-checks are necessary than just not using our own DNS server
+ with FALLBACK_DNS_LOCK:
+ resolver = dns.resolver.Resolver()
+ # we do not want to include localhost here, or a loop might happen
+ candidates = [r for r in resolver.nameservers if r != "127.0.0.1"]
+ result = None
+ candidates.append(DEFAULT_FALLBACK_DNS_SERVER)
+ for ns in candidates:
+ resolver.nameservers = [ns]
+ try:
+ try:
+ answer = resolver.resolve(VERIFICATION_DOMAIN, "a", lifetime=3)
+ answer = [
+ res.to_text() for answers in answer.response.answer for res in answers.items
+ ]
+ except Timeout:
+ answer = None
+ if not answer:
+ continue
+ result = ns
+ break
+ except Exception:
+ pass
+
+ if result:
+ LOG.debug("Determined fallback dns: %s", result)
+ else:
+ LOG.info(
+ "Unable to determine fallback DNS. Please check if '%s' is reachable by your configured DNS servers"
+ "DNS fallback will be disabled.",
+ VERIFICATION_DOMAIN,
+ )
+ return result
+
+
+# ###### LEGACY METHODS ######
+def add_resolv_entry(file_path: Path | str = Path("/etc/resolv.conf")):
+ global PREVIOUS_RESOLV_CONF_FILE
+ # never overwrite the host configuration without the user's permission
+ if not in_docker():
+ LOG.warning("Incorrectly attempted to alter host networking config")
+ return
+
+ LOG.debug("Overwriting container DNS server to point to localhost")
+ content = textwrap.dedent(
+ """
+ # The following line is required by LocalStack
+ nameserver 127.0.0.1
+ """
+ )
+ file_path = Path(file_path)
+ try:
+ with file_path.open("r+") as outfile:
+ PREVIOUS_RESOLV_CONF_FILE = outfile.read()
+ previous_resolv_conf_without_nameservers = [
+ line
+ for line in PREVIOUS_RESOLV_CONF_FILE.splitlines()
+ if not line.startswith("nameserver")
+ ]
+ outfile.seek(0)
+ outfile.write(content)
+ outfile.write("\n".join(previous_resolv_conf_without_nameservers))
+ outfile.truncate()
+ except Exception:
+ LOG.warning(
+ "Could not update container DNS settings", exc_info=LOG.isEnabledFor(logging.DEBUG)
+ )
+
+
+def revert_resolv_entry(file_path: Path | str = Path("/etc/resolv.conf")):
+    """
+    Restore resolv.conf from the content saved by add_resolv_entry(). No-op (with a
+    warning) outside Docker or when no previous content was captured.
+    """
+    # never overwrite the host configuration without the user's permission
+    if not in_docker():
+        LOG.warning("Incorrectly attempted to alter host networking config")
+        return
+
+    if not PREVIOUS_RESOLV_CONF_FILE:
+        LOG.warning("resolv.conf file to restore not found.")
+        return
+
+    LOG.debug("Reverting container DNS config")
+    file_path = Path(file_path)
+    try:
+        with file_path.open("w") as outfile:
+            outfile.write(PREVIOUS_RESOLV_CONF_FILE)
+    except Exception:
+        LOG.warning(
+            "Could not revert container DNS settings", exc_info=LOG.isEnabledFor(logging.DEBUG)
+        )
+
+
+def setup_network_configuration():
+    """Point the container's DNS at localhost, if the custom DNS server is enabled."""
+    # check if DNS is disabled
+    if not config.use_custom_dns():
+        return
+
+    # add entry to /etc/resolv.conf
+    if in_docker():
+        add_resolv_entry()
+
+
+def revert_network_configuration():
+    """Undo setup_network_configuration(), restoring the original /etc/resolv.conf."""
+    # check if DNS is disabled
+    if not config.use_custom_dns():
+        return
+
+    # restore the previous entry in /etc/resolv.conf
+    if in_docker():
+        revert_resolv_entry()
+
+
+def start_server(upstream_dns: str, host: str, port: int = config.DNS_PORT):
+    """
+    Create, configure, and start the in-process DNS server, storing it in the
+    DNS_SERVER module global on success. No-op if a server is already running.
+
+    :param upstream_dns: upstream DNS server to forward unresolved queries to
+    :param host: address to bind to
+    :param port: port to serve tcp/udp DNS on
+    """
+    global DNS_SERVER
+
+    if DNS_SERVER:
+        # already started - bail
+        LOG.debug("DNS servers are already started. Avoid starting again.")
+        return
+
+    LOG.debug("Starting DNS servers (tcp/udp port %s on %s)...", port, host)
+    dns_server = DnsServer(port, protocols=["tcp", "udp"], host=host, upstream_dns=upstream_dns)
+
+    # register the names that should resolve to the LocalStack container itself
+    for name in NAME_PATTERNS_POINTING_TO_LOCALSTACK:
+        dns_server.add_host_pointing_to_localstack(name)
+    if config.LOCALSTACK_HOST.host != LOCALHOST_HOSTNAME:
+        dns_server.add_host_pointing_to_localstack(f".*{config.LOCALSTACK_HOST.host}")
+
+    # support both DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM and DNS_LOCAL_NAME_PATTERNS
+    # until the next major version change
+    # TODO(srw): remove the usage of DNS_LOCAL_NAME_PATTERNS
+    skip_local_resolution = " ".join(
+        [
+            config.DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM,
+            config.DNS_LOCAL_NAME_PATTERNS,
+        ]
+    ).strip()
+    if skip_local_resolution:
+        # patterns may be separated by commas, semicolons, or whitespace; quotes are stripped
+        for skip_pattern in re.split(r"[,;\s]+", skip_local_resolution):
+            dns_server.add_skip(skip_pattern.strip(" \"'"))
+
+    dns_server.start()
+    if not dns_server.wait_is_up(timeout=5):
+        LOG.warning("DNS server did not come up within 5 seconds.")
+        dns_server.shutdown()
+        return
+    DNS_SERVER = dns_server
+    LOG.debug("DNS server startup finished.")
+
+
+def stop_servers():
+    """Shut down the running DNS server, if any."""
+    if DNS_SERVER:
+        DNS_SERVER.shutdown()
+
+
+def start_dns_server_as_sudo(port: int):
+ global DNS_SERVER
+ LOG.debug(
+ "Starting the DNS on its privileged port (%s) needs root permissions. Trying to start DNS with sudo.",
+ config.DNS_PORT,
+ )
+
+ dns_server = SeparateProcessDNSServer(port)
+ dns_server.start()
+
+ if not dns_server.wait_is_up(timeout=5):
+ LOG.warning("DNS server did not come up within 5 seconds.")
+ dns_server.shutdown()
+ return
+
+ DNS_SERVER = dns_server
+ LOG.debug("DNS server startup finished (as sudo).")
+
+
+def start_dns_server(port: int, asynchronous: bool = False, standalone: bool = False):
+    """
+    Entry point for starting the DNS server: resolves the upstream DNS, picks the bind
+    address, and starts either in-process (if the port can be bound) or via sudo in a
+    separate process.
+
+    :param port: port to serve DNS on
+    :param asynchronous: if False, block forever after a successful in-process start
+    :param standalone: set when already running as the sudo child process, to avoid
+        recursively escalating when the port still cannot be bound
+    """
+    if DNS_SERVER:
+        # already started - bail
+        LOG.error("DNS servers are already started. Avoid starting again.")
+        return
+
+    # check if DNS server is disabled
+    if not config.use_custom_dns():
+        LOG.debug("Not starting DNS. DNS_ADDRESS=%s", config.DNS_ADDRESS)
+        return
+
+    upstream_dns = get_fallback_dns_server()
+    if not upstream_dns:
+        LOG.warning("Error starting the DNS server: No upstream dns server found.")
+        return
+
+    # host to bind the DNS server to. In docker we always want to bind to "0.0.0.0"
+    host = config.DNS_ADDRESS
+    if in_docker():
+        host = "0.0.0.0"
+
+    if port_can_be_bound(Port(port, "udp"), address=host):
+        start_server(port=port, host=host, upstream_dns=upstream_dns)
+        if not asynchronous:
+            sleep_forever()
+        return
+
+    if standalone:
+        # already escalated once; trying sudo again would loop
+        LOG.debug("Already in standalone mode and port binding still fails.")
+        return
+
+    start_dns_server_as_sudo(port)
+
+
+def get_dns_server() -> DnsServerProtocol:
+    """Return the currently running DNS server (None if not started)."""
+    return DNS_SERVER
+
+
+def is_server_running() -> bool:
+    """Return True if a DNS server has been started."""
+    return DNS_SERVER is not None
+
+
+if __name__ == "__main__":
+    # standalone entry point, used e.g. when re-invoked via sudo by run_module_as_sudo
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-p", "--port", required=False, default=53, type=int)
+    args = parser.parse_args()
+
+    start_dns_server(asynchronous=False, port=args.port, standalone=True)
diff --git a/localstack-core/localstack/extensions/__init__.py b/localstack-core/localstack/extensions/__init__.py
new file mode 100644
index 0000000000000..3b52add044d38
--- /dev/null
+++ b/localstack-core/localstack/extensions/__init__.py
@@ -0,0 +1,3 @@
+"""Extensions are third-party software modules to customize localstack."""
+
+name = "extensions"
diff --git a/localstack-core/localstack/extensions/api/__init__.py b/localstack-core/localstack/extensions/api/__init__.py
new file mode 100644
index 0000000000000..9335bae5fe7c2
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/__init__.py
@@ -0,0 +1,7 @@
+"""Public facing API for users to build LocalStack extensions."""
+
+from .extension import Extension
+
+name = "api"
+
+__all__ = ["Extension"]
diff --git a/localstack-core/localstack/extensions/api/aws.py b/localstack-core/localstack/extensions/api/aws.py
new file mode 100644
index 0000000000000..871e2e8a583ee
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/aws.py
@@ -0,0 +1,33 @@
+from localstack.aws.api import (
+ CommonServiceException,
+ RequestContext,
+ ServiceException,
+ ServiceRequest,
+ ServiceResponse,
+)
+from localstack.aws.chain import (
+ CompositeExceptionHandler,
+ CompositeFinalizer,
+ CompositeHandler,
+ CompositeResponseHandler,
+ ExceptionHandler,
+ HandlerChain,
+)
+from localstack.aws.chain import Handler as RequestHandler
+from localstack.aws.chain import Handler as ResponseHandler
+
+__all__ = [
+ "RequestContext",
+ "ServiceRequest",
+ "ServiceResponse",
+ "ServiceException",
+ "CommonServiceException",
+ "RequestHandler",
+ "ResponseHandler",
+ "HandlerChain",
+ "CompositeHandler",
+ "ExceptionHandler",
+ "CompositeResponseHandler",
+ "CompositeExceptionHandler",
+ "CompositeFinalizer",
+]
diff --git a/localstack-core/localstack/extensions/api/extension.py b/localstack-core/localstack/extensions/api/extension.py
new file mode 100644
index 0000000000000..57a795bfbc2a9
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/extension.py
@@ -0,0 +1,112 @@
+from plux import Plugin
+
+from .aws import (
+ CompositeExceptionHandler,
+ CompositeFinalizer,
+ CompositeHandler,
+ CompositeResponseHandler,
+)
+from .http import RouteHandler, Router
+
+
+class BaseExtension(Plugin):
+    """
+    Base extension. Hooks extensions into the plux plugin framework; concrete
+    extension types subclass this and implement the lifecycle hooks.
+    """
+
+    def load(self, *args, **kwargs):
+        """
+        Provided to plux to load the plugins. Do NOT overwrite! PluginManagers managing extensions expect the load method to return the Extension itself.
+
+        :param args: load arguments
+        :param kwargs: load keyword arguments
+        :return: this extension object
+        """
+        return self
+
+    def on_extension_load(self, *args, **kwargs):
+        """
+        Called when LocalStack loads the extension. Must be implemented by subclasses.
+        """
+        raise NotImplementedError
+
+
+class Extension(BaseExtension):
+    """
+    An extension that is loaded into LocalStack dynamically.
+
+    All hooks default to no-ops, so subclasses only override what they need.
+    The method execution order of an extension is as follows:
+
+    - on_extension_load
+    - on_platform_start
+    - update_gateway_routes
+    - update_request_handlers
+    - update_response_handlers
+    - on_platform_ready
+    """
+
+    # plux plugin namespace extensions are discovered under
+    namespace = "localstack.extensions"
+
+    def on_extension_load(self):
+        """
+        Called when LocalStack loads the extension.
+        """
+        pass
+
+    def on_platform_start(self):
+        """
+        Called when LocalStack starts the main runtime.
+        """
+        pass
+
+    def update_gateway_routes(self, router: Router[RouteHandler]):
+        """
+        Called with the Router attached to the LocalStack gateway. Overwrite this to add or update routes.
+
+        :param router: the Router attached in the gateway
+        """
+        pass
+
+    def update_request_handlers(self, handlers: CompositeHandler):
+        """
+        Called with the custom request handlers of the LocalStack gateway. Overwrite this to add or update handlers.
+
+        :param handlers: custom request handlers of the gateway
+        """
+        pass
+
+    def update_response_handlers(self, handlers: CompositeResponseHandler):
+        """
+        Called with the custom response handlers of the LocalStack gateway. Overwrite this to add or update handlers.
+
+        :param handlers: custom response handlers of the gateway
+        """
+        pass
+
+    def update_exception_handlers(self, handlers: CompositeExceptionHandler):
+        """
+        Called with the custom exception handlers of the LocalStack gateway. Overwrite this to add or update handlers.
+
+        :param handlers: custom exception handlers of the gateway
+        """
+        pass
+
+    def update_finalizers(self, handlers: CompositeFinalizer):
+        """
+        Called with the custom finalizer handlers of the LocalStack gateway. Overwrite this to add or update handlers.
+
+        :param handlers: custom finalizer handlers of the gateway
+        """
+        pass
+
+    def on_platform_ready(self):
+        """
+        Called when LocalStack is ready and the Ready marker has been printed.
+        """
+        pass
+
+    def on_platform_shutdown(self):
+        """
+        Called when LocalStack is shutting down. Can be used to close any resources (threads, processes, sockets, etc.).
+        """
+        pass
diff --git a/localstack-core/localstack/extensions/api/http.py b/localstack-core/localstack/extensions/api/http.py
new file mode 100644
index 0000000000000..5845856625206
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/http.py
@@ -0,0 +1,16 @@
+from localstack.http import Request, Response, Router
+from localstack.http.client import HttpClient, SimpleRequestsClient
+from localstack.http.dispatcher import Handler as RouteHandler
+from localstack.http.proxy import Proxy, ProxyHandler, forward
+
+__all__ = [
+ "Request",
+ "Response",
+ "Router",
+ "HttpClient",
+ "SimpleRequestsClient",
+ "Proxy",
+ "ProxyHandler",
+ "forward",
+ "RouteHandler",
+]
diff --git a/localstack-core/localstack/extensions/api/runtime.py b/localstack-core/localstack/extensions/api/runtime.py
new file mode 100644
index 0000000000000..426036659c951
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/runtime.py
@@ -0,0 +1,3 @@
+from localstack.utils.analytics import get_session_id
+
+__all__ = ["get_session_id"]
diff --git a/localstack-core/localstack/extensions/api/services.py b/localstack-core/localstack/extensions/api/services.py
new file mode 100644
index 0000000000000..c41152ef0d121
--- /dev/null
+++ b/localstack-core/localstack/extensions/api/services.py
@@ -0,0 +1,5 @@
+from localstack.utils.common import external_service_ports
+
+__all__ = [
+ "external_service_ports",
+]
diff --git a/localstack/services/es/__init__.py b/localstack-core/localstack/extensions/patterns/__init__.py
similarity index 100%
rename from localstack/services/es/__init__.py
rename to localstack-core/localstack/extensions/patterns/__init__.py
diff --git a/localstack-core/localstack/extensions/patterns/webapp.py b/localstack-core/localstack/extensions/patterns/webapp.py
new file mode 100644
index 0000000000000..ab69d935d729c
--- /dev/null
+++ b/localstack-core/localstack/extensions/patterns/webapp.py
@@ -0,0 +1,333 @@
+import importlib
+import logging
+import mimetypes
+import typing as t
+from functools import cached_property
+
+from rolo.gateway import HandlerChain
+from rolo.router import RuleAdapter, WithHost
+from werkzeug.routing import Submount
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.extensions.api import Extension, http
+
+if t.TYPE_CHECKING:
+ # although jinja2 is included transitively via moto, let's make sure jinja2 stays optional
+ import jinja2
+
+LOG = logging.getLogger(__name__)
+
+_default = object()
+
+
+class WebAppExtension(Extension):
+ """
+ EXPERIMENTAL! This class is experimental and the API may change without notice.
+
+ A webapp extension serves routes, templates, and static files via a submount and a subdomain through
+ localstack.
+
+ It assumes you have the following directory layout::
+
+ my_extension
+ βββ extension.py
+ βββ __init__.py
+ βββ static <-- make sure static resources get packaged!
+ β βββ __init__.py
+ β βββ favicon.ico
+ β βββ style.css
+ βββ templates <-- jinja2 templates
+ βββ index.html
+
+ Given this layout, you can define your extensions in ``my_extension.extension`` like this. Routes defined in the
+ extension itself are automatically registered::
+
+ class MyExtension(WebAppExtension):
+ name = "my-extension"
+
+ @route("/")
+ def index(request: Request) -> Response:
+ # reference `static/style.css` to serve the static file from your package
+ return self.render_template_response("index.html")
+
+ @route("/hello")
+ def hello(request: Request):
+ return {"message": "Hello World!"}
+
+ This will create an extension that localstack serves via:
+
+ * Submount: https://localhost.localstack.cloud:4566/_extension/my-extension
+ * Subdomain: https://my-extension.localhost.localstack.cloud:4566/
+
+ Both are created for full flexibility:
+
+ * Subdomains: create a domain namespace that can be helpful for some extensions, especially when
+ running on the local machine
+ * Submounts: for some environments, like in ephemeral instances where subdomains are harder to control,
+ submounts are more convenient
+
+ Any routes added by the extension will be served relative to these URLs.
+ """
+
+ def __init__(
+ self,
+ mount: str = None,
+ submount: str | None = _default,
+ subdomain: str | None = _default,
+ template_package_path: str | None = _default,
+ static_package_path: str | None = _default,
+ static_url_path: str = None,
+ ):
+ """
+ Overwrite to customize your extension. For example, you can disable certain behavior by calling
+ ``super( ).__init__(subdomain=None, static_package_path=None)``, which will disable serving through
+ a subdomain, and disable static file serving.
+
+ :param mount: the "mount point" which will be used as default value for the submount and
+ subdirectory, i.e., ``.localhost.localstack.cloud`` and
+ ``localhost.localstack.cloud/_extension/``. Defaults to the extension name. Note that,
+ in case the mount name clashes with another extension, extensions may overwrite each other's
+ routes.
+ :param submount: the submount path, needs to start with a trailing slash (default
+ ``/_extension/``)
+ :param subdomain: the subdomain (defaults to the value of ``mount``)
+ :param template_package_path: the path to the templates within the module. defaults to
+ ``templates`` which expands to ``.templates``)
+ :param static_package_path: the package serving static files. defaults to ``static``, which expands to
+ ``.static``.
+ :param static_url_path: the URL path to serve static files from (defaults to `/static`)
+ """
+ mount = mount or self.name
+
+ self.submount = f"/_extension/{mount}" if submount is _default else submount
+ self.subdomain = mount if subdomain is _default else subdomain
+
+ self.template_package_path = (
+ "templates" if template_package_path is _default else template_package_path
+ )
+ self.static_package_path = (
+ "static" if static_package_path is _default else static_package_path
+ )
+ self.static_url_path = static_url_path or "/static"
+
+ self.static_resource_module = None
+
+ def collect_routes(self, routes: list[t.Any]):
+ """
+ This method can be overwritten to add more routes to the controller. Everything in ``routes`` will
+ be added to a ``RuleAdapter`` and subsequently mounted into the gateway router.
+
+ Here are some examples::
+
+ class MyRoutes:
+ @route("/hello")
+ def hello(request):
+ return "Hello World!"
+
+ class MyExtension(WebAppExtension):
+ name = "my-extension"
+
+ def collect_routes(self, routes: list[t.Any]):
+
+ # scans all routes of MyRoutes
+ routes.append(MyRoutes())
+ # use rule adapters to add routes without decorators
+ routes.append(RuleAdapter("/say-hello", self.say_hello))
+
+ # no idea why you would want to do this, but you can :-)
+ @route("/empty-dict")
+ def _inline_handler(request: Request) -> Response:
+ return Response.for_json({})
+ routes.append(_inline_handler)
+
+ def say_hello(request: Request):
+ return {"message": "Hello World!"}
+
+ This creates the following routes available through both subdomain and submount.
+
+ With subdomain:
+
+ * ``my-extension.localhost.localstack.cloud:4566/hello``
+ * ``my-extension.localhost.localstack.cloud:4566/say-hello``
+ * ``my-extension.localhost.localstack.cloud:4566/empty-dict``
+ * ``my-extension.localhost.localstack.cloud:4566/static`` <- automatically added static file endpoint
+
+ With submount:
+
+ * ``localhost.localstack.cloud:4566/_extension/my-extension/hello``
+ * ``localhost.localstack.cloud:4566/_extension/my-extension/say-hello``
+ * ``localhost.localstack.cloud:4566/_extension/my-extension/empty-dict``
+ * ``localhost.localstack.cloud:4566/_extension/my-extension/static`` <- auto-added static file serving
+
+ :param routes: the routes being collected
+ """
+ pass
+
+ @cached_property
+ def template_env(self) -> t.Optional["jinja2.Environment"]:
+ """
+ Returns the singleton jinja2 template environment. By default, the environment uses a
+ ``PackageLoader`` that loads from ``my_extension.templates`` (where ``my_extension`` is the root
+ module of the extension, and ``templates`` refers to ``self.template_package_path``,
+ which is ``templates`` by default).
+
+ :return: a template environment
+ """
+ if self.template_package_path:
+ return self._create_template_env()
+ return None
+
+ def _create_template_env(self) -> "jinja2.Environment":
+ """
+ Factory method to create the jinja2 template environment.
+ :return: a new jinja2 environment
+ """
+ import jinja2
+
+ return jinja2.Environment(
+ loader=jinja2.PackageLoader(
+ self.get_extension_module_root(), self.template_package_path
+ ),
+ autoescape=jinja2.select_autoescape(),
+ )
+
+ def render_template(self, template_name, **context) -> str:
+ """
+ Uses the ``template_env`` to render a template and return the string value.
+
+ :param template_name: the template name
+ :param context: template context
+ :return: the rendered result
+ """
+ template = self.template_env.get_template(template_name)
+ return template.render(**context)
+
+ def render_template_response(self, template_name, **context) -> http.Response:
+ """
+ Uses the ``template_env`` to render a template into an HTTP response. It guesses the mimetype from the
+ template's file name.
+
+ :param template_name: the template name
+ :param context: template context
+ :return: the rendered result as response
+ """
+ template = self.template_env.get_template(template_name)
+
+ mimetype = mimetypes.guess_type(template.filename)
+ mimetype = mimetype[0] if mimetype and mimetype[0] else "text/plain"
+
+ return http.Response(response=template.render(**context), mimetype=mimetype)
+
+ def on_extension_load(self):
+ logging.getLogger(self.get_extension_module_root()).setLevel(
+ logging.DEBUG if config.DEBUG else logging.INFO
+ )
+
+ if self.static_package_path and not self.static_resource_module:
+ try:
+ self.static_resource_module = importlib.import_module(
+ self.get_extension_module_root() + "." + self.static_package_path
+ )
+ except ModuleNotFoundError:
+ LOG.warning("disabling static resources for extension %s", self.name)
+
+ def _preprocess_request(
+ self, chain: HandlerChain, context: RequestContext, _response: http.Response
+ ):
+ """
+ Default pre-processor, which implements a default behavior to add a trailing slash to the path if the
+ submount is used directly. For instance ``/_extension/my-extension``, then it forwards to
+ ``/_extension/my-extension/``. This is so you can reference relative paths like `` `` in your HTML safely, and it will work with both subdomain and submount.
+ """
+ path = context.request.path
+
+ if path == self.submount.rstrip("/"):
+ chain.respond(301, headers={"Location": context.request.url + "/"})
+
+ def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
+ from localstack.aws.handlers import preprocess_request
+
+ if self.submount:
+ preprocess_request.append(self._preprocess_request)
+
+ # adding self here makes sure that any ``@route`` decorators to the extension are mapped automatically
+ routes = [self]
+
+ if self.static_resource_module:
+ routes.append(
+ RuleAdapter(f"{self.static_url_path}/", self._serve_static_file)
+ )
+
+ self.collect_routes(routes)
+
+ app = RuleAdapter(routes)
+
+ if self.submount:
+ router.add(Submount(self.submount, [app]))
+ LOG.info(
+ "%s extension available at %s%s",
+ self.name,
+ config.external_service_url(),
+ self.submount,
+ )
+
+ if self.subdomain:
+ router.add(WithHost(f"{self.subdomain}.<__host__>", [app]))
+ self._configure_cors_for_subdomain()
+ LOG.info(
+ "%s extension available at %s",
+ self.name,
+ config.external_service_url(subdomains=self.subdomain),
+ )
+
+ def _serve_static_file(self, _request: http.Request, path: str):
+ """Route for serving static files, for ``/_extension/my-extension/static/``."""
+ return http.Response.for_resource(self.static_resource_module, path)
+
+ def _configure_cors_for_subdomain(self):
+ """
+ Automatically configures CORS for the subdomain, for both HTTP and HTTPS.
+ """
+ from localstack.aws.handlers.cors import ALLOWED_CORS_ORIGINS
+
+ for protocol in ("http", "https"):
+ url = self.get_subdomain_url(protocol)
+ LOG.debug("adding %s to ALLOWED_CORS_ORIGINS", url)
+ ALLOWED_CORS_ORIGINS.append(url)
+
+ def get_subdomain_url(self, protocol: str = "https") -> str:
+ """
+ Returns the URL that serves the extension under its subdomain
+ ``https://my-extension.localhost.localstack.cloud:4566/``.
+
+ :return: a URL this extension is served at
+ """
+ if not self.subdomain:
+ raise ValueError(f"Subdomain for extension {self.name} is not set")
+ return config.external_service_url(subdomains=self.subdomain, protocol=protocol)
+
+ def get_submount_url(self, protocol: str = "https") -> str:
+ """
+ Returns the URL that serves the extension under its submount
+ ``https://localhost.localstack.cloud:4566/_extension/my-extension``.
+
+ :return: a URL this extension is served at
+ """
+
+ if not self.submount:
+ raise ValueError(f"Submount for extension {self.name} is not set")
+
+ return f"{config.external_service_url(protocol=protocol)}{self.submount}"
+
+ @classmethod
+ def get_extension_module_root(cls) -> str:
+ """
+ Returns the root of the extension module. For instance, if the extension lives in
+ ``my_extension/plugins/extension.py``, then this will return ``my_extension``. Used to set up the
+ logger as well as the template environment and the static file module.
+
+ :return: the root module the extension lives in
+ """
+ return cls.__module__.split(".")[0]
diff --git a/localstack-core/localstack/http/__init__.py b/localstack-core/localstack/http/__init__.py
new file mode 100644
index 0000000000000..d72ef9d669d66
--- /dev/null
+++ b/localstack-core/localstack/http/__init__.py
@@ -0,0 +1,6 @@
+from .request import Request
+from .resource import Resource, resource
+from .response import Response
+from .router import Router, route
+
+__all__ = ["route", "resource", "Resource", "Router", "Response", "Request"]
diff --git a/localstack-core/localstack/http/asgi.py b/localstack-core/localstack/http/asgi.py
new file mode 100644
index 0000000000000..8ba3dd3454bd3
--- /dev/null
+++ b/localstack-core/localstack/http/asgi.py
@@ -0,0 +1,21 @@
+from rolo.asgi import (
+ ASGIAdapter,
+ ASGILifespanListener,
+ RawHTTPRequestEventStreamAdapter,
+ WebSocketEnvironment,
+ WebSocketListener,
+ WsgiStartResponse,
+ create_wsgi_input,
+ populate_wsgi_environment,
+)
+
+__all__ = [
+ "WebSocketEnvironment",
+ "populate_wsgi_environment",
+ "create_wsgi_input",
+ "RawHTTPRequestEventStreamAdapter",
+ "WsgiStartResponse",
+ "ASGILifespanListener",
+ "WebSocketListener",
+ "ASGIAdapter",
+]
diff --git a/localstack-core/localstack/http/client.py b/localstack-core/localstack/http/client.py
new file mode 100644
index 0000000000000..cb8f4b33aee31
--- /dev/null
+++ b/localstack-core/localstack/http/client.py
@@ -0,0 +1,7 @@
+from rolo.client import HttpClient, SimpleRequestsClient, make_request
+
+__all__ = [
+ "HttpClient",
+ "SimpleRequestsClient",
+ "make_request",
+]
diff --git a/localstack-core/localstack/http/dispatcher.py b/localstack-core/localstack/http/dispatcher.py
new file mode 100644
index 0000000000000..308450fbd3296
--- /dev/null
+++ b/localstack-core/localstack/http/dispatcher.py
@@ -0,0 +1,25 @@
+from json import JSONEncoder
+from typing import Type
+
+from rolo.routing.handler import Handler, ResultValue
+from rolo.routing.handler import handler_dispatcher as _handler_dispatcher
+from rolo.routing.router import Dispatcher
+
+from localstack.utils.json import CustomEncoder
+
+__all__ = [
+ "ResultValue",
+ "Handler",
+ "handler_dispatcher",
+]
+
+
+def handler_dispatcher(json_encoder: Type[JSONEncoder] = None) -> Dispatcher[Handler]:
+    """
+    Replacement for ``rolo.dispatcher.handler_dispatcher`` that uses by default LocalStack's CustomEncoder for
+    serializing JSON documents.
+
+    :param json_encoder: the encoder to use (defaults to CustomEncoder)
+    :return: a Dispatcher that dispatches to instances of a Handler
+    """
+    return _handler_dispatcher(json_encoder or CustomEncoder)
diff --git a/localstack-core/localstack/http/duplex_socket.py b/localstack-core/localstack/http/duplex_socket.py
new file mode 100644
index 0000000000000..8006f398668e5
--- /dev/null
+++ b/localstack-core/localstack/http/duplex_socket.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import logging
+import socket
+import ssl
+from asyncio.selector_events import BaseSelectorEventLoop
+
+from localstack.utils.asyncio import run_sync
+from localstack.utils.objects import singleton_factory
+from localstack.utils.patch import Patch, patch
+
+# set up logger
+LOG = logging.getLogger(__name__)
+
+
+class DuplexSocket(ssl.SSLSocket):
+    """Simple duplex socket wrapper that allows serving HTTP/HTTPS over the same port."""
+
+    def accept(self):
+        # accept the raw TCP connection first, then only wrap it in TLS if it does
+        # not clearly look like plaintext (None, i.e. "undetermined", is wrapped too)
+        newsock, addr = socket.socket.accept(self)
+        if DuplexSocket.is_ssl_socket(newsock) is not False:
+            newsock = self.context.wrap_socket(
+                newsock,
+                do_handshake_on_connect=self.do_handshake_on_connect,
+                suppress_ragged_eofs=self.suppress_ragged_eofs,
+                server_side=True,
+            )
+
+        return newsock, addr
+
+    @staticmethod
+    def is_ssl_socket(newsock):
+        """Returns True/False if the socket uses SSL or not, or None if the status cannot be
+        determined"""
+
+        def peek_ssl_header():
+            # peek (without consuming) the first 5 bytes of the stream
+            peek_bytes = 5
+            first_bytes = newsock.recv(peek_bytes, socket.MSG_PEEK)
+            if len(first_bytes or "") != peek_bytes:
+                # not enough data to decide
+                return
+            first_byte = first_bytes[0]
+            # heuristic: a non-printable first byte is treated as a TLS record,
+            # a printable ASCII byte as plaintext (e.g. an HTTP method)
+            return first_byte < 32 or first_byte >= 127
+
+        try:
+            return peek_ssl_header()
+        except Exception:
+            # Fix for "[Errno 11] Resource temporarily unavailable" - This can
+            # happen if we're using a non-blocking socket in a blocking thread.
+            newsock.setblocking(1)
+            newsock.settimeout(1)
+            try:
+                return peek_ssl_header()
+            except Exception:
+                return False
+
+
+@singleton_factory
+def enable_duplex_socket():
+    """
+    Function which replaces the ssl.SSLContext.sslsocket_class with the DuplexSocket, enabling serving both,
+    HTTP and HTTPS connections on a single port. Applied at most once (singleton_factory).
+    """
+
+    # set globally defined SSL socket implementation class
+    Patch(ssl.SSLContext, "sslsocket_class", DuplexSocket).apply()
+
+    # asyncio accepts connections on its own code path; patch it (if present in this
+    # Python version) to skip TLS wrapping for connections detected as plaintext
+    if hasattr(BaseSelectorEventLoop, "_accept_connection2"):
+
+        @patch(BaseSelectorEventLoop._accept_connection2)
+        async def _accept_connection2(
+            fn, self, protocol_factory, conn, extra, sslcontext, *args, **kwargs
+        ):
+            # run the blocking peek off the event loop thread
+            is_ssl_socket = await run_sync(DuplexSocket.is_ssl_socket, conn)
+            if is_ssl_socket is False:
+                sslcontext = None
+            result = await fn(self, protocol_factory, conn, extra, sslcontext, *args, **kwargs)
+            return result
diff --git a/localstack-core/localstack/http/hypercorn.py b/localstack-core/localstack/http/hypercorn.py
new file mode 100644
index 0000000000000..e14f2e167c797
--- /dev/null
+++ b/localstack-core/localstack/http/hypercorn.py
@@ -0,0 +1,146 @@
+import asyncio
+import threading
+from asyncio import AbstractEventLoop
+
+from hypercorn import Config
+from hypercorn.asyncio import serve
+from hypercorn.typing import ASGIFramework
+
+from localstack.aws.gateway import Gateway
+from localstack.aws.handlers.proxy import ProxyHandler
+from localstack.aws.serving.asgi import AsgiGateway
+from localstack.config import HostAndPort
+from localstack.logging.setup import setup_hypercorn_logger
+from localstack.utils.collections import ensure_list
+from localstack.utils.functions import call_safe
+from localstack.utils.serving import Server
+from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+
+
+class HypercornServer(Server):
+ """
+ A sync wrapper around Hypercorn that implements the ``Server`` interface.
+ """
+
+ def __init__(self, app: ASGIFramework, config: Config, loop: AbstractEventLoop = None):
+ """
+ Create a new Hypercorn server instance. Note that, if you pass an event loop to the constructor,
+ you are yielding control of that event loop to the server, as it will invoke `run_until_complete` and
+ shutdown the loop.
+
+ :param app: the ASGI3 app
+ :param config: the hypercorn config
+ :param loop: optionally the event loop, otherwise ``asyncio.new_event_loop`` will be called
+ """
+ self.app = app
+ self.config = config
+ self.loop = loop or asyncio.new_event_loop()
+
+ self._close = asyncio.Event()
+ self._closed = threading.Event()
+
+ parts = config.bind[0].split(":")
+ if len(parts) == 1:
+ # check ssl
+ host = parts[0]
+ port = 443 if config.ssl_enabled else 80
+ else:
+ host, port = parts[0], int(parts[1])
+
+ super().__init__(port, host)
+
+ @property
+ def protocol(self):
+ return "https" if self.config.ssl_enabled else "http"
+
+ def do_run(self):
+ self.loop.run_until_complete(
+ serve(self.app, self.config, shutdown_trigger=self._shutdown_trigger)
+ )
+ self._closed.set()
+
+ def do_shutdown(self):
+ asyncio.run_coroutine_threadsafe(self._set_closed(), self.loop)
+ self._closed.wait(timeout=10)
+ asyncio.run_coroutine_threadsafe(self.loop.shutdown_asyncgens(), self.loop)
+ self.loop.shutdown_default_executor()
+ self.loop.stop()
+ call_safe(self.loop.close)
+
+ async def _set_closed(self):
+ self._close.set()
+
+ async def _shutdown_trigger(self):
+ await self._close.wait()
+
+
+class GatewayServer(HypercornServer):
+ """
+ A Hypercorn-based server implementation which serves a given Gateway.
+ It can be used to easily spawn new gateway servers, defining their individual request-, response-, and
+ exception-handlers.
+ """
+
+ def __init__(
+ self,
+ gateway: Gateway,
+ listen: HostAndPort | list[HostAndPort],
+ use_ssl: bool = False,
+ threads: int | None = None,
+ ):
+ """
+ Creates a new GatewayServer instance.
+
+ :param gateway: which will be served by this server
+ :param listen: defining the address and port pairs this server binds to. Can be a list of host and port pairs.
+ :param use_ssl: True if the LocalStack cert should be loaded and HTTP/HTTPS multiplexing should be enabled.
+ :param threads: Number of worker threads the gateway will use.
+ """
+ # build server config
+ config = Config()
+ config.h11_pass_raw_headers = True
+ setup_hypercorn_logger(config)
+
+ listens = ensure_list(listen)
+ config.bind = [str(host_and_port) for host_and_port in listens]
+
+ if use_ssl:
+ install_predefined_cert_if_available()
+ serial_number = listens[0].port
+ _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number)
+ config.certfile = cert_file_name
+ config.keyfile = key_file_name
+
+ # build gateway
+ loop = asyncio.new_event_loop()
+ app = AsgiGateway(gateway, event_loop=loop, threads=threads)
+
+ # start serving gateway
+ super().__init__(app, config, loop)
+
+ def do_shutdown(self):
+ super().do_shutdown()
+ self.app.close() # noqa (app will be of type AsgiGateway)
+
+
+class ProxyServer(GatewayServer):
+ """
+ Proxy server implementation which uses the localstack.http.proxy module.
+ These server instances can be spawned easily, while implementing HTTP/HTTPS multiplexing (if enabled),
+ and just forward all incoming requests to a backend.
+ """
+
+ def __init__(
+ self, forward_base_url: str, listen: HostAndPort | list[HostAndPort], use_ssl: bool = False
+ ):
+ """
+ Creates a new ProxyServer instance.
+
+ :param forward_base_url: URL of the backend system all requests this server receives should be forwarded to
+        :param listen: defining the address and port pairs this server binds to.
+            Can be a single host and port pair or a list of host and port pairs.
+ :param use_ssl: True if the LocalStack cert should be loaded and HTTP/HTTPS multiplexing should be enabled.
+ """
+ gateway = Gateway()
+ gateway.request_handlers.append(ProxyHandler(forward_base_url=forward_base_url))
+ super().__init__(gateway, listen, use_ssl)
diff --git a/localstack-core/localstack/http/proxy.py b/localstack-core/localstack/http/proxy.py
new file mode 100644
index 0000000000000..35cf74719277a
--- /dev/null
+++ b/localstack-core/localstack/http/proxy.py
@@ -0,0 +1,7 @@
+from rolo.proxy import Proxy, ProxyHandler, forward
+
+__all__ = [
+ "forward",
+ "Proxy",
+ "ProxyHandler",
+]
diff --git a/localstack-core/localstack/http/request.py b/localstack-core/localstack/http/request.py
new file mode 100644
index 0000000000000..411ead4ab6bde
--- /dev/null
+++ b/localstack-core/localstack/http/request.py
@@ -0,0 +1,21 @@
+from rolo.request import (
+ Request,
+ dummy_wsgi_environment,
+ get_full_raw_path,
+ get_raw_base_url,
+ get_raw_current_url,
+ get_raw_path,
+ restore_payload,
+ set_environment_headers,
+)
+
+__all__ = [
+ "dummy_wsgi_environment",
+ "set_environment_headers",
+ "Request",
+ "get_raw_path",
+ "get_full_raw_path",
+ "get_raw_base_url",
+ "get_raw_current_url",
+ "restore_payload",
+]
diff --git a/localstack-core/localstack/http/resource.py b/localstack-core/localstack/http/resource.py
new file mode 100644
index 0000000000000..40db6d941b0aa
--- /dev/null
+++ b/localstack-core/localstack/http/resource.py
@@ -0,0 +1,6 @@
+from rolo.resource import Resource, resource
+
+__all__ = [
+ "resource",
+ "Resource",
+]
diff --git a/localstack/services/firehose/__init__.py b/localstack-core/localstack/http/resources/__init__.py
similarity index 100%
rename from localstack/services/firehose/__init__.py
rename to localstack-core/localstack/http/resources/__init__.py
diff --git a/localstack/services/kinesis/__init__.py b/localstack-core/localstack/http/resources/swagger/__init__.py
similarity index 100%
rename from localstack/services/kinesis/__init__.py
rename to localstack-core/localstack/http/resources/swagger/__init__.py
diff --git a/localstack-core/localstack/http/resources/swagger/endpoints.py b/localstack-core/localstack/http/resources/swagger/endpoints.py
new file mode 100644
index 0000000000000..f6cef4c9a33f8
--- /dev/null
+++ b/localstack-core/localstack/http/resources/swagger/endpoints.py
@@ -0,0 +1,25 @@
+import os
+
+from jinja2 import Environment, FileSystemLoader
+from rolo import Request, route
+
+from localstack.config import external_service_url
+from localstack.http import Response
+
+
+def _get_service_url(request: Request) -> str:
+ # special case for ephemeral instances
+ if "sandbox.localstack.cloud" in request.host:
+ return external_service_url(protocol="https", port=443)
+ return external_service_url(protocol=request.scheme)
+
+
+class SwaggerUIApi:
+ @route("/_localstack/swagger", methods=["GET"])
+ def server_swagger_ui(self, request: Request) -> Response:
+ init_path = f"{_get_service_url(request)}/openapi.yaml"
+ oas_path = os.path.join(os.path.dirname(__file__), "templates")
+ env = Environment(loader=FileSystemLoader(oas_path))
+ template = env.get_template("index.html")
+ rendered_template = template.render(swagger_url=init_path)
+ return Response(rendered_template, content_type="text/html")
diff --git a/localstack-core/localstack/http/resources/swagger/plugins.py b/localstack-core/localstack/http/resources/swagger/plugins.py
new file mode 100644
index 0000000000000..2e464f50deacd
--- /dev/null
+++ b/localstack-core/localstack/http/resources/swagger/plugins.py
@@ -0,0 +1,23 @@
+import werkzeug
+import yaml
+from rolo.routing import RuleAdapter
+
+from localstack.http.resources.swagger.endpoints import SwaggerUIApi
+from localstack.runtime import hooks
+from localstack.services.edge import ROUTER
+from localstack.services.internal import get_internal_apis
+from localstack.utils.openapi import get_localstack_openapi_spec
+
+
+@hooks.on_infra_start()
+def register_swagger_endpoints():
+ get_internal_apis().add(SwaggerUIApi())
+
+ def _serve_openapi_spec(_request):
+ spec = get_localstack_openapi_spec()
+ response_body = yaml.dump(spec)
+ return werkzeug.Response(
+ response_body, content_type="application/yaml", direct_passthrough=True
+ )
+
+ ROUTER.add(RuleAdapter("/openapi.yaml", _serve_openapi_spec))
diff --git a/localstack-core/localstack/http/resources/swagger/templates/index.html b/localstack-core/localstack/http/resources/swagger/templates/index.html
new file mode 100644
index 0000000000000..a852b132deb56
--- /dev/null
+++ b/localstack-core/localstack/http/resources/swagger/templates/index.html
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+ SwaggerUI
+
+
+
+
+
+
+
+
diff --git a/localstack-core/localstack/http/response.py b/localstack-core/localstack/http/response.py
new file mode 100644
index 0000000000000..66863c147d370
--- /dev/null
+++ b/localstack-core/localstack/http/response.py
@@ -0,0 +1,22 @@
+from json import JSONEncoder
+from typing import Any, Type
+
+from rolo import Response as RoloResponse
+
+from localstack.utils.common import CustomEncoder
+
+
+class Response(RoloResponse):
+ """
+ An HTTP Response object, which simply extends werkzeug's Response object with a few convenience methods.
+ """
+
+ def set_json(self, doc: Any, cls: Type[JSONEncoder] = CustomEncoder):
+ """
+ Serializes the given dictionary using localstack's ``CustomEncoder`` into a json response, and sets the
+ mimetype automatically to ``application/json``.
+
+ :param doc: the response dictionary to be serialized as JSON
+ :param cls: the json encoder used
+ """
+ return super().set_json(doc, cls or CustomEncoder)
diff --git a/localstack-core/localstack/http/router.py b/localstack-core/localstack/http/router.py
new file mode 100644
index 0000000000000..da3bcdfe043c0
--- /dev/null
+++ b/localstack-core/localstack/http/router.py
@@ -0,0 +1,52 @@
+from typing import (
+ Any,
+ Mapping,
+ TypeVar,
+)
+
+from rolo.routing import (
+ PortConverter,
+ RegexConverter,
+ Router,
+ RuleAdapter,
+ RuleGroup,
+ WithHost,
+ route,
+)
+from rolo.routing.router import Dispatcher, call_endpoint
+from werkzeug.routing import PathConverter
+
+HTTP_METHODS = ("GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS", "TRACE")
+
+E = TypeVar("E")
+RequestArguments = Mapping[str, Any]
+
+
+class GreedyPathConverter(PathConverter):
+ """
+ This converter makes sure that the path ``/mybucket//mykey`` can be matched to the pattern
+ ``/`` and will result in `Key` being `/mykey`.
+ """
+
+ regex = ".*?"
+
+ part_isolating = False
+ """From the werkzeug docs: If a custom converter can match a forward slash, /, it should have the
+ attribute part_isolating set to False. This will ensure that rules using the custom converter are
+ correctly matched."""
+
+
+__all__ = [
+ "RequestArguments",
+ "HTTP_METHODS",
+ "RegexConverter",
+ "PortConverter",
+ "Dispatcher",
+ "route",
+ "call_endpoint",
+ "Router",
+ "RuleAdapter",
+ "WithHost",
+ "RuleGroup",
+ "GreedyPathConverter",
+]
diff --git a/localstack-core/localstack/http/trace.py b/localstack-core/localstack/http/trace.py
new file mode 100644
index 0000000000000..7d52b9ebf36dc
--- /dev/null
+++ b/localstack-core/localstack/http/trace.py
@@ -0,0 +1,348 @@
+import dataclasses
+import inspect
+import logging
+import time
+from typing import Any, Callable
+
+from rolo import Response
+from rolo.gateway import ExceptionHandler, Handler, HandlerChain, RequestContext
+from werkzeug.datastructures import Headers
+
+from localstack.utils.patch import Patch, Patches
+
+LOG = logging.getLogger(__name__)
+
+
+class Action:
+ """
+ Encapsulates something that the handler performed on the request context, request, or response objects.
+ """
+
+ name: str
+
+ def __init__(self, name: str):
+ self.name = name
+
+ def __repr__(self):
+ return self.name
+
+
+class SetAttributeAction(Action):
+ """
+ The handler set an attribute of the request context or something else.
+ """
+
+ key: str
+ value: Any | None
+
+ def __init__(self, key: str, value: Any | None = None):
+ super().__init__("set")
+ self.key = key
+ self.value = value
+
+ def __repr__(self):
+ if self.value is None:
+ return f"set {self.key}"
+ return f"set {self.key} = {self.value!r}"
+
+
+class ModifyHeadersAction(Action):
+ """
+ The handler modified headers in some way, either adding, updating, or removing headers.
+ """
+
+ def __init__(self, name: str, before: Headers, after: Headers):
+ super().__init__(name)
+ self.before = before
+ self.after = after
+
+ @property
+ def header_actions(self) -> list[Action]:
+ after = self.after
+ before = self.before
+
+ actions = []
+
+ headers_set = dict(set(after.items()) - set(before.items()))
+ headers_removed = {k: v for k, v in before.items() if k not in after}
+
+ for k, v in headers_set.items():
+ actions.append(Action(f"set '{k}: {v}'"))
+ for k, v in headers_removed.items():
+ actions.append(Action(f"del '{k}: {v}'"))
+
+ return actions
+
+
+@dataclasses.dataclass
+class HandlerTrace:
+ handler: Handler
+ """The handler"""
+ duration_ms: float
+ """The runtime duration of the handler in milliseconds"""
+ actions: list[Action]
+ """The actions the handler chain performed"""
+
+ @property
+ def handler_module(self):
+ return self.handler.__module__
+
+ @property
+ def handler_name(self):
+ if inspect.isfunction(self.handler):
+ return self.handler.__name__
+ else:
+ return self.handler.__class__.__name__
+
+
+def _log_method_call(name: str, actions: list[Action]):
+ """Creates a wrapper around the original method `_fn`. It appends an action to the `actions`
+ list indicating that the function was called and then returns the original function."""
+
+ def _proxy(self, _fn, *args, **kwargs):
+ actions.append(Action(f"call {name}"))
+ return _fn(*args, **kwargs)
+
+ return _proxy
+
+
+class TracingHandlerBase:
+ """
+ This class is a Handler that records a trace of the execution of another request handler. It has two
+ attributes: `trace`, which stores the tracing information, and `delegate`, which is the handler or
+ exception handler that will be traced.
+ """
+
+ trace: HandlerTrace | None
+ delegate: Handler | ExceptionHandler
+
+ def __init__(self, delegate: Handler | ExceptionHandler):
+ self.trace = None
+ self.delegate = delegate
+
+ def do_trace_call(
+ self, fn: Callable, chain: HandlerChain, context: RequestContext, response: Response
+ ):
+ """
+ Wraps the function call with the tracing functionality and records a HandlerTrace.
+
+ The method determines changes made by the request handler to specific aspects of the request.
+ Changes made to the request context and the response headers/status by the request handler are then
+ examined, and appropriate actions are added to the `actions` list of the trace.
+
+ :param fn: which is the function to be traced, which is the request/response/exception handler
+ :param chain: the handler chain
+ :param context: the request context
+ :param response: the response object
+ """
+ then = time.perf_counter()
+
+ actions = []
+
+ prev_context = dict(context.__dict__)
+ prev_stopped = chain.stopped
+ prev_request_identity = id(context.request)
+ prev_terminated = chain.terminated
+ prev_request_headers = context.request.headers.copy()
+ prev_response_headers = response.headers.copy()
+ prev_response_status = response.status_code
+
+ # add patches to log invocations or certain functions
+ patches = Patches(
+ [
+ Patch.function(
+ context.request.get_data,
+ _log_method_call("request.get_data", actions),
+ ),
+ Patch.function(
+ context.request._load_form_data,
+ _log_method_call("request._load_form_data", actions),
+ ),
+ Patch.function(
+ response.get_data,
+ _log_method_call("response.get_data", actions),
+ ),
+ ]
+ )
+ patches.apply()
+
+ try:
+ return fn()
+ finally:
+ now = time.perf_counter()
+ # determine some basic things the handler changed in the context
+ patches.undo()
+
+ # chain
+ if chain.stopped and not prev_stopped:
+ actions.append(Action("stop chain"))
+ if chain.terminated and not prev_terminated:
+ actions.append(Action("terminate chain"))
+
+ # detect when attributes are set in the request contex
+ context_args = dict(context.__dict__)
+ context_args.pop("request", None) # request is handled separately
+
+ for k, v in context_args.items():
+ if not v:
+ continue
+ if prev_context.get(k):
+ # TODO: we could introduce "ModifyAttributeAction(k,v)" with an additional check
+ # ``if v != prev_context.get(k)``
+ continue
+ actions.append(SetAttributeAction(k, v))
+
+ # request
+ if id(context.request) != prev_request_identity:
+ actions.append(Action("replaced request object"))
+
+ # response
+ if response.status_code != prev_response_status:
+ actions.append(SetAttributeAction("response stats_code", response.status_code))
+ if context.request.headers != prev_request_headers:
+ actions.append(
+ ModifyHeadersAction(
+ "modify request headers",
+ prev_request_headers,
+ context.request.headers.copy(),
+ )
+ )
+ if response.headers != prev_response_headers:
+ actions.append(
+ ModifyHeadersAction(
+ "modify response headers", prev_response_headers, response.headers.copy()
+ )
+ )
+
+ self.trace = HandlerTrace(
+ handler=self.delegate, duration_ms=(now - then) * 1000, actions=actions
+ )
+
+
+class TracingHandler(TracingHandlerBase):
+ delegate: Handler
+
+ def __init__(self, delegate: Handler):
+ super().__init__(delegate)
+
+ def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
+ def _call():
+ return self.delegate(chain, context, response)
+
+ return self.do_trace_call(_call, chain, context, response)
+
+
+class TracingExceptionHandler(TracingHandlerBase):
+ delegate: ExceptionHandler
+
+ def __init__(self, delegate: ExceptionHandler):
+ super().__init__(delegate)
+
+ def __call__(
+ self, chain: HandlerChain, exception: Exception, context: RequestContext, response: Response
+ ):
+ def _call():
+ return self.delegate(chain, exception, context, response)
+
+ return self.do_trace_call(_call, chain, context, response)
+
+
+class TracingHandlerChain(HandlerChain):
+ """
+    TracingHandlerChain - A subclass of HandlerChain for logging and tracing handlers.
+
+ Attributes:
+ - duration (float): Total time taken for handling request in milliseconds.
+ - request_handler_traces (list[HandlerTrace]): List of request handler traces.
+ - response_handler_traces (list[HandlerTrace]): List of response handler traces.
+ - finalizer_traces (list[HandlerTrace]): List of finalizer traces.
+ - exception_handler_traces (list[HandlerTrace]): List of exception handler traces.
+ """
+
+ duration: float
+ request_handler_traces: list[HandlerTrace]
+ response_handler_traces: list[HandlerTrace]
+ finalizer_traces: list[HandlerTrace]
+ exception_handler_traces: list[HandlerTrace]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.request_handler_traces = []
+ self.response_handler_traces = []
+ self.finalizer_traces = []
+ self.exception_handler_traces = []
+
+ def handle(self, context: RequestContext, response: Response):
+ """Overrides HandlerChain's handle method and adds tracing handler to request handlers. Logs the trace
+ report with request and response details."""
+ then = time.perf_counter()
+ try:
+ self.request_handlers = [TracingHandler(handler) for handler in self.request_handlers]
+ return super().handle(context, response)
+ finally:
+ self.duration = (time.perf_counter() - then) * 1000
+ self.request_handler_traces = [handler.trace for handler in self.request_handlers]
+ self._log_report()
+
+ def _call_response_handlers(self, response):
+ self.response_handlers = [TracingHandler(handler) for handler in self.response_handlers]
+ try:
+ return super()._call_response_handlers(response)
+ finally:
+ self.response_handler_traces = [handler.trace for handler in self.response_handlers]
+
+ def _call_finalizers(self, response):
+ self.finalizers = [TracingHandler(handler) for handler in self.finalizers]
+ try:
+ return super()._call_response_handlers(response)
+ finally:
+ self.finalizer_traces = [handler.trace for handler in self.finalizers]
+
+ def _call_exception_handlers(self, e, response):
+ self.exception_handlers = [
+ TracingExceptionHandler(handler) for handler in self.exception_handlers
+ ]
+ try:
+ return super()._call_exception_handlers(e, response)
+ finally:
+ self.exception_handler_traces = [handler.trace for handler in self.exception_handlers]
+
+ def _log_report(self):
+ report = []
+ request = self.context.request
+ response = self.response
+
+ def _append_traces(traces: list[HandlerTrace]):
+            """Formats and appends a list of traces to the report, and recursively appends the trace's
+ actions (if any)."""
+
+ for trace in traces:
+ if trace is None:
+ continue
+
+ report.append(
+ f"{trace.handler_module:43s} {trace.handler_name:30s} {trace.duration_ms:8.2f}ms"
+ )
+ _append_actions(trace.actions, 46)
+
+ def _append_actions(actions: list[Action], indent: int):
+ for action in actions:
+ report.append((" " * indent) + f"- {action!r}")
+
+ if isinstance(action, ModifyHeadersAction):
+ _append_actions(action.header_actions, indent + 2)
+
+ report.append(f"request: {request.method} {request.url}")
+ report.append(f"response: {response.status_code}")
+ report.append("---- request handlers " + ("-" * 63))
+ _append_traces(self.request_handler_traces)
+ report.append("---- response handlers " + ("-" * 63))
+ _append_traces(self.response_handler_traces)
+ report.append("---- finalizers " + ("-" * 63))
+ _append_traces(self.finalizer_traces)
+ report.append("---- exception handlers " + ("-" * 63))
+ _append_traces(self.exception_handler_traces)
+ # Add a separator and total duration value to the end of the report
+ report.append(f"{'=' * 68} total {self.duration:8.2f}ms")
+
+ LOG.info("handler chain trace report:\n%s\n%s", "=" * 85, "\n".join(report))
diff --git a/localstack-core/localstack/http/websocket.py b/localstack-core/localstack/http/websocket.py
new file mode 100644
index 0000000000000..9bd92a927a998
--- /dev/null
+++ b/localstack-core/localstack/http/websocket.py
@@ -0,0 +1,15 @@
+from rolo.websocket.websocket import (
+ WebSocket,
+ WebSocketDisconnectedError,
+ WebSocketError,
+ WebSocketProtocolError,
+ WebSocketRequest,
+)
+
+__all__ = [
+ "WebSocketError",
+ "WebSocketDisconnectedError",
+ "WebSocketProtocolError",
+ "WebSocket",
+ "WebSocketRequest",
+]
diff --git a/localstack/services/s3/__init__.py b/localstack-core/localstack/logging/__init__.py
similarity index 100%
rename from localstack/services/s3/__init__.py
rename to localstack-core/localstack/logging/__init__.py
diff --git a/localstack-core/localstack/logging/format.py b/localstack-core/localstack/logging/format.py
new file mode 100644
index 0000000000000..5f308e34d9ecf
--- /dev/null
+++ b/localstack-core/localstack/logging/format.py
@@ -0,0 +1,194 @@
+"""Tools for formatting localstack logs."""
+
+import logging
+import re
+from functools import lru_cache
+from typing import Any, Dict
+
+from localstack.utils.numbers import format_bytes
+from localstack.utils.strings import to_bytes
+
+MAX_THREAD_NAME_LEN = 12
+MAX_NAME_LEN = 26
+
+LOG_FORMAT = f"%(asctime)s.%(msecs)03d %(ls_level)5s --- [%(ls_thread){MAX_THREAD_NAME_LEN}s] %(ls_name)-{MAX_NAME_LEN}s : %(message)s"
+LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
+LOG_INPUT_FORMAT = "%(input_type)s(%(input)s, headers=%(request_headers)s)"
+LOG_OUTPUT_FORMAT = "%(output_type)s(%(output)s, headers=%(response_headers)s)"
+LOG_CONTEXT_FORMAT = "%(account_id)s/%(region)s"
+
+CUSTOM_LEVEL_NAMES = {
+ 50: "FATAL",
+ 40: "ERROR",
+ 30: "WARN",
+ 20: "INFO",
+ 10: "DEBUG",
+}
+
+
+class DefaultFormatter(logging.Formatter):
+ """
+ A formatter that uses ``LOG_FORMAT`` and ``LOG_DATE_FORMAT``.
+ """
+
+ def __init__(self, fmt=LOG_FORMAT, datefmt=LOG_DATE_FORMAT):
+ super(DefaultFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
+
+
+class AddFormattedAttributes(logging.Filter):
+ """
+ Filter that adds three attributes to a log record:
+
+ - ls_level: the abbreviated loglevel that's max 5 characters long
+ - ls_name: the abbreviated name of the logger (e.g., `l.bootstrap.install`), trimmed to ``MAX_NAME_LEN``
+    - ls_thread: the abbreviated thread name (prefix trimmed, e.g., ``omeThread-108``)
+ """
+
+ max_name_len: int
+ max_thread_len: int
+
+ def __init__(self, max_name_len: int = None, max_thread_len: int = None):
+ super(AddFormattedAttributes, self).__init__()
+ self.max_name_len = max_name_len if max_name_len else MAX_NAME_LEN
+ self.max_thread_len = max_thread_len if max_thread_len else MAX_THREAD_NAME_LEN
+
+ def filter(self, record):
+ record.ls_level = CUSTOM_LEVEL_NAMES.get(record.levelno, record.levelname)
+ record.ls_name = self._get_compressed_logger_name(record.name)
+ record.ls_thread = record.threadName[-self.max_thread_len :]
+ return True
+
+ @lru_cache(maxsize=256)
+ def _get_compressed_logger_name(self, name):
+ return compress_logger_name(name, self.max_name_len)
+
+
+class MaskSensitiveInputFilter(logging.Filter):
+ """
+ Filter that hides sensitive from a binary json string in a record input.
+ It will find the mathing keys and replace their values with "******"
+
+ For example, if initialized with `sensitive_keys=["my_key"]`, the input
+ b'{"my_key": "sensitive_value"}' would become b'{"my_key": "******"}'.
+ """
+
+ patterns: list[tuple[re.Pattern[bytes], bytes]]
+
+ def __init__(self, sensitive_keys: list[str]):
+ super(MaskSensitiveInputFilter, self).__init__()
+
+ self.patterns = [
+ (re.compile(to_bytes(rf'"{key}":\s*"[^"]+"')), to_bytes(f'"{key}": "******"'))
+ for key in sensitive_keys
+ ]
+
+ def filter(self, record):
+ if record.input and isinstance(record.input, bytes):
+ record.input = self.mask_sensitive_msg(record.input)
+ return True
+
+ def mask_sensitive_msg(self, message: bytes) -> bytes:
+ for pattern, replacement in self.patterns:
+ message = re.sub(pattern, replacement, message)
+ return message
+
+
+def compress_logger_name(name: str, length: int) -> str:
+ """
+ Creates a short version of a logger name. For example ``my.very.long.logger.name`` with length=17 turns into
+ ``m.v.l.logger.name``.
+
+ :param name: the logger name
+ :param length: the max length of the logger name
+ :return: the compressed name
+ """
+ if len(name) <= length:
+ return name
+
+ parts = name.split(".")
+ parts.reverse()
+
+ new_parts = []
+
+ # we start by assuming that all parts are collapsed
+ # x.x.x requires 5 = 2n - 1 characters
+ cur_length = (len(parts) * 2) - 1
+
+ for i in range(len(parts)):
+ # try to expand the current part and calculate the resulting length
+ part = parts[i]
+ next_len = cur_length + (len(part) - 1)
+
+ if next_len > length:
+ # if the resulting length would exceed the limit, add only the first letter of the parts of all remaining
+ # parts
+ new_parts += [p[0] for p in parts[i:]]
+
+ # but if this is the first item, that means we would display nothing, so at least display as much of the
+ # max length as possible
+ if i == 0:
+ remaining = length - cur_length
+ if remaining > 0:
+ new_parts[0] = part[: (remaining + 1)]
+
+ break
+
+ # expanding the current part, i.e., instead of using just the one character, we add the entire part
+ new_parts.append(part)
+ cur_length = next_len
+
+ new_parts.reverse()
+ return ".".join(new_parts)
+
+
+class TraceLoggingFormatter(logging.Formatter):
+ aws_trace_log_format = "; ".join([LOG_FORMAT, LOG_INPUT_FORMAT, LOG_OUTPUT_FORMAT])
+ bytes_length_display_threshold = 512
+
+ def __init__(self):
+ super().__init__(fmt=self.aws_trace_log_format, datefmt=LOG_DATE_FORMAT)
+
+ def _replace_large_payloads(self, input: Any) -> Any:
+ """
+ Replaces large payloads in the logs with placeholders to avoid cluttering the logs with huge bytes payloads.
+ :param input: Input/output extra passed when logging. If it is bytes, it will be replaced if larger than
+ bytes_length_display_threshold
+ :return: Input, unless it is bytes and longer than bytes_length_display_threshold, then `Bytes(length_of_input)`
+ """
+ if isinstance(input, bytes) and len(input) > self.bytes_length_display_threshold:
+ return f"Bytes({format_bytes(len(input))})"
+ return input
+
+ def format(self, record: logging.LogRecord) -> str:
+ record.input = self._replace_large_payloads(record.input)
+ record.output = self._replace_large_payloads(record.output)
+ return super().format(record=record)
+
+
+class AwsTraceLoggingFormatter(TraceLoggingFormatter):
+ aws_trace_log_format = "; ".join(
+ [LOG_FORMAT, LOG_CONTEXT_FORMAT, LOG_INPUT_FORMAT, LOG_OUTPUT_FORMAT]
+ )
+
+ def __init__(self):
+ super().__init__()
+
+ def _copy_service_dict(self, service_dict: Dict) -> Dict:
+ if not isinstance(service_dict, Dict):
+ return service_dict
+ result = {}
+ for key, value in service_dict.items():
+ if isinstance(value, dict):
+ result[key] = self._copy_service_dict(value)
+ elif isinstance(value, bytes) and len(value) > self.bytes_length_display_threshold:
+ result[key] = f"Bytes({format_bytes(len(value))})"
+ elif isinstance(value, list):
+ result[key] = [self._copy_service_dict(item) for item in value]
+ else:
+ result[key] = value
+ return result
+
+ def format(self, record: logging.LogRecord) -> str:
+ record.input = self._copy_service_dict(record.input)
+ record.output = self._copy_service_dict(record.output)
+ return super().format(record=record)
diff --git a/localstack-core/localstack/logging/setup.py b/localstack-core/localstack/logging/setup.py
new file mode 100644
index 0000000000000..4a10d7cb7452d
--- /dev/null
+++ b/localstack-core/localstack/logging/setup.py
@@ -0,0 +1,142 @@
+import logging
+import sys
+import warnings
+
+from localstack import config, constants
+
+from ..utils.strings import key_value_pairs_to_dict
+from .format import AddFormattedAttributes, DefaultFormatter
+
+# The log levels for modules are evaluated incrementally for logging granularity,
+# from highest (DEBUG) to lowest (TRACE_INTERNAL). Hence, each module below should have
+# a higher level, which serves as its default.
+
+default_log_levels = {
+ "asyncio": logging.INFO,
+ "boto3": logging.INFO,
+ "botocore": logging.ERROR,
+ "docker": logging.WARNING,
+ "elasticsearch": logging.ERROR,
+ "hpack": logging.ERROR,
+ "moto": logging.WARNING,
+ "requests": logging.WARNING,
+ "s3transfer": logging.INFO,
+ "urllib3": logging.WARNING,
+ "werkzeug": logging.WARNING,
+ "rolo": logging.WARNING,
+ "parse": logging.WARNING,
+ "localstack.aws.accounts": logging.INFO,
+ "localstack.aws.protocol.serializer": logging.INFO,
+ "localstack.aws.serving.wsgi": logging.WARNING,
+ "localstack.request": logging.INFO,
+ "localstack.request.internal": logging.WARNING,
+ "localstack.state.inspect": logging.INFO,
+ "localstack_persistence": logging.INFO,
+}
+
+trace_log_levels = {
+ "rolo": logging.DEBUG,
+ "localstack.aws.protocol.serializer": logging.DEBUG,
+ "localstack.aws.serving.wsgi": logging.DEBUG,
+ "localstack.request": logging.DEBUG,
+ "localstack.request.internal": logging.INFO,
+ "localstack.state.inspect": logging.DEBUG,
+}
+
+trace_internal_log_levels = {
+ "localstack.aws.accounts": logging.DEBUG,
+ "localstack.request.internal": logging.DEBUG,
+}
+
+
+def setup_logging_for_cli(log_level=logging.INFO):
+ logging.basicConfig(level=log_level)
+
+ # set log levels of loggers
+ logging.root.setLevel(log_level)
+ logging.getLogger("localstack").setLevel(log_level)
+ for logger, level in default_log_levels.items():
+ logging.getLogger(logger).setLevel(level)
+
+
+def get_log_level_from_config():
+ # overriding the log level if LS_LOG has been set
+ if config.LS_LOG:
+ log_level = str(config.LS_LOG).upper()
+ if log_level.lower() in constants.TRACE_LOG_LEVELS:
+ log_level = "DEBUG"
+ log_level = logging._nameToLevel[log_level]
+ return log_level
+
+ return logging.DEBUG if config.DEBUG else logging.INFO
+
+
+def setup_logging_from_config():
+ log_level = get_log_level_from_config()
+ setup_logging(log_level)
+
+ if config.is_trace_logging_enabled():
+ for name, level in trace_log_levels.items():
+ logging.getLogger(name).setLevel(level)
+ if config.LS_LOG == constants.LS_LOG_TRACE_INTERNAL:
+ for name, level in trace_internal_log_levels.items():
+ logging.getLogger(name).setLevel(level)
+
+ raw_logging_override = config.LOG_LEVEL_OVERRIDES
+ if raw_logging_override:
+ logging_overrides = key_value_pairs_to_dict(raw_logging_override)
+ for logger, level_name in logging_overrides.items():
+ level = getattr(logging, level_name, None)
+ if not level:
+ raise ValueError(
+ f"Failed to configure logging overrides ({raw_logging_override}): '{level_name}' is not a valid log level"
+ )
+ logging.getLogger(logger).setLevel(level)
+
+
+def create_default_handler(log_level: int):
+ log_handler = logging.StreamHandler(stream=sys.stderr)
+ log_handler.setLevel(log_level)
+ log_handler.setFormatter(DefaultFormatter())
+ log_handler.addFilter(AddFormattedAttributes())
+ return log_handler
+
+
+def setup_logging(log_level=logging.INFO) -> None:
+ """
+ Configures the python logging environment for LocalStack.
+
+ :param log_level: the optional log level.
+ """
+    # create a default handler for the root logger (basically logging.basicConfig but explicit)
+ log_handler = create_default_handler(log_level)
+
+ # replace any existing handlers
+ logging.basicConfig(level=log_level, handlers=[log_handler])
+
+ # disable some logs and warnings
+ warnings.filterwarnings("ignore")
+ logging.captureWarnings(True)
+
+ # set log levels of loggers
+ logging.root.setLevel(log_level)
+ logging.getLogger("localstack").setLevel(log_level)
+ for logger, level in default_log_levels.items():
+ logging.getLogger(logger).setLevel(level)
+
+
+def setup_hypercorn_logger(hypercorn_config) -> None:
+ """
+ Sets the hypercorn loggers, which are created in a peculiar way, to the localstack settings.
+
+ :param hypercorn_config: a hypercorn.Config object
+ """
+ logger = hypercorn_config.log.access_logger
+ if logger:
+ logger.handlers[0].addFilter(AddFormattedAttributes())
+ logger.handlers[0].setFormatter(DefaultFormatter())
+
+ logger = hypercorn_config.log.error_logger
+ if logger:
+ logger.handlers[0].addFilter(AddFormattedAttributes())
+ logger.handlers[0].setFormatter(DefaultFormatter())
diff --git a/localstack-core/localstack/openapi.yaml b/localstack-core/localstack/openapi.yaml
new file mode 100644
index 0000000000000..b3656c3f6f1af
--- /dev/null
+++ b/localstack-core/localstack/openapi.yaml
@@ -0,0 +1,1070 @@
+openapi: 3.1.0
+info:
+ contact:
+ email: info@localstack.cloud
+ name: LocalStack Support
+ url: https://www.localstack.cloud/contact
+ summary: The LocalStack REST API exposes functionality related to diagnostics, health
+ checks, plugins, initialisation hooks, service introspection, and more.
+ termsOfService: https://www.localstack.cloud/legal/tos
+ title: LocalStack REST API for Community
+ version: latest
+externalDocs:
+ description: LocalStack Documentation
+ url: https://docs.localstack.cloud
+servers:
+ - url: http://{host}:{port}
+ variables:
+ port:
+ default: '4566'
+ host:
+ default: 'localhost.localstack.cloud'
+components:
+ parameters:
+ SesIdFilter:
+ description: Filter for the `id` field in SES message
+ in: query
+ name: id
+ required: false
+ schema:
+ type: string
+ SesEmailFilter:
+ description: Filter for the `source` field in SES message
+ in: query
+ name: email
+ required: false
+ schema:
+ type: string
+ SnsAccountId:
+ description: '`accountId` field of the resource'
+ in: query
+ name: accountId
+ required: false
+ schema:
+ default: '000000000000'
+ type: string
+ SnsEndpointArn:
+ description: '`endpointArn` field of the resource'
+ in: query
+ name: endpointArn
+ required: false
+ schema:
+ type: string
+ SnsPhoneNumber:
+ description: '`phoneNumber` field of the resource'
+ in: query
+ name: phoneNumber
+ required: false
+ schema:
+ type: string
+ SnsRegion:
+ description: '`region` field of the resource'
+ in: query
+ name: region
+ required: false
+ schema:
+ default: us-east-1
+ type: string
+ schemas:
+ InitScripts:
+ additionalProperties: false
+ properties:
+ completed:
+ additionalProperties: false
+ properties:
+ BOOT:
+ type: boolean
+ READY:
+ type: boolean
+ SHUTDOWN:
+ type: boolean
+ START:
+ type: boolean
+ required:
+ - BOOT
+ - START
+ - READY
+ - SHUTDOWN
+ type: object
+ scripts:
+ items:
+ additionalProperties: false
+ properties:
+ name:
+ type: string
+ stage:
+ type: string
+ state:
+ type: string
+ required:
+ - stage
+ - name
+ - state
+ type: object
+ type: array
+ required:
+ - completed
+ - scripts
+ type: object
+ InitScriptsStage:
+ additionalProperties: false
+ properties:
+ completed:
+ type: boolean
+ scripts:
+ items:
+ additionalProperties: false
+ properties:
+ name:
+ type: string
+ stage:
+ type: string
+ state:
+ type: string
+ required:
+ - stage
+ - name
+ - state
+ type: object
+ type: array
+ required:
+ - completed
+ - scripts
+ type: object
+ SESDestination:
+ type: object
+ description: Possible destination of a SES message
+ properties:
+ ToAddresses:
+ type: array
+ items:
+ type: string
+ format: email
+ CcAddresses:
+ type: array
+ items:
+ type: string
+ format: email
+ BccAddresses:
+ type: array
+ items:
+ type: string
+ format: email
+ additionalProperties: false
+ SesSentEmail:
+ additionalProperties: false
+ properties:
+ Body:
+ additionalProperties: false
+ properties:
+ html_part:
+ type: string
+ text_part:
+ type: string
+ required:
+ - text_part
+ type: object
+ Destination:
+ $ref: '#/components/schemas/SESDestination'
+ Id:
+ type: string
+ RawData:
+ type: string
+ Region:
+ type: string
+ Source:
+ type: string
+ Subject:
+ type: string
+ Template:
+ type: string
+ TemplateData:
+ type: string
+ Timestamp:
+ type: string
+ required:
+ - Id
+ - Region
+ - Timestamp
+ - Source
+ type: object
+ SessionInfo:
+ additionalProperties: false
+ properties:
+ edition:
+ type: string
+ is_docker:
+ type: boolean
+ is_license_activated:
+ type: boolean
+ machine_id:
+ type: string
+ server_time_utc:
+ type: string
+ session_id:
+ type: string
+ system:
+ type: string
+ uptime:
+ type: integer
+ version:
+ type: string
+ required:
+ - version
+ - edition
+ - is_license_activated
+ - session_id
+ - machine_id
+ - system
+ - is_docker
+ - server_time_utc
+ - uptime
+ type: object
+ SnsSubscriptionTokenError:
+ additionalProperties: false
+ properties:
+ error:
+ type: string
+ subscription_arn:
+ type: string
+ required:
+ - error
+ - subscription_arn
+ type: object
+ SNSPlatformEndpointMessage:
+ type: object
+ description: Message sent to a platform endpoint via SNS
+ additionalProperties: false
+ properties:
+ TargetArn:
+ type: string
+ TopicArn:
+ type: string
+ Message:
+ type: string
+ MessageAttributes:
+ type: object
+ MessageStructure:
+ type: string
+ Subject:
+ type: [string, 'null']
+ MessageId:
+ type: string
+ SNSMessage:
+ type: object
+ description: Message sent via SNS
+ properties:
+ PhoneNumber:
+ type: string
+ TopicArn:
+ type: [string, 'null']
+ SubscriptionArn:
+ type: [string, 'null']
+ MessageId:
+ type: string
+ Message:
+ type: string
+ MessageAttributes:
+ type: object
+ MessageStructure:
+ type: [string, 'null']
+ Subject:
+ type: [string, 'null']
+ SNSPlatformEndpointMessages:
+ type: object
+ description: |
+ Messages sent to the platform endpoint retrieved via the retrospective endpoint.
+ The endpoint ARN is the key with a list of messages as value.
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/SNSPlatformEndpointMessage'
+ SMSMessages:
+ type: object
+ description: |
+ SMS messages retrieved via the retrospective endpoint.
+ The phone number is the key with a list of messages as value.
+ additionalProperties:
+ type: array
+ items:
+ $ref: '#/components/schemas/SNSMessage'
+ SNSPlatformEndpointResponse:
+ type: object
+ additionalProperties: false
+ description: Response payload for the /_aws/sns/platform-endpoint-messages endpoint
+ properties:
+ region:
+ type: string
+ description: "The AWS region, e.g., us-east-1"
+ platform_endpoint_messages:
+ $ref: '#/components/schemas/SNSPlatformEndpointMessages'
+ required:
+ - region
+ - platform_endpoint_messages
+ SNSSMSMessagesResponse:
+ type: object
+ additionalProperties: false
+ description: Response payload for the /_aws/sns/sms-messages endpoint
+ properties:
+ region:
+ type: string
+ description: "The AWS region, e.g., us-east-1"
+ sms_messages:
+ $ref: '#/components/schemas/SMSMessages'
+ required:
+ - region
+ - sms_messages
+ ReceiveMessageRequest:
+ type: object
+ description: https://github.com/boto/botocore/blob/develop/botocore/data/sqs/2012-11-05/service-2.json
+ required:
+ - QueueUrl
+ properties:
+ QueueUrl:
+ type: string
+ format: uri
+ AttributeNames:
+ type: array
+ items:
+ type: string
+ MessageSystemAttributeNames:
+ type: array
+ items:
+ type: string
+ MessageAttributeNames:
+ type: array
+ items:
+ type: string
+ MaxNumberOfMessages:
+ type: integer
+ VisibilityTimeout:
+ type: integer
+ WaitTimeSeconds:
+ type: integer
+ ReceiveRequestAttemptId:
+ type: string
+ ReceiveMessageResult:
+ type: object
+ description: https://github.com/boto/botocore/blob/develop/botocore/data/sqs/2012-11-05/service-2.json
+ properties:
+ Messages:
+ type: array
+ items:
+ $ref: '#/components/schemas/Message'
+ Message:
+ type: object
+ properties:
+ MessageId:
+ type: [string, 'null']
+ ReceiptHandle:
+ type: [string, 'null']
+ MD5OfBody:
+ type: [string, 'null']
+ Body:
+ type: [string, 'null']
+ Attributes:
+ type: object
+ MessageAttributes:
+ type: object
+ CloudWatchMetrics:
+ additionalProperties: false
+ properties:
+ metrics:
+ items:
+ additionalProperties: false
+ properties:
+ account:
+ description: Account ID
+ type: string
+ d:
+ description: Dimensions
+ items:
+ additionalProperties: false
+ properties:
+ n:
+ description: Dimension name
+ type: string
+ v:
+ description: Dimension value
+ oneOf:
+ - type: string
+ - type: integer
+ required:
+ - n
+ - v
+ type: object
+ type: array
+ n:
+ description: Metric name
+ type: string
+ ns:
+ description: Namespace
+ type: string
+ region:
+ description: Region name
+ type: string
+ t:
+ description: Timestamp
+ oneOf:
+ - type: string
+ format: date-time
+ - type: number
+ v:
+ description: Metric value
+ oneOf:
+ - type: string
+ - type: integer
+ required:
+ - ns
+ - n
+ - v
+ - t
+ - d
+ - account
+ - region
+ type: object
+ type: array
+ required:
+ - metrics
+ type: object
+paths:
+ /_aws/cloudwatch/metrics/raw:
+ get:
+ description: Retrieve CloudWatch metrics
+ operationId: get_cloudwatch_metrics
+ tags: [aws]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CloudWatchMetrics'
+ description: CloudWatch metrics
+ /_aws/dynamodb/expired:
+ delete:
+ description: Delete expired items from TTL-enabled DynamoDB tables
+ operationId: delete_ddb_expired_items
+ tags: [aws]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ ExpiredItems:
+ description: Number of expired items that were deleted
+ type: integer
+ required:
+ - ExpiredItems
+ type: object
+ description: Operation was successful
+ /_aws/events/rules/{rule_arn}/trigger:
+ get:
+ description: Trigger a scheduled EventBridge rule
+ operationId: trigger_event_bridge_rule
+ tags: [aws]
+ parameters:
+ - description: EventBridge rule ARN
+ in: path
+ name: rule_arn
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ description: EventBridge rule was triggered
+ '404':
+ description: Not found
+ /_aws/lambda/init:
+ get:
+ description: Retrieve Lambda runtime init binary
+ operationId: get_lambda_init
+ tags: [aws]
+ responses:
+ '200':
+ content:
+ application/octet-stream: {}
+ description: Lambda runtime init binary
+ /_aws/lambda/runtimes:
+ get:
+ description: List available Lambda runtimes
+ operationId: get_lambda_runtimes
+ tags: [aws]
+ parameters:
+ - in: query
+ name: filter
+ required: false
+ schema:
+ default: supported
+ enum:
+ - all
+ - deprecated
+ - supported
+ type: string
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ Runtimes:
+ items:
+ type: string
+ type: array
+ required:
+ - Runtimes
+ type: object
+ description: Available Lambda runtimes
+ /_aws/ses:
+ delete:
+ description: Discard sent SES messages
+ operationId: discard_ses_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SesIdFilter'
+ responses:
+ '204':
+ description: Message was successfully discarded
+ get:
+ description: Retrieve sent SES messages
+ operationId: get_ses_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SesIdFilter'
+ - $ref: '#/components/parameters/SesEmailFilter'
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ messages:
+ items:
+ $ref: '#/components/schemas/SesSentEmail'
+ type: array
+ required:
+ - messages
+ type: object
+ description: List of sent messages
+ /_aws/sns/platform-endpoint-messages:
+ delete:
+ description: Discard the messages published to a platform endpoint via SNS
+ operationId: discard_sns_endpoint_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SnsAccountId'
+ - $ref: '#/components/parameters/SnsRegion'
+ - $ref: '#/components/parameters/SnsEndpointArn'
+ responses:
+ '204':
+ description: Platform endpoint message was discarded
+ get:
+ description: Retrieve the messages sent to a platform endpoint via SNS
+ operationId: get_sns_endpoint_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SnsAccountId'
+ - $ref: '#/components/parameters/SnsRegion'
+ - $ref: '#/components/parameters/SnsEndpointArn'
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/SNSPlatformEndpointResponse"
+ description: SNS messages via retrospective access
+ /_aws/sns/sms-messages:
+ delete:
+ description: Discard SNS SMS messages
+ operationId: discard_sns_sms_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SnsAccountId'
+ - $ref: '#/components/parameters/SnsRegion'
+ - $ref: '#/components/parameters/SnsPhoneNumber'
+ responses:
+ '204':
+ description: SMS message was discarded
+ get:
+ description: Retrieve SNS SMS messages
+ operationId: get_sns_sms_messages
+ tags: [aws]
+ parameters:
+ - $ref: '#/components/parameters/SnsAccountId'
+ - $ref: '#/components/parameters/SnsRegion'
+ - $ref: '#/components/parameters/SnsPhoneNumber'
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/SNSSMSMessagesResponse"
+ description: SNS messages via retrospective access
+ /_aws/sns/subscription-tokens/{subscription_arn}:
+ get:
+ description: Retrieve SNS subscription token for confirmation
+ operationId: get_sns_subscription_token
+ tags: [aws]
+ parameters:
+ - description: '`subscriptionArn` resource of subscription token'
+ in: path
+ name: subscription_arn
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ subscription_arn:
+ type: string
+ subscription_token:
+ type: string
+ required:
+ - subscription_token
+ - subscription_arn
+ type: object
+ description: Subscription token
+ '400':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnsSubscriptionTokenError'
+ description: Bad request
+ '404':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SnsSubscriptionTokenError'
+ description: Not found
+ /_aws/sqs/messages:
+ get:
+ description: List SQS queue messages without side effects
+ operationId: list_all_sqs_messages
+ tags: [aws]
+ parameters:
+ - description: SQS queue URL
+ in: query
+ name: QueueUrl
+ required: false
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ text/xml:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageResult'
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageResult'
+ description: SQS queue messages
+ '400':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Bad request
+ '404':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Not found
+ post:
+ summary: Retrieves one or more messages from the specified queue.
+ description: |
+ This API receives messages from an SQS queue.
+ https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#API_ReceiveMessage_ResponseSyntax
+ operationId: receive_message
+ requestBody:
+ required: true
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageRequest'
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageRequest'
+ responses:
+ '200':
+ content:
+ text/xml: {}
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageResult'
+ description: SQS queue messages
+ '400':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Bad request
+ '404':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Not found
+ /_aws/sqs/messages/{region}/{account_id}/{queue_name}:
+ get:
+ description: List SQS messages without side effects
+ operationId: list_sqs_messages
+ tags: [aws]
+ parameters:
+ - description: SQS queue region
+ in: path
+ name: region
+ required: true
+ schema:
+ type: string
+ - description: SQS queue account ID
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ - description: SQS queue name
+ in: path
+ name: queue_name
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ text/xml: {}
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ReceiveMessageResult'
+ description: SQS queue messages
+ '400':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Bad request
+ '404':
+ content:
+ text/xml: {}
+ application/json: {}
+ description: Not found
+ /_localstack/config:
+ get:
+ description: Get current LocalStack configuration
+ operationId: get_config
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Current LocalStack configuration
+ post:
+ description: Configuration option to update with new value
+ operationId: update_config_option
+ tags: [localstack]
+ requestBody:
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ value:
+ type:
+ - number
+ - string
+ variable:
+ pattern: ^[_a-zA-Z0-9]+$
+ type: string
+ required:
+ - variable
+ - value
+ type: object
+ required: true
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ value:
+ type:
+ - number
+ - string
+ variable:
+ type: string
+ required:
+ - variable
+ - value
+ type: object
+ description: Configuration option is updated
+ '400':
+ content:
+ application/json: {}
+ description: Bad request
+ /_localstack/diagnose:
+ get:
+ description: Get diagnostics report
+ operationId: get_diagnostics
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ config:
+ type: object
+ docker-dependent-image-hosts:
+ type: object
+ docker-inspect:
+ type: object
+ file-tree:
+ type: object
+ important-endpoints:
+ type: object
+ info:
+ $ref: '#/components/schemas/SessionInfo'
+ logs:
+ additionalProperties: false
+ properties:
+ docker:
+ type: string
+ required:
+ - docker
+ type: object
+ services:
+ type: object
+ usage:
+ type: object
+ version:
+ additionalProperties: false
+ properties:
+ host:
+ additionalProperties: false
+ properties:
+ kernel:
+ type: string
+ required:
+ - kernel
+ type: object
+ image-version:
+ additionalProperties: false
+ properties:
+ created:
+ type: string
+ id:
+ type: string
+ sha256:
+ type: string
+ tag:
+ type: string
+ required:
+ - id
+ - sha256
+ - tag
+ - created
+ type: object
+ localstack-version:
+ additionalProperties: false
+ properties:
+ build-date:
+ type:
+ - string
+ - 'null'
+ build-git-hash:
+ type:
+ - string
+ - 'null'
+ build-version:
+ type:
+ - string
+ - 'null'
+ required:
+ - build-date
+ - build-git-hash
+ - build-version
+ type: object
+ required:
+ - image-version
+ - localstack-version
+ - host
+ type: object
+ required:
+ - version
+ - info
+ - services
+ - config
+ - docker-inspect
+ - docker-dependent-image-hosts
+ - file-tree
+ - important-endpoints
+ - logs
+ - usage
+ type: object
+ description: Diagnostics report
+ /_localstack/health:
+ get:
+ description: Get available LocalStack features and AWS services
+ operationId: get_features_and_services
+ tags: [localstack]
+ parameters:
+ - allowEmptyValue: true
+ in: query
+ name: reload
+ required: false
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ edition:
+ enum:
+ - community
+ - pro
+ - enterprise
+ - unknown
+ type: string
+ features:
+ type: object
+ services:
+ type: object
+ version:
+ type: string
+ required:
+ - edition
+ - services
+ - version
+ type: object
+ description: Available LocalStack features and AWS services
+ head:
+ tags: [localstack]
+ operationId: health
+ responses:
+ '200':
+ content:
+ text/plain: {}
+ description: ''
+ post:
+ description: Restart or terminate LocalStack session
+ operationId: manage_session
+ tags: [localstack]
+ requestBody:
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ action:
+ enum:
+ - restart
+ - kill
+ type: string
+ required:
+ - action
+ type: object
+ description: Action to perform
+ required: true
+ responses:
+ '200':
+ content:
+ text/plain: {}
+ description: Action was successful
+ '400':
+ content:
+ text/plain: {}
+ description: Bad request
+ put:
+ description: Store arbitrary data to in-memory state
+ operationId: store_data
+ tags: [localstack]
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Data to save
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ additionalProperties: false
+ properties:
+ status:
+ type: string
+ required:
+ - status
+ type: object
+ description: Data was saved
+ /_localstack/info:
+ get:
+ description: Get information about the current LocalStack session
+ operationId: get_session_info
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SessionInfo'
+ description: Information about the current LocalStack session
+ /_localstack/init:
+ get:
+ description: Get information about init scripts
+ operationId: get_init_script_info
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/InitScripts'
+ description: Information about init scripts
+ /_localstack/init/{stage}:
+ get:
+ description: Get information about init scripts in a specific stage
+ operationId: get_init_script_info_stage
+ tags: [localstack]
+ parameters:
+ - in: path
+ name: stage
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/InitScriptsStage'
+ description: Information about init scripts in a specific stage
+ /_localstack/plugins:
+ get:
+ description: ''
+ operationId: get_plugins
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json: {}
+ description: ''
+ /_localstack/usage:
+ get:
+ description: ''
+ operationId: get_usage
+ tags: [localstack]
+ responses:
+ '200':
+ content:
+ application/json: {}
+ description: ''
diff --git a/localstack-core/localstack/packages/__init__.py b/localstack-core/localstack/packages/__init__.py
new file mode 100644
index 0000000000000..f4f7585cfbe95
--- /dev/null
+++ b/localstack-core/localstack/packages/__init__.py
@@ -0,0 +1,25 @@
+from .api import (
+ InstallTarget,
+ NoSuchVersionException,
+ Package,
+ PackageException,
+ PackageInstaller,
+ PackagesPlugin,
+ package,
+ packages,
+)
+from .core import DownloadInstaller, GitHubReleaseInstaller, SystemNotSupportedException
+
+__all__ = [
+ "Package",
+ "PackageInstaller",
+ "GitHubReleaseInstaller",
+ "DownloadInstaller",
+ "InstallTarget",
+ "PackageException",
+ "NoSuchVersionException",
+ "SystemNotSupportedException",
+ "PackagesPlugin",
+ "package",
+ "packages",
+]
diff --git a/localstack-core/localstack/packages/api.py b/localstack-core/localstack/packages/api.py
new file mode 100644
index 0000000000000..bcc8add9577c5
--- /dev/null
+++ b/localstack-core/localstack/packages/api.py
@@ -0,0 +1,415 @@
+import abc
+import functools
+import logging
+import os
+from collections import defaultdict
+from enum import Enum
+from inspect import getmodule
+from threading import RLock
+from typing import Any, Callable, Generic, List, Optional, ParamSpec, TypeVar
+
+from plux import Plugin, PluginManager, PluginSpec # type: ignore
+
+from localstack import config
+
+LOG = logging.getLogger(__name__)
+
+
+class PackageException(Exception):
+ """Basic exception indicating that a package-specific exception occurred."""
+
+ pass
+
+
+class NoSuchVersionException(PackageException):
+ """Exception indicating that a requested installer version is not available / supported."""
+
+ def __init__(self, package: str | None = None, version: str | None = None):
+ message = "Unable to find requested version"
+ if package and version:
+ message += f"Unable to find requested version '{version}' for package '{package}'"
+ super().__init__(message)
+
+
class InstallTarget(Enum):
    """
    Different installation targets.
    Attention:
    - These targets are directly used in the LPM API and are therefore part of a public API!
    - The order of the entries in the enum define the default lookup order when looking for package installations.

    These targets refer to the directories in config#Directories.
    - VAR_LIBS: Used for packages installed at runtime. They are installed in a host-mounted volume.
      This directory / these installations persist across multiple containers.
    - STATIC_LIBS: Used for packages installed at build time. They are installed in a non-host-mounted volume.
      This directory is re-created whenever a container is recreated.
    """

    # Member values are directory paths resolved from the LocalStack config at import time.
    # VAR_LIBS is declared first on purpose: it takes precedence in installation lookups.
    VAR_LIBS = config.dirs.var_libs
    STATIC_LIBS = config.dirs.static_libs
+
+
class PackageInstaller(abc.ABC):
    """
    Base class for a specific installer.
    An instance of an installer manages the installation of a specific Package (in a specific version, if there are
    multiple versions).
    """

    def __init__(self, name: str, version: str, install_lock: Optional[RLock] = None):
        """
        :param name: technical package name, f.e. "opensearch"
        :param version: version of the package to install
        :param install_lock: custom lock which should be used for this package installer instance for the
                             complete #install call. Defaults to a per-instance reentrant lock (RLock).
                             Package instances create one installer per version. Therefore, by default, the lock
                             ensures that package installations of the same package and version are mutually exclusive.
        """
        self.name = name
        self.version = version
        self.install_lock = install_lock or RLock()
        # Tracks, per target, whether _setup_existing_installation already ran in this Python session.
        self._setup_for_target: dict[InstallTarget, bool] = defaultdict(lambda: False)

    def install(self, target: Optional[InstallTarget] = None) -> None:
        """
        Performs the package installation.

        :param target: preferred installation target. Default is VAR_LIBS.
        :return: None
        :raises PackageException: if the installation fails
        """
        try:
            if not target:
                target = InstallTarget.VAR_LIBS
            # We have to acquire the lock before checking if the package is installed, as the is_installed check
            # is _only_ reliable if no other thread is currently actually installing
            with self.install_lock:
                # Skip the installation if it's already installed
                if not self.is_installed():
                    LOG.debug("Starting installation of %s %s...", self.name, self.version)
                    self._prepare_installation(target)
                    self._install(target)
                    self._post_process(target)
                    LOG.debug("Installation of %s %s finished.", self.name, self.version)
                else:
                    LOG.debug(
                        "Installation of %s %s skipped (already installed).",
                        self.name,
                        self.version,
                    )
                if not self._setup_for_target[target]:
                    LOG.debug("Performing runtime setup for already installed package.")
                    self._setup_existing_installation(target)
                    # Mark the setup as done so it runs exactly once per session and target,
                    # as promised by the _setup_existing_installation contract. (Previously the
                    # flag was never set, so the setup re-ran on every #install call.)
                    self._setup_for_target[target] = True
        except PackageException as e:
            raise e
        except Exception as e:
            raise PackageException(f"Installation of {self.name} {self.version} failed.") from e

    def is_installed(self) -> bool:
        """
        Checks if the package is already installed.

        :return: True if the package is already installed (i.e. an installation is not necessary).
        """
        return self.get_installed_dir() is not None

    def get_installed_dir(self) -> str | None:
        """
        Returns the directory of an existing installation. The directory can differ based on the installation target
        and version.
        :return: str representation of the installation directory path or None if the package is not installed anywhere
        """
        # Probe the targets in enum declaration order (VAR_LIBS before STATIC_LIBS).
        for target in InstallTarget:
            directory = self._get_install_dir(target)
            if directory and os.path.exists(self._get_install_marker_path(directory)):
                return directory
        return None

    def _get_install_dir(self, target: InstallTarget) -> str:
        """
        Builds the installation directory for a specific target.
        :param target: to create the installation directory path for
        :return: str representation of the installation directory for the given target
        """
        return os.path.join(target.value, self.name, self.version)

    def _get_install_marker_path(self, install_dir: str) -> str:
        """
        Builds the path for a specific "marker" whose presence indicates that the package has been installed
        successfully in the given directory.

        :param install_dir: base path for the check (f.e. /var/lib/localstack/lib/dynamodblocal/latest/)
        :return: path which should be checked to indicate if the package has been installed successfully
                 (f.e. /var/lib/localstack/lib/dynamodblocal/latest/DynamoDBLocal.jar)
        """
        raise NotImplementedError()

    def _setup_existing_installation(self, target: InstallTarget) -> None:
        """
        Internal function to perform the setup for an existing installation, f.e. adding a path to an environment.
        This is only necessary for certain installers (like the PythonPackageInstaller).
        This function will _always_ be executed _exactly_ once within a Python session for a specific installer
        instance and target, if #install is called for the respective target.
        :param target: of the installation
        :return: None
        """
        pass

    def _prepare_installation(self, target: InstallTarget) -> None:
        """
        Internal function to prepare an installation, f.e. by downloading some data or installing an OS package repo.
        Can be implemented by specific installers.
        :param target: of the installation
        :return: None
        """
        pass

    def _install(self, target: InstallTarget) -> None:
        """
        Internal function to perform the actual installation.
        Must be implemented by specific installers.
        :param target: of the installation
        :return: None
        """
        raise NotImplementedError()

    def _post_process(self, target: InstallTarget) -> None:
        """
        Internal function to perform some post-processing, f.e. patching an installation or creating symlinks.
        :param target: of the installation
        :return: None
        """
        pass
+
+
+# With Python 3.13 we should be able to set PackageInstaller as the default
+# https://typing.python.org/en/latest/spec/generics.html#type-parameter-defaults
+T = TypeVar("T", bound=PackageInstaller)
+
+
class Package(abc.ABC, Generic[T]):
    """
    A Package defines a specific kind of software, mostly used as backends or supporting system for service
    implementations.
    """

    def __init__(self, name: str, default_version: str):
        """
        :param name: Human readable name of the package, f.e. "PostgreSQL"
        :param default_version: Default version of the package which is used for installations if no version is defined
        """
        self.name = name
        self.default_version = default_version

    def get_installed_dir(self, version: str | None = None) -> str | None:
        """
        Finds a directory where the package (in the specific version) is installed.
        :param version: of the package to look for. If None, the default version of the package is used.
        :return: str representation of the path to the existing installation directory or None if the package in this
                 version is not yet installed.
        """
        return self.get_installer(version).get_installed_dir()

    def install(self, version: str | None = None, target: Optional[InstallTarget] = None) -> None:
        """
        Installs the package in the given version in the preferred target location.
        :param version: version of the package to install. If None, the default version of the package will be used.
        :param target: preferred installation target. If None, the var_libs directory is used.
        :raises NoSuchVersionException: If the given version is not supported.
        """
        self.get_installer(version).install(target)

    @functools.lru_cache()
    def get_installer(self, version: str | None = None) -> T:
        """
        Returns the installer instance for a specific version of the package.

        It is important that this be LRU cached. Installers have a mutex lock to prevent races, and it is necessary
        that this method returns the same installer instance for a given version.

        :param version: version of the package to install. If None, the default version of the package will be used.
        :return: PackageInstaller instance for the given version.
        :raises NoSuchVersionException: If the given version is not supported.
        """
        # NOTE(review): lru_cache on a method keys the cache on self and keeps Package instances alive
        # for the process lifetime; presumably acceptable here since packages are module-level
        # singletons — confirm if Package instances are ever created dynamically.
        if not version:
            # Recurse through the cache so the default version shares the same installer instance.
            return self.get_installer(self.default_version)
        if version not in self.get_versions():
            raise NoSuchVersionException(package=self.name, version=version)
        return self._get_installer(version)

    def get_versions(self) -> List[str]:
        """
        :return: List of all versions available for this package.
        """
        raise NotImplementedError()

    def _get_installer(self, version: str) -> T:
        """
        Internal lookup function which needs to be implemented by specific packages.
        It creates PackageInstaller instances for the specific version.

        :param version: to find the installer for
        :return: PackageInstaller instance responsible for installing the given version of the package.
        """
        raise NotImplementedError()

    def __str__(self) -> str:
        return self.name
+
+
class MultiPackageInstaller(PackageInstaller):
    """
    PackageInstaller implementation which is composed of multiple child package installers.
    """

    def __init__(self, name: str, version: str, package_installer: List[PackageInstaller]):
        """
        :param name: of the (multi-)package installer
        :param version: of this (multi-)package installer
        :param package_installer: List of installers this multi-package installer consists of
        """
        super().__init__(name=name, version=version)

        # A multi-package installer only makes sense with at least one child installer.
        assert isinstance(package_installer, list)
        assert len(package_installer) > 0
        self.package_installer = package_installer

    def install(self, target: Optional[InstallTarget] = None) -> None:
        """
        Installs every package this installer is composed of, in declaration order.

        :param target: which defines where to install the packages.
        :return: None
        """
        for child in self.package_installer:
            child.install(target=target)

    def get_installed_dir(self) -> str | None:
        # The first child installer acts as the representative of the whole bundle.
        return self.package_installer[0].get_installed_dir()

    def _install(self, target: InstallTarget) -> None:
        # Nothing to do here: the actual work happens in the child installers (see #install).
        pass

    def _get_install_dir(self, target: InstallTarget) -> str:
        # The first child installer acts as the representative of the whole bundle.
        return self.package_installer[0]._get_install_dir(target)

    def _get_install_marker_path(self, install_dir: str) -> str:
        # The first child installer acts as the representative of the whole bundle.
        return self.package_installer[0]._get_install_marker_path(install_dir)
+
+
+PLUGIN_NAMESPACE = "localstack.packages"
+
+
class PackagesPlugin(Plugin):  # type: ignore[misc]
    """
    Plugin implementation for Package plugins.
    A package plugin exposes a specific package instance.
    """

    api: str
    name: str

    def __init__(
        self,
        name: str,
        scope: str,
        get_package: Callable[[], Package[PackageInstaller] | List[Package[PackageInstaller]]],
        should_load: Callable[[], bool] | None = None,
    ) -> None:
        super().__init__()
        self.name = name
        self.scope = scope
        # Factories are stored instead of eager values so plugin discovery stays cheap.
        self._get_package = get_package
        self._should_load = should_load

    def should_load(self) -> bool:
        # Without an explicit predicate the plugin is always eligible for loading.
        return True if self._should_load is None else self._should_load()

    def get_package(self) -> Package[PackageInstaller]:
        """
        :return: returns the package instance of this package plugin
        """
        return self._get_package()  # type: ignore[return-value]
+
+
class NoSuchPackageException(PackageException):
    """Exception raised by the PackagesPluginManager to indicate that a package / version is not available."""
+
+
class PackagesPluginManager(PluginManager[PackagesPlugin]):  # type: ignore[misc]
    """PluginManager which simplifies the loading / access of PackagesPlugins and their exposed package instances."""

    def __init__(self) -> None:
        super().__init__(PLUGIN_NAMESPACE)

    def get_all_packages(self) -> list[tuple[str, str, Package[PackageInstaller]]]:
        """
        :return: (name, scope, package) tuples for all loadable package plugins, sorted by name and scope.
        """
        # NOTE(review): sorting compares tuples element-wise; this relies on (name, scope) pairs being
        # unique, since Package instances themselves define no ordering — confirm plugin names are unique.
        return sorted(
            [(plugin.name, plugin.scope, plugin.get_package()) for plugin in self.load_all()]
        )

    def get_packages(
        self, package_names: list[str], version: Optional[str] = None
    ) -> list[Package[PackageInstaller]]:
        """
        Resolves the given package names to their package instances (across all scopes).

        :param package_names: names of the packages to resolve (without the "/<scope>" suffix)
        :param version: optional version every resolved package must support
        :return: list of resolved package instances
        :raises NoSuchPackageException: if a name cannot be resolved or a package lacks the given version
        """
        # Plugin names are unique, but there could be multiple packages with the same name in different scopes
        plugin_specs_per_name = defaultdict(list)
        # Plugin names have the format "<name>/<scope>", build a dict of specs per package name for the lookup
        for plugin_spec in self.list_plugin_specs():
            (package_name, _, _) = plugin_spec.name.rpartition("/")
            plugin_specs_per_name[package_name].append(plugin_spec)

        package_instances: list[Package[PackageInstaller]] = []
        for package_name in package_names:
            plugin_specs = plugin_specs_per_name.get(package_name)
            if not plugin_specs:
                raise NoSuchPackageException(
                    f"unable to locate installer for package {package_name}"
                )
            for plugin_spec in plugin_specs:
                package_instance = self.load(plugin_spec.name).get_package()
                package_instances.append(package_instance)
                # NOTE(review): the version check aborts the whole lookup as soon as one scope's
                # package lacks the version, even if another scope could provide it — confirm intended.
                if version and version not in package_instance.get_versions():
                    raise NoSuchPackageException(
                        f"unable to locate installer for package {package_name} and version {version}"
                    )

        return package_instances
+
+
+P = ParamSpec("P")
+T2 = TypeVar("T2")
+
+
def package(
    name: str | None = None,
    scope: str = "community",
    should_load: Optional[Callable[[], bool]] = None,
) -> Callable[[Callable[[], Package[Any] | list[Package[Any]]]], PluginSpec]:
    """
    Decorator for marking methods that create Package instances as a PackagePlugin.
    Methods marked with this decorator are discoverable as a PluginSpec within the namespace "localstack.packages",
    with the name "<name>/<scope>". If name is not explicitly specified, then the parent module name is used as
    package name.

    :param name: name of the package plugin; defaults to the decorated function's parent module name
    :param scope: scope the package belongs to (defaults to "community")
    :param should_load: optional predicate controlling whether the plugin should be loaded
    :return: decorator turning a package factory function into a PluginSpec
    """

    def wrapper(fn: Callable[[], Package[Any] | list[Package[Any]]]) -> PluginSpec:
        # Fall back to the second-to-last module path component, i.e. the parent package name
        # (f.e. "localstack.services.opensearch.packages" -> "opensearch").
        _name = name or getmodule(fn).__name__.split(".")[-2]  # type: ignore[union-attr]

        @functools.wraps(fn)
        def factory() -> PackagesPlugin:
            return PackagesPlugin(name=_name, scope=scope, get_package=fn, should_load=should_load)

        return PluginSpec(PLUGIN_NAMESPACE, f"{_name}/{scope}", factory=factory)

    return wrapper
+
+
+# TODO remove (only used for migrating to new #package decorator)
+packages = package
diff --git a/localstack-core/localstack/packages/core.py b/localstack-core/localstack/packages/core.py
new file mode 100644
index 0000000000000..fde294492cc3a
--- /dev/null
+++ b/localstack-core/localstack/packages/core.py
@@ -0,0 +1,416 @@
+import logging
+import os
+import re
+from abc import ABC
+from functools import lru_cache
+from sys import version_info
+from typing import Any, Optional, Tuple
+
+import requests
+
+from localstack import config
+
+from ..constants import LOCALSTACK_VENV_FOLDER, MAVEN_REPO_URL
+from ..utils.archives import download_and_extract
+from ..utils.files import chmod_r, chown_r, mkdir, rm_rf
+from ..utils.http import download
+from ..utils.run import is_root, run
+from ..utils.venv import VirtualEnvironment
+from .api import InstallTarget, PackageException, PackageInstaller
+
+LOG = logging.getLogger(__name__)
+
+
class SystemNotSupportedException(PackageException):
    """Exception indicating that the current system is not allowed."""
+
+
class ExecutableInstaller(PackageInstaller, ABC):
    """
    This installer simply adds a clean interface for accessing a downloaded executable directly
    """

    def get_executable_path(self) -> str | None:
        """
        :return: the path to the downloaded binary or None if it's not yet downloaded / installed.
        """
        install_dir = self.get_installed_dir()
        # For executable installers the install marker is the binary itself.
        return self._get_install_marker_path(install_dir) if install_dir else None
+
+
class DownloadInstaller(ExecutableInstaller):
    """Installer which downloads a single file from a URL into the installation directory."""

    def __init__(self, name: str, version: str):
        super().__init__(name, version)

    def _get_download_url(self) -> str:
        # Must be provided by concrete installers.
        raise NotImplementedError()

    def _get_install_marker_path(self, install_dir: str) -> str:
        # The downloaded file itself - named after the last URL path segment - is the install marker.
        return os.path.join(install_dir, os.path.basename(self._get_download_url()))

    def _install(self, target: InstallTarget) -> None:
        destination_dir = self._get_install_dir(target)
        mkdir(destination_dir)
        url = self._get_download_url()
        destination_path = self._get_install_marker_path(destination_dir)
        download(url, destination_path)
+
+
class ArchiveDownloadAndExtractInstaller(ExecutableInstaller):
    """
    Installer which downloads an archive, extracts it into the installation directory, and can
    optionally flatten a single top-level folder contained in the archive.
    """

    def __init__(
        self,
        name: str,
        version: str,
        extract_single_directory: bool = False,
    ):
        """
        :param name: technical package name, f.e. "opensearch"
        :param version: version of the package to install
        :param extract_single_directory: whether to extract files from single root folder in the archive
        """
        super().__init__(name, version)
        self.extract_single_directory = extract_single_directory

    def _get_install_marker_path(self, install_dir: str) -> str:
        raise NotImplementedError()

    def _get_download_url(self) -> str:
        raise NotImplementedError()

    def _get_checksum_url(self) -> str | None:
        """
        Checksum URL for the archive. This is used to verify the integrity of the downloaded archive.
        This method can be implemented by subclasses to provide the correct URL for the checksum file.
        If not implemented, checksum verification will be skipped.

        :return: URL to the checksum file for the archive, or None if not available.
        """
        return None

    def get_installed_dir(self) -> str | None:
        installed_dir = super().get_installed_dir()
        subdir = self._get_archive_subdir()

        # If the specific installer defines a subdirectory, we return the subdirectory.
        # f.e. /var/lib/localstack/lib/amazon-mq/5.16.5/apache-activemq-5.16.5/
        if installed_dir and subdir:
            return os.path.join(installed_dir, subdir)

        return installed_dir

    def _get_archive_subdir(self) -> str | None:
        """
        :return: name of the subdirectory contained in the archive or none if the package content is at the root level
                 of the archive
        """
        return None

    def get_executable_path(self) -> str | None:
        subdir = self._get_archive_subdir()
        if subdir is None:
            return super().get_executable_path()
        else:
            install_dir = self.get_installed_dir()
            if install_dir:
                # get_installed_dir appended the archive subdir above; strip it again, because the
                # install marker path is defined relative to the base installation directory.
                install_dir = install_dir[: -len(subdir)]
                return self._get_install_marker_path(install_dir)
        return None

    def _handle_single_directory_extraction(self, target_directory: str) -> None:
        """
        Handle extraction of archives that contain a single root directory.
        Moves the contents up one level if extract_single_directory is True.

        :param target_directory: The target extraction directory
        :return: None
        """
        if not self.extract_single_directory:
            return

        dir_contents = os.listdir(target_directory)
        if len(dir_contents) != 1:
            return
        target_subdir = os.path.join(target_directory, dir_contents[0])
        if not os.path.isdir(target_subdir):
            return
        # Promote the single subdirectory: move it aside, drop the now-redundant parent,
        # and rename the subdirectory to the parent's path.
        os.rename(target_subdir, f"{target_directory}.backup")
        rm_rf(target_directory)
        os.rename(f"{target_directory}.backup", target_directory)

    def _download_archive(
        self,
        target: InstallTarget,
        download_url: str,
    ) -> None:
        """
        Downloads the archive (with retries and optional checksum verification) and extracts it
        into the installation directory of the given target.

        :param target: installation target to extract the archive into
        :param download_url: URL of the archive to download (falls back to _get_download_url if falsy)
        :return: None
        """
        target_directory = self._get_install_dir(target)
        mkdir(target_directory)
        download_url = download_url or self._get_download_url()
        archive_name = os.path.basename(download_url)
        archive_path = os.path.join(config.dirs.tmp, archive_name)

        # Get checksum info if available
        checksum_url = self._get_checksum_url()

        try:
            download_and_extract(
                download_url,
                retries=3,
                tmp_archive=archive_path,
                target_dir=target_directory,
                checksum_url=checksum_url,
            )
            self._handle_single_directory_extraction(target_directory)
        finally:
            # Always clean up the temporary archive, even if download or extraction failed.
            rm_rf(archive_path)

    def _install(self, target: InstallTarget) -> None:
        self._download_archive(target, self._get_download_url())
+
+
class PermissionDownloadInstaller(DownloadInstaller, ABC):
    """DownloadInstaller which additionally marks the downloaded file as executable."""

    def _install(self, target: InstallTarget) -> None:
        super()._install(target)
        # NOTE(review): 0o777 makes the binary world-writable, not merely executable — confirm intended.
        chmod_r(self.get_executable_path(), 0o777)  # type: ignore[arg-type]
+
+
class GitHubReleaseInstaller(PermissionDownloadInstaller):
    """
    Installer which downloads an asset from a GitHub project's tag.
    """

    def __init__(self, name: str, tag: str, github_slug: str):
        """
        :param name: technical package name, f.e. "opensearch"
        :param tag: GitHub release tag to install (used as the installer version)
        :param github_slug: GitHub repository slug, f.e. "localstack/localstack"
        """
        super().__init__(name, tag)
        self.github_tag_url = (
            f"https://api.github.com/repos/{github_slug}/releases/tags/{self.version}"
        )

    @lru_cache()
    def _get_download_url(self) -> str:
        """
        Looks up the browser download URL of the release asset via the GitHub API.

        :return: download URL of the asset matching #_get_github_asset_name
        :raises PackageException: if the API call fails or the asset is not part of the release
        """
        asset_name = self._get_github_asset_name()
        # try to use a token when calling the GH API for increased API rate limits
        headers = None
        gh_token = os.environ.get("GITHUB_API_TOKEN")
        if gh_token:
            headers = {"authorization": f"Bearer {gh_token}"}
        # Use an explicit timeout so a stalled GitHub API call cannot block the installation
        # forever (requests.get has no timeout by default).
        response = requests.get(self.github_tag_url, headers=headers, timeout=60)
        if not response.ok:
            raise PackageException(
                f"Could not get list of releases from {self.github_tag_url}: {response.text}"
            )
        github_release = response.json()
        download_url = None
        for asset in github_release.get("assets", []):
            # find the correct binary in the release
            if asset["name"] == asset_name:
                download_url = asset["browser_download_url"]
                break
        if download_url is None:
            raise PackageException(
                f"Could not find required binary {asset_name} in release {self.github_tag_url}"
            )
        return download_url

    def _get_install_marker_path(self, install_dir: str) -> str:
        # Use the GitHub asset name instead of the download URL (since the download URL needs to be fetched online).
        return os.path.join(install_dir, self._get_github_asset_name())

    def _get_github_asset_name(self) -> str:
        """
        Determines the name of the asset to download.
        The asset name must be determinable without having any online data (because it is used in offline scenarios to
        determine if the package is already installed).

        :return: name of the asset to download from the GitHub project's tag / version
        """
        raise NotImplementedError()
+
+
class NodePackageInstaller(ExecutableInstaller):
    """Package installer for Node / NPM packages."""

    def __init__(
        self,
        package_name: str,
        version: str,
        package_spec: Optional[str] = None,
        main_module: str = "main.js",
    ):
        """
        Initializes the Node / NPM package installer.
        :param package_name: npm package name
        :param version: version of the package which should be installed
        :param package_spec: optional package spec for the installation.
            If not set, the package name and version will be used for the installation.
        :param main_module: main module file of the package
        """
        super().__init__(package_name, version)
        self.package_name = package_name
        # If the package spec is not explicitly set (f.e. to a repo), we build it and pin the version
        self.package_spec = package_spec or f"{self.package_name}@{version}"
        self.main_module = main_module

    def _get_install_marker_path(self, install_dir: str) -> str:
        # The package's main module file inside node_modules serves as the install marker.
        return os.path.join(install_dir, "node_modules", self.package_name, self.main_module)

    def _install(self, target: InstallTarget) -> None:
        target_dir = self._get_install_dir(target)

        # Install into the target directory via npm's --prefix option.
        run(
            [
                "npm",
                "install",
                "--prefix",
                target_dir,
                self.package_spec,
            ]
        )
        # npm 9+ does _not_ set the ownership of files anymore if run as root
        # - https://github.blog/changelog/2022-10-24-npm-v9-0-0-released/
        # - https://github.com/npm/cli/pull/5704
        # - https://github.com/localstack/localstack/issues/7620
        if is_root():
            # if the package was installed as root, set the ownership manually
            LOG.debug("Setting ownership root:root on %s", target_dir)
            chown_r(target_dir, "root")
+
+
+LOCALSTACK_VENV = VirtualEnvironment(LOCALSTACK_VENV_FOLDER)
+
+
class PythonPackageInstaller(PackageInstaller):
    """
    Package installer which allows the runtime-installation of additional python packages used by certain services.
    f.e. vosk as offline speech recognition toolkit (which is ~7MB in size compressed and ~26MB uncompressed).
    """

    normalized_name: str
    """Normalized package name according to PEP440."""

    def __init__(self, name: str, version: str, *args: Any, **kwargs: Any):
        super().__init__(name, version, *args, **kwargs)
        self.normalized_name = self._normalize_package_name(name)

    def _normalize_package_name(self, name: str) -> str:
        """
        Normalized the Python package name according to PEP440.
        https://packaging.python.org/en/latest/specifications/name-normalization/#name-normalization
        """
        return re.sub(r"[-_.]+", "-", name).lower()

    def _get_install_dir(self, target: InstallTarget) -> str:
        # all python installers share a venv
        return os.path.join(target.value, "python-packages")

    def _get_install_marker_path(self, install_dir: str) -> str:
        # The venv layout embeds the running interpreter's version, f.e. "python3.11".
        python_subdir = f"python{version_info[0]}.{version_info[1]}"
        dist_info_dir = f"{self.normalized_name}-{self.version}.dist-info"
        # the METADATA file is mandatory, use it as install marker
        return os.path.join(
            install_dir, "lib", python_subdir, "site-packages", dist_info_dir, "METADATA"
        )

    def _get_venv(self, target: InstallTarget) -> VirtualEnvironment:
        venv_dir = self._get_install_dir(target)
        return VirtualEnvironment(venv_dir)

    def _prepare_installation(self, target: InstallTarget) -> None:
        # make sure the venv is properly set up before installing the package
        venv = self._get_venv(target)
        if not venv.exists:
            LOG.info("creating virtual environment at %s", venv.venv_dir)
            venv.create()
            LOG.info("adding localstack venv path %s", venv.venv_dir)
            venv.add_pth("localstack-venv", LOCALSTACK_VENV)
        LOG.debug("injecting venv into path %s", venv.venv_dir)
        venv.inject_to_sys_path()

    def _install(self, target: InstallTarget) -> None:
        venv = self._get_venv(target)
        python_bin = os.path.join(venv.venv_dir, "bin/python")

        # run pip via the python binary of the venv
        run([python_bin, "-m", "pip", "install", f"{self.name}=={self.version}"], print_error=False)

    def _setup_existing_installation(self, target: InstallTarget) -> None:
        """If the venv is already present, it just needs to be initialized once."""
        self._prepare_installation(target)
+
+
class MavenDownloadInstaller(DownloadInstaller):
    """The packageURL is easy copy/pastable from the Maven central repository and the first package URL
    defines the package name and version.
    Example package_url: pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3
    => name: event-ruler
    => version: 1.7.3
    """

    # Example: software.amazon.event.ruler
    group_id: str
    # Example: event-ruler
    artifact_id: str

    # Custom installation directory
    install_dir_suffix: str | None

    def __init__(self, package_url: str, install_dir_suffix: str | None = None):
        """
        :param package_url: Maven package URL (purl), f.e. "pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3"
        :param install_dir_suffix: optional suffix overriding the default installation directory
        """
        self.group_id, self.artifact_id, version = parse_maven_package_url(package_url)
        super().__init__(self.artifact_id, version)
        self.install_dir_suffix = install_dir_suffix

    def _get_download_url(self) -> str:
        # Maven central lays out artifacts as <group-path>/<artifact>/<version>/<artifact>-<version>.jar
        group_id_path = self.group_id.replace(".", "/")
        return f"{MAVEN_REPO_URL}/{group_id_path}/{self.artifact_id}/{self.version}/{self.artifact_id}-{self.version}.jar"

    def _get_install_dir(self, target: InstallTarget) -> str:
        """Allow to overwrite the default installation directory.
        This enables downloading transitive dependencies into the same directory.
        """
        if self.install_dir_suffix:
            return os.path.join(target.value, self.install_dir_suffix)
        else:
            return super()._get_install_dir(target)
+
+
class MavenPackageInstaller(MavenDownloadInstaller):
    """Package installer for downloading Maven JARs, including optional dependencies.
    The first Maven package is used as main LPM package and other dependencies are installed additionally.
    Follows the Maven naming conventions: https://maven.apache.org/guides/mini/guide-naming-conventions.html
    """

    # Installers for Maven dependencies
    dependencies: list[MavenDownloadInstaller]

    def __init__(self, *package_urls: str):
        super().__init__(package_urls[0])
        # All dependency JARs are downloaded into the main package's installation directory.
        dependency_dir = os.path.join(self.name, self.version)
        self.dependencies = [
            MavenDownloadInstaller(dependency_url, dependency_dir)
            for dependency_url in package_urls[1:]
        ]

    def _install(self, target: InstallTarget) -> None:
        # Install the dependencies first: the main JAR is downloaded last, so its presence
        # (the install marker) implies that all dependencies are installed as well.
        for dependency in self.dependencies:
            dependency._install(target)
        super()._install(target)
+
+
def parse_maven_package_url(package_url: str) -> Tuple[str, str, str]:
    """Parses a Maven package URL (purl) into its group id, artifact id, and version.

    Example: parse_maven_package_url("pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3")
    -> software.amazon.event.ruler, event-ruler, 1.7.3

    :param package_url: Maven package URL of the form "pkg:maven/<group-id>/<artifact-id>@<version>"
    :return: tuple of (group_id, artifact_id, version)
    :raises ValueError: if the package URL does not have the expected structure
    """
    parts = package_url.split("/")
    # Validate explicitly instead of failing with an opaque IndexError on malformed input.
    if len(parts) < 3:
        raise ValueError(f"Invalid Maven package URL: '{package_url}'")
    group_id = parts[1]
    sub_parts = parts[2].split("@")
    if len(sub_parts) < 2:
        raise ValueError(f"Invalid Maven package URL: '{package_url}'")
    artifact_id = sub_parts[0]
    version = sub_parts[1]
    return group_id, artifact_id, version
diff --git a/localstack-core/localstack/packages/debugpy.py b/localstack-core/localstack/packages/debugpy.py
new file mode 100644
index 0000000000000..2731236f747a1
--- /dev/null
+++ b/localstack-core/localstack/packages/debugpy.py
@@ -0,0 +1,42 @@
+from typing import List
+
+from localstack.packages import InstallTarget, Package, PackageInstaller
+from localstack.utils.run import run
+
+
class DebugPyPackage(Package["DebugPyPackageInstaller"]):
    """Package which makes the debugpy remote-debugging library available at runtime."""

    def __init__(self) -> None:
        super().__init__("DebugPy", "latest")

    def get_versions(self) -> List[str]:
        # Only the latest version published on PyPI is supported.
        return ["latest"]

    def _get_installer(self, version: str) -> "DebugPyPackageInstaller":
        return DebugPyPackageInstaller("debugpy", version)
+
+
class DebugPyPackageInstaller(PackageInstaller):
    """Installer which installs debugpy into the current Python environment via pip."""

    # TODO: migrate this to the upcoming pip installer

    def is_installed(self) -> bool:
        # debugpy is installed into the running interpreter itself (not under var/static libs),
        # so probing the import is the reliable check here.
        try:
            import debugpy  # type: ignore[import-not-found] # noqa: T100

            assert debugpy
            return True
        except ModuleNotFoundError:
            return False

    def _get_install_marker_path(self, install_dir: str) -> str:
        # TODO: This method currently does not provide the actual install_marker.
        # Since we overwrote is_installed(), this installer does not install anything under
        # var/static libs, and we also don't need an executable, we don't need it to operate the installer.
        # fix with migration to pip installer
        return install_dir

    def _install(self, target: InstallTarget) -> None:
        # Pass the command as an argv list to avoid shell interpretation, consistent with how
        # the other installers in this package invoke run (f.e. NodePackageInstaller).
        run(["pip", "install", "debugpy"])
+
+
+debugpy_package = DebugPyPackage()
diff --git a/localstack-core/localstack/packages/ffmpeg.py b/localstack-core/localstack/packages/ffmpeg.py
new file mode 100644
index 0000000000000..230d114347b68
--- /dev/null
+++ b/localstack-core/localstack/packages/ffmpeg.py
@@ -0,0 +1,51 @@
+import os
+from typing import List
+
+from localstack.packages import Package
+from localstack.packages.core import ArchiveDownloadAndExtractInstaller
+from localstack.utils.platform import Arch, get_arch
+
+# Mapping LocalStack architecture to BtbN's naming convention
+ARCH_MAPPING = {Arch.amd64: "linux64", Arch.arm64: "linuxarm64"}
+
+# Download URL template for ffmpeg 7.1 LGPL builds from BtbN GitHub Releases
+FFMPEG_BASE_URL = "https://github.com/BtbN/FFmpeg-Builds/releases/download/latest"
+FFMPEG_STATIC_BIN_URL = FFMPEG_BASE_URL + "/ffmpeg-n{version}-latest-{arch}-lgpl-{version}.tar.xz"
+FFMPEG_STATIC_CHECKSUM_URL = FFMPEG_BASE_URL + "/checksums.sha256"
+
+
+class FfmpegPackage(Package["FfmpegPackageInstaller"]):
+ def __init__(self) -> None:
+ super().__init__(name="ffmpeg", default_version="7.1")
+
+ def _get_installer(self, version: str) -> "FfmpegPackageInstaller":
+ return FfmpegPackageInstaller(version)
+
+ def get_versions(self) -> List[str]:
+ return ["7.1"]
+
+
+class FfmpegPackageInstaller(ArchiveDownloadAndExtractInstaller):
+ def __init__(self, version: str):
+ super().__init__("ffmpeg", version)
+
+ def _get_download_url(self) -> str:
+ return FFMPEG_STATIC_BIN_URL.format(arch=ARCH_MAPPING.get(get_arch()), version=self.version)
+
+ def _get_install_marker_path(self, install_dir: str) -> str:
+ return os.path.join(install_dir, self._get_archive_subdir())
+
+ def _get_archive_subdir(self) -> str:
+ return f"ffmpeg-n{self.version}-latest-{ARCH_MAPPING.get(get_arch())}-lgpl-{self.version}"
+
+ def get_ffmpeg_path(self) -> str:
+ return os.path.join(self.get_installed_dir(), "bin", "ffmpeg") # type: ignore[arg-type]
+
+ def get_ffprobe_path(self) -> str:
+ return os.path.join(self.get_installed_dir(), "bin", "ffprobe") # type: ignore[arg-type]
+
+ def _get_checksum_url(self) -> str | None:
+ return FFMPEG_STATIC_CHECKSUM_URL
+
+
+ffmpeg_package = FfmpegPackage()
diff --git a/localstack-core/localstack/packages/java.py b/localstack-core/localstack/packages/java.py
new file mode 100644
index 0000000000000..c8a2e9f7c7f21
--- /dev/null
+++ b/localstack-core/localstack/packages/java.py
@@ -0,0 +1,205 @@
+import logging
+import os
+from typing import List
+
+import requests
+
+from localstack.constants import USER_AGENT_STRING
+from localstack.packages import InstallTarget, Package
+from localstack.packages.core import ArchiveDownloadAndExtractInstaller
+from localstack.utils.files import rm_rf
+from localstack.utils.platform import Arch, get_arch, is_linux, is_mac_os
+from localstack.utils.run import run
+
+LOG = logging.getLogger(__name__)
+
+# Default version if not specified
+DEFAULT_JAVA_VERSION = "11"
+
+# Supported Java LTS versions mapped with Eclipse Temurin build semvers
+JAVA_VERSIONS = {
+ "8": "8u432-b06",
+ "11": "11.0.25+9",
+ "17": "17.0.13+11",
+ "21": "21.0.5+11",
+}
+
+
+class JavaInstallerMixin:
+ """
+ Mixin class for packages that depend on Java. It introduces methods that install Java and help build the environment.
+ """
+
+ def _prepare_installation(self, target: InstallTarget) -> None:
+ java_package.install(target=target)
+
+ def get_java_home(self) -> str | None:
+ """
+ Returns path to JRE installation.
+ """
+ return java_package.get_installer().get_java_home()
+
+ def get_java_lib_path(self) -> str | None:
+ """
+ Returns the path to the Java shared library.
+ """
+ if java_home := self.get_java_home():
+ if is_mac_os():
+ return os.path.join(java_home, "lib", "jli", "libjli.dylib")
+ return os.path.join(java_home, "lib", "server", "libjvm.so")
+ return None
+
+ def get_java_env_vars(
+ self, path: str | None = None, ld_library_path: str | None = None
+ ) -> dict[str, str]:
+ """
+ Returns environment variables pointing to the Java installation. This is useful to build the environment where
+ the application will run.
+
+ :param path: If not specified, the value of PATH will be obtained from the environment
+ :param ld_library_path: If not specified, the value of LD_LIBRARY_PATH will be obtained from the environment
+ :return: dict consisting of two items:
+ - JAVA_HOME: path to JRE installation
+ - PATH: the env path variable updated with JRE bin path
+ """
+ java_home = self.get_java_home()
+ java_bin = f"{java_home}/bin"
+
+ path = path or os.environ["PATH"]
+
+ library_path = ld_library_path or os.environ.get("LD_LIBRARY_PATH")
+ # null paths (e.g. `:/foo`) have a special meaning according to the manpages
+ if library_path is None:
+ full_library_path = f"{java_home}/lib:{java_home}/lib/server"
+ else:
+ full_library_path = f"{java_home}/lib:{java_home}/lib/server:{library_path}"
+
+ return {
+ "JAVA_HOME": java_home, # type: ignore[dict-item]
+ "LD_LIBRARY_PATH": full_library_path,
+ "PATH": f"{java_bin}:{path}",
+ }
+
+
+class JavaPackageInstaller(ArchiveDownloadAndExtractInstaller):
+ def __init__(self, version: str):
+ super().__init__("java", version, extract_single_directory=True)
+
+ def _get_install_marker_path(self, install_dir: str) -> str:
+ if is_mac_os():
+ return os.path.join(install_dir, "Contents", "Home", "bin", "java")
+ return os.path.join(install_dir, "bin", "java")
+
+ def _get_download_url(self) -> str:
+ # Note: Eclipse Temurin does not provide Mac aarch64 Java 8 builds.
+ # See https://adoptium.net/en-GB/supported-platforms/
+ try:
+ LOG.debug("Determining the latest Java build version")
+ return self._download_url_latest_release()
+ except Exception as exc: # noqa
+ LOG.debug(
+ "Unable to determine the latest Java build version. Using pinned versions: %s", exc
+ )
+ return self._download_url_fallback()
+
+ def _post_process(self, target: InstallTarget) -> None:
+ target_directory = self._get_install_dir(target)
+ minimal_jre_path = os.path.join(target.value, self.name, f"{self.version}.minimal")
+ rm_rf(minimal_jre_path)
+
+ # If jlink is not available, use the environment as is
+ if not os.path.exists(os.path.join(target_directory, "bin", "jlink")):
+ LOG.warning("Skipping JRE optimisation because jlink is not available")
+ return
+
+ # Build a custom JRE with only the necessary bits to minimise disk footprint
+ LOG.debug("Optimising JRE installation")
+ cmd = (
+ "bin/jlink --add-modules "
+ # Required modules
+ "java.base,java.desktop,java.instrument,java.management,"
+ "java.naming,java.scripting,java.sql,java.xml,jdk.compiler,"
+ # jdk.unsupported contains sun.misc.Unsafe which is required by some dependencies
+ "jdk.unsupported,"
+ # Additional cipher suites
+ "jdk.crypto.cryptoki,"
+ # Archive support
+ "jdk.zipfs,"
+ # Required by MQ broker
+ "jdk.httpserver,jdk.management,jdk.management.agent,"
+ # Required by Spark and Hadoop
+ "java.security.jgss,jdk.security.auth,"
+ # Include required locales
+ "jdk.localedata --include-locales en "
+ # Supplementary args
+ "--compress 2 --strip-debug --no-header-files --no-man-pages "
+ # Output directory
+ "--output " + minimal_jre_path
+ )
+ run(cmd, cwd=target_directory)
+
+ rm_rf(target_directory)
+ os.rename(minimal_jre_path, target_directory)
+
+ def get_java_home(self) -> str | None:
+ """
+ Get JAVA_HOME for this installation of Java.
+ """
+ installed_dir = self.get_installed_dir()
+ if is_mac_os():
+ return os.path.join(installed_dir, "Contents", "Home") # type: ignore[arg-type]
+ return installed_dir
+
+ @property
+ def arch(self) -> str | None:
+ return (
+ "x64" if get_arch() == Arch.amd64 else "aarch64" if get_arch() == Arch.arm64 else None
+ )
+
+ @property
+ def os_name(self) -> str | None:
+ return "linux" if is_linux() else "mac" if is_mac_os() else None
+
+ def _download_url_latest_release(self) -> str:
+ """
+ Return the download URL for latest stable JDK build.
+ """
+ endpoint = (
+ f"https://api.adoptium.net/v3/assets/latest/{self.version}/hotspot?"
+ f"os={self.os_name}&architecture={self.arch}&image_type=jdk"
+ )
+ # Override user-agent because Adoptium API denies service to `requests` library
+ response = requests.get(endpoint, headers={"user-agent": USER_AGENT_STRING}).json()
+ return response[0]["binary"]["package"]["link"]
+
+ def _download_url_fallback(self) -> str:
+ """
+ Return the download URL for pinned JDK build.
+ """
+ semver = JAVA_VERSIONS[self.version]
+ tag_slug = f"jdk-{semver}"
+ semver_safe = semver.replace("+", "_")
+
+ # v8 uses a different tag and version scheme
+ if self.version == "8":
+ semver_safe = semver_safe.replace("-", "")
+ tag_slug = f"jdk{semver}"
+
+ return (
+ f"https://github.com/adoptium/temurin{self.version}-binaries/releases/download/{tag_slug}/"
+ f"OpenJDK{self.version}U-jdk_{self.arch}_{self.os_name}_hotspot_{semver_safe}.tar.gz"
+ )
+
+
+class JavaPackage(Package[JavaPackageInstaller]):
+ def __init__(self, default_version: str = DEFAULT_JAVA_VERSION):
+ super().__init__(name="Java", default_version=default_version)
+
+ def get_versions(self) -> List[str]:
+ return list(JAVA_VERSIONS.keys())
+
+ def _get_installer(self, version: str) -> JavaPackageInstaller:
+ return JavaPackageInstaller(version)
+
+
+java_package = JavaPackage()
diff --git a/localstack-core/localstack/packages/plugins.py b/localstack-core/localstack/packages/plugins.py
new file mode 100644
index 0000000000000..fdeba86a04204
--- /dev/null
+++ b/localstack-core/localstack/packages/plugins.py
@@ -0,0 +1,29 @@
+from typing import TYPE_CHECKING
+
+from localstack.packages.api import Package, package
+
+if TYPE_CHECKING:
+ from localstack.packages.ffmpeg import FfmpegPackageInstaller
+ from localstack.packages.java import JavaPackageInstaller
+ from localstack.packages.terraform import TerraformPackageInstaller
+
+
+@package(name="terraform")
+def terraform_package() -> Package["TerraformPackageInstaller"]:
+ from .terraform import terraform_package
+
+ return terraform_package
+
+
+@package(name="ffmpeg")
+def ffmpeg_package() -> Package["FfmpegPackageInstaller"]:
+ from localstack.packages.ffmpeg import ffmpeg_package
+
+ return ffmpeg_package
+
+
+@package(name="java")
+def java_package() -> Package["JavaPackageInstaller"]:
+ from localstack.packages.java import java_package
+
+ return java_package
diff --git a/localstack-core/localstack/packages/terraform.py b/localstack-core/localstack/packages/terraform.py
new file mode 100644
index 0000000000000..2a5da95b8472a
--- /dev/null
+++ b/localstack-core/localstack/packages/terraform.py
@@ -0,0 +1,47 @@
+import os
+import platform
+from typing import List
+
+from localstack.packages import InstallTarget, Package
+from localstack.packages.core import ArchiveDownloadAndExtractInstaller
+from localstack.utils.files import chmod_r
+from localstack.utils.platform import get_arch
+
+TERRAFORM_VERSION = os.getenv("TERRAFORM_VERSION", "1.5.7")
+TERRAFORM_URL_TEMPLATE = (
+ "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os}_{arch}.zip"
+)
+TERRAFORM_CHECKSUM_URL_TEMPLATE = (
+ "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_SHA256SUMS"
+)
+
+
+class TerraformPackage(Package["TerraformPackageInstaller"]):
+ def __init__(self) -> None:
+ super().__init__("Terraform", TERRAFORM_VERSION)
+
+ def get_versions(self) -> List[str]:
+ return [TERRAFORM_VERSION]
+
+ def _get_installer(self, version: str) -> "TerraformPackageInstaller":
+ return TerraformPackageInstaller("terraform", version)
+
+
+class TerraformPackageInstaller(ArchiveDownloadAndExtractInstaller):
+ def _get_install_marker_path(self, install_dir: str) -> str:
+ return os.path.join(install_dir, "terraform")
+
+ def _get_download_url(self) -> str:
+ system = platform.system().lower()
+ arch = get_arch()
+ return TERRAFORM_URL_TEMPLATE.format(version=TERRAFORM_VERSION, os=system, arch=arch)
+
+ def _install(self, target: InstallTarget) -> None:
+ super()._install(target)
+ chmod_r(self.get_executable_path(), 0o777) # type: ignore[arg-type]
+
+ def _get_checksum_url(self) -> str | None:
+ return TERRAFORM_CHECKSUM_URL_TEMPLATE.format(version=TERRAFORM_VERSION)
+
+
+terraform_package = TerraformPackage()
diff --git a/localstack-core/localstack/plugins.py b/localstack-core/localstack/plugins.py
new file mode 100644
index 0000000000000..a313032547bba
--- /dev/null
+++ b/localstack-core/localstack/plugins.py
@@ -0,0 +1,76 @@
+import logging
+import os
+import sys
+from pathlib import Path
+
+import yaml
+from plux import Plugin
+
+from localstack import config
+from localstack.runtime import hooks
+from localstack.utils.files import rm_rf
+from localstack.utils.ssl import get_cert_pem_file_path
+
+LOG = logging.getLogger(__name__)
+
+
+@hooks.on_infra_start()
+def deprecation_warnings() -> None:
+ LOG.debug("Checking for the usage of deprecated community features and configs...")
+ from localstack.deprecations import log_deprecation_warnings
+
+ log_deprecation_warnings()
+
+
+@hooks.on_infra_start(should_load=lambda: config.REMOVE_SSL_CERT)
+def delete_cached_certificate():
+ LOG.debug("Removing the cached local SSL certificate")
+ target_file = get_cert_pem_file_path()
+ rm_rf(target_file)
+
+
+class OASPlugin(Plugin):
+ """
+ This plugin allows registering an arbitrary number of OpenAPI specs, e.g., the spec for the public endpoints
+ of localstack.core.
+ The OpenAPIValidator handler uses (as opt-in) all the collected specs to validate the requests and the responses
+ to these public endpoints.
+
+ An OAS plugin assumes the following directory layout.
+
+ my_package
+ βββ sub_package
+ β βββ __init__.py <-- spec file
+ β βββ openapi.yaml
+ β βββ plugins.py <-- plugins
+ βββ plugins.py <-- plugins
+ βββ openapi.yaml <-- spec file
+
+ Each package can have its own OpenAPI yaml spec which is loaded by the correspondent plugin in plugins.py
+ You can simply create a plugin like the following:
+
+ class MyPackageOASPlugin(OASPlugin):
+ name = "my_package"
+
+ The only convention is that plugins.py and openapi.yaml have the same pathname.
+ """
+
+ namespace = "localstack.openapi.spec"
+
+ def __init__(self) -> None:
+ # By convention a plugins.py is at the same level (i.e., same pathname) of the openapi.yaml file.
+ # importlib.resources would be a better approach but has issues with namespace packages in editable mode
+ _module = sys.modules[self.__module__]
+ self.spec_path = Path(
+ os.path.join(os.path.dirname(os.path.abspath(_module.__file__)), "openapi.yaml")
+ )
+ assert self.spec_path.exists()
+ self.spec = {}
+
+ def load(self):
+ with self.spec_path.open("r") as f:
+ self.spec = yaml.safe_load(f)
+
+
+class CoreOASPlugin(OASPlugin):
+ name = "localstack"
diff --git a/localstack/services/sns/__init__.py b/localstack-core/localstack/py.typed
similarity index 100%
rename from localstack/services/sns/__init__.py
rename to localstack-core/localstack/py.typed
diff --git a/localstack-core/localstack/runtime/__init__.py b/localstack-core/localstack/runtime/__init__.py
new file mode 100644
index 0000000000000..99044a674080a
--- /dev/null
+++ b/localstack-core/localstack/runtime/__init__.py
@@ -0,0 +1,5 @@
+from .current import get_current_runtime
+
+__all__ = [
+ "get_current_runtime",
+]
diff --git a/localstack-core/localstack/runtime/analytics.py b/localstack-core/localstack/runtime/analytics.py
new file mode 100644
index 0000000000000..2612ee8637bf9
--- /dev/null
+++ b/localstack-core/localstack/runtime/analytics.py
@@ -0,0 +1,136 @@
+import logging
+import os
+
+from localstack import config
+from localstack.runtime import hooks
+from localstack.utils.analytics import log
+
+LOG = logging.getLogger(__name__)
+
+TRACKED_ENV_VAR = [
+ "ALLOW_NONSTANDARD_REGIONS",
+ "BEDROCK_PREWARM",
+ "CLOUDFRONT_LAMBDA_EDGE",
+ "CONTAINER_RUNTIME",
+ "DEBUG",
+ "DEFAULT_REGION", # Not functional; deprecated in 0.12.7, removed in 3.0.0
+ "DEFAULT_BEDROCK_MODEL",
+ "DISABLE_CORS_CHECK",
+ "DISABLE_CORS_HEADERS",
+ "DMS_SERVERLESS_DEPROVISIONING_DELAY",
+ "DMS_SERVERLESS_STATUS_CHANGE_WAITING_TIME",
+ "DNS_ADDRESS",
+ "DYNAMODB_ERROR_PROBABILITY",
+ "DYNAMODB_IN_MEMORY",
+ "DYNAMODB_REMOVE_EXPIRED_ITEMS",
+ "EAGER_SERVICE_LOADING",
+ "EC2_VM_MANAGER",
+ "ECS_TASK_EXECUTOR",
+ "EDGE_PORT",
+ "ENABLE_REPLICATOR",
+ "ENFORCE_IAM",
+ "ES_CUSTOM_BACKEND", # deprecated in 0.14.0, removed in 3.0.0
+ "ES_MULTI_CLUSTER", # deprecated in 0.14.0, removed in 3.0.0
+ "ES_ENDPOINT_STRATEGY", # deprecated in 0.14.0, removed in 3.0.0
+ "EVENT_RULE_ENGINE",
+ "IAM_SOFT_MODE",
+ "KINESIS_PROVIDER", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "KINESIS_ERROR_PROBABILITY",
+ "KMS_PROVIDER", # defunct since 1.4.0
+ "LAMBDA_DEBUG_MODE",
+ "LAMBDA_DOWNLOAD_AWS_LAYERS",
+ "LAMBDA_EXECUTOR", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_STAY_OPEN_MODE", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_REMOTE_DOCKER", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_CODE_EXTRACT_TIME", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_CONTAINER_REGISTRY", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_FALLBACK_URL", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_FORWARD_URL", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_XRAY_INIT", # Not functional; deprecated in 2.0.0, removed in 3.0.0
+ "LAMBDA_PREBUILD_IMAGES",
+ "LAMBDA_RUNTIME_EXECUTOR",
+ "LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT",
+ "LEGACY_EDGE_PROXY", # Not functional; deprecated in 1.0.0, removed in 2.0.0
+ "LS_LOG",
+ "MOCK_UNIMPLEMENTED", # Not functional; deprecated in 1.3.0, removed in 3.0.0
+ "OPENSEARCH_ENDPOINT_STRATEGY",
+ "PERSISTENCE",
+ "PERSISTENCE_SINGLE_FILE",
+ "PERSIST_ALL", # defunct since 2.3.2
+ "PORT_WEB_UI",
+ "RDS_MYSQL_DOCKER",
+ "REQUIRE_PRO",
+ "SERVICES",
+ "STRICT_SERVICE_LOADING",
+ "SKIP_INFRA_DOWNLOADS",
+ "SQS_ENDPOINT_STRATEGY",
+ "USE_SINGLE_REGION", # Not functional; deprecated in 0.12.7, removed in 3.0.0
+ "USE_SSL",
+]
+
+PRESENCE_ENV_VAR = [
+ "DATA_DIR",
+ "EDGE_FORWARD_URL", # Not functional; deprecated in 1.4.0, removed in 3.0.0
+ "GATEWAY_LISTEN",
+ "HOSTNAME",
+ "HOSTNAME_EXTERNAL",
+ "HOSTNAME_FROM_LAMBDA",
+ "HOST_TMP_FOLDER", # Not functional; deprecated in 1.0.0, removed in 2.0.0
+ "INIT_SCRIPTS_PATH", # Not functional; deprecated in 1.1.0, removed in 2.0.0
+ "LAMBDA_DEBUG_MODE_CONFIG_PATH",
+ "LEGACY_DIRECTORIES", # Not functional; deprecated in 1.1.0, removed in 2.0.0
+ "LEGACY_INIT_DIR", # Not functional; deprecated in 1.1.0, removed in 2.0.0
+ "LOCALSTACK_HOST",
+ "LOCALSTACK_HOSTNAME",
+ "OUTBOUND_HTTP_PROXY",
+ "OUTBOUND_HTTPS_PROXY",
+ "S3_DIR",
+ "SFN_MOCK_CONFIG",
+ "TMPDIR",
+]
+
+
+@hooks.on_infra_start()
+def _publish_config_as_analytics_event():
+ env_vars = list(TRACKED_ENV_VAR)
+
+ for key, value in os.environ.items():
+ if key.startswith("PROVIDER_OVERRIDE_"):
+ env_vars.append(key)
+ elif key.startswith("SYNCHRONOUS_") and key.endswith("_EVENTS"):
+ # these config variables have been removed with 3.0.0
+ env_vars.append(key)
+
+ env_vars = {key: os.getenv(key) for key in env_vars}
+ present_env_vars = {env_var: 1 for env_var in PRESENCE_ENV_VAR if os.getenv(env_var)}
+
+ log.event("config", env_vars=env_vars, set_vars=present_env_vars)
+
+
+class LocalstackContainerInfo:
+ def get_image_variant(self) -> str:
+ for f in os.listdir("/usr/lib/localstack"):
+ if f.startswith(".") and f.endswith("-version"):
+ return f[1:-8]
+ return "unknown"
+
+ def has_docker_socket(self) -> bool:
+ return os.path.exists("/run/docker.sock")
+
+ def to_dict(self):
+ return {
+ "variant": self.get_image_variant(),
+ "has_docker_socket": self.has_docker_socket(),
+ }
+
+
+@hooks.on_infra_start()
+def _publish_container_info():
+ if not config.is_in_docker:
+ return
+
+ try:
+ log.event("container_info", payload=LocalstackContainerInfo().to_dict())
+ except Exception as e:
+ if config.DEBUG_ANALYTICS:
+ LOG.debug("error gathering container information: %s", e)
diff --git a/localstack-core/localstack/runtime/components.py b/localstack-core/localstack/runtime/components.py
new file mode 100644
index 0000000000000..db9662b2e030b
--- /dev/null
+++ b/localstack-core/localstack/runtime/components.py
@@ -0,0 +1,56 @@
+"""
+This package contains code to define and manage the core components that make up a ``LocalstackRuntime``.
+These include:
+ - A ``Gateway``
+ - A ``RuntimeServer`` as the main control loop
+ - A ``ServiceManager`` to manage service plugins (TODO: once the Service concept has been generalized)
+ - ... ?
+
+Components can then be accessed via ``get_current_runtime()``.
+"""
+
+from functools import cached_property
+
+from plux import Plugin, PluginManager
+from rolo.gateway import Gateway
+
+from .server.core import RuntimeServer, RuntimeServerPlugin
+
+
+class Components(Plugin):
+ """
+ A Plugin that allows a specific localstack runtime implementation (aws, snowflake, ...) to expose its
+ own component factory.
+ """
+
+ namespace = "localstack.runtime.components"
+
+ @cached_property
+ def gateway(self) -> Gateway:
+ raise NotImplementedError
+
+ @cached_property
+ def runtime_server(self) -> RuntimeServer:
+ raise NotImplementedError
+
+
+class BaseComponents(Components):
+ """
+ A component base, which includes a ``RuntimeServer`` created from the config variable, and a default
+ ServicePluginManager as ServiceManager.
+ """
+
+ @cached_property
+ def runtime_server(self) -> RuntimeServer:
+ from localstack import config
+
+ # TODO: rename to RUNTIME_SERVER
+ server_type = config.GATEWAY_SERVER
+
+ plugins = PluginManager(RuntimeServerPlugin.namespace)
+
+ if not plugins.exists(server_type):
+ raise ValueError(f"Unknown gateway server type {server_type}")
+
+ plugins.load(server_type)
+ return plugins.get_container(server_type).load_value
diff --git a/localstack-core/localstack/runtime/current.py b/localstack-core/localstack/runtime/current.py
new file mode 100644
index 0000000000000..fa033c58844fa
--- /dev/null
+++ b/localstack-core/localstack/runtime/current.py
@@ -0,0 +1,40 @@
+"""This package gives access to the singleton ``LocalstackRuntime`` instance. This is the only global state
+that should exist within localstack, which contains the singleton ``LocalstackRuntime`` which is currently
+running."""
+
+import threading
+import typing
+
+if typing.TYPE_CHECKING:
+ # make sure we don't have any imports here at runtime, so it can be imported anywhere without conflicts
+ from .runtime import LocalstackRuntime
+
+_runtime: typing.Optional["LocalstackRuntime"] = None
+"""The singleton LocalStack Runtime"""
+_runtime_lock = threading.RLock()
+
+
+def get_current_runtime() -> "LocalstackRuntime":
+ with _runtime_lock:
+ if not _runtime:
+ raise ValueError("LocalStack runtime has not yet been set")
+ return _runtime
+
+
+def set_current_runtime(runtime: "LocalstackRuntime"):
+ with _runtime_lock:
+ global _runtime
+ _runtime = runtime
+
+
+def initialize_runtime() -> "LocalstackRuntime":
+ from localstack.runtime import runtime
+
+ with _runtime_lock:
+ try:
+ return get_current_runtime()
+ except ValueError:
+ pass
+ rt = runtime.create_from_environment()
+ set_current_runtime(rt)
+ return rt
diff --git a/localstack-core/localstack/runtime/events.py b/localstack-core/localstack/runtime/events.py
new file mode 100644
index 0000000000000..2382fab6a47a2
--- /dev/null
+++ b/localstack-core/localstack/runtime/events.py
@@ -0,0 +1,7 @@
+import threading
+
+# TODO: deprecate and replace access with ``get_current_runtime().starting``, ...
+infra_starting = threading.Event()
+infra_ready = threading.Event()
+infra_stopping = threading.Event()
+infra_stopped = threading.Event()
diff --git a/localstack-core/localstack/runtime/exceptions.py b/localstack-core/localstack/runtime/exceptions.py
new file mode 100644
index 0000000000000..b4a4f72e65066
--- /dev/null
+++ b/localstack-core/localstack/runtime/exceptions.py
@@ -0,0 +1,9 @@
+class LocalstackExit(Exception):
+ """
+ This exception can be raised during the startup procedure to terminate localstack with an exit code and
+ a reason.
+ """
+
+ def __init__(self, reason: str = None, code: int = 0):
+ super().__init__(reason)
+ self.code = code
diff --git a/localstack-core/localstack/runtime/hooks.py b/localstack-core/localstack/runtime/hooks.py
new file mode 100644
index 0000000000000..05161679cf54e
--- /dev/null
+++ b/localstack-core/localstack/runtime/hooks.py
@@ -0,0 +1,104 @@
+import functools
+
+from plux import PluginManager, plugin
+
+# plugin namespace constants
+HOOKS_CONFIGURE_LOCALSTACK_CONTAINER = "localstack.hooks.configure_localstack_container"
+HOOKS_ON_RUNTIME_CREATE = "localstack.hooks.on_runtime_create"
+HOOKS_ON_INFRA_READY = "localstack.hooks.on_infra_ready"
+HOOKS_ON_INFRA_START = "localstack.hooks.on_infra_start"
+HOOKS_ON_PRO_INFRA_START = "localstack.hooks.on_pro_infra_start"
+HOOKS_ON_INFRA_SHUTDOWN = "localstack.hooks.on_infra_shutdown"
+HOOKS_PREPARE_HOST = "localstack.hooks.prepare_host"
+
+
+def hook(namespace: str, priority: int = 0, **kwargs):
+ """
+ Decorator for creating functional plugins that have a hook_priority attribute. Hooks with a higher priority value
+ will be executed earlier.
+ """
+
+ def wrapper(fn):
+ fn.hook_priority = priority
+ return plugin(namespace=namespace, **kwargs)(fn)
+
+ return wrapper
+
+
+def hook_spec(namespace: str):
+ """
+ Creates a new hook decorator bound to a namespace.
+
+ on_infra_start = hook_spec("localstack.hooks.on_infra_start")
+
+ @on_infra_start()
+ def foo():
+ pass
+
+ # run all hooks in order
+ on_infra_start.run()
+ """
+ fn = functools.partial(hook, namespace=namespace)
+ # attach hook manager and run method to decorator for convenience calls
+ fn.manager = HookManager(namespace)
+ fn.run = fn.manager.run_in_order
+ return fn
+
+
+class HookManager(PluginManager):
+ def load_all_sorted(self, propagate_exceptions=False):
+ """
+ Loads all hook plugins and sorts them by their hook_priority attribute.
+ """
+ plugins = self.load_all(propagate_exceptions)
+ # the hook_priority attribute is part of the function wrapped in the FunctionPlugin
+ plugins.sort(
+ key=lambda _fn_plugin: getattr(_fn_plugin.fn, "hook_priority", 0), reverse=True
+ )
+ return plugins
+
+ def run_in_order(self, *args, **kwargs):
+ """
+ Loads and runs all plugins in order with the given arguments.
+ """
+ for fn_plugin in self.load_all_sorted():
+ fn_plugin(*args, **kwargs)
+
+ def __str__(self):
+ return "HookManager(%s)" % self.namespace
+
+ def __repr__(self):
+ return self.__str__()
+
+
+configure_localstack_container = hook_spec(HOOKS_CONFIGURE_LOCALSTACK_CONTAINER)
+"""Hooks to configure the LocalStack container before it starts. Executed on the host when invoking the CLI."""
+
+prepare_host = hook_spec(HOOKS_PREPARE_HOST)
+"""Hooks to prepare the host that's starting LocalStack. Executed on the host when invoking the CLI."""
+
+on_infra_start = hook_spec(HOOKS_ON_INFRA_START)
+"""Hooks that are executed right before starting the LocalStack infrastructure."""
+
+on_runtime_create = hook_spec(HOOKS_ON_RUNTIME_CREATE)
+"""Hooks that are executed right before the LocalstackRuntime is created. These can be used to apply
+patches or otherwise configure the interpreter before any other code is imported."""
+
+on_runtime_start = on_infra_start
+"""Alias for on_infra_start. TODO: switch and deprecate the `infra` naming."""
+
+on_pro_infra_start = hook_spec(HOOKS_ON_PRO_INFRA_START)
+"""Hooks that are executed after on_infra_start hooks, and only if LocalStack pro has been activated."""
+
+on_infra_ready = hook_spec(HOOKS_ON_INFRA_READY)
+"""Hooks that are executed after all startup hooks have been executed, and the LocalStack infrastructure has become
+available."""
+
+on_runtime_ready = on_infra_ready
+"""Alias for on_infra_ready. TODO: switch and deprecate the `infra` naming."""
+
+on_infra_shutdown = hook_spec(HOOKS_ON_INFRA_SHUTDOWN)
+"""Hooks that are executed when localstack shuts down."""
+
+on_runtime_shutdown = on_infra_shutdown
+"""Alias for on_infra_shutdown. TODO: switch and deprecate the `infra` naming."""
diff --git a/localstack-core/localstack/runtime/init.py b/localstack-core/localstack/runtime/init.py
new file mode 100644
index 0000000000000..e9b2f97dccf9e
--- /dev/null
+++ b/localstack-core/localstack/runtime/init.py
@@ -0,0 +1,283 @@
+"""Module for initialization hooks https://docs.localstack.cloud/references/init-hooks/"""
+
+import dataclasses
+import logging
+import os.path
+import subprocess
+import time
+from enum import Enum
+from functools import cached_property
+from typing import Dict, List, Optional
+
+from plux import Plugin, PluginManager
+
+from localstack.runtime import hooks
+from localstack.utils.objects import singleton_factory
+
+LOG = logging.getLogger(__name__)
+
+
+class State(Enum):
+ UNKNOWN = "UNKNOWN"
+ RUNNING = "RUNNING"
+ SUCCESSFUL = "SUCCESSFUL"
+ ERROR = "ERROR"
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+
+class Stage(Enum):
+ BOOT = 0
+ START = 1
+ READY = 2
+ SHUTDOWN = 3
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+
+@dataclasses.dataclass
+class Script:
+ path: str
+ stage: Stage
+ state: State = State.UNKNOWN
+
+
+class ScriptRunner(Plugin):
+ """
+ Interface for running scripts.
+ """
+
+ namespace = "localstack.init.runner"
+ suffixes = []
+
+ def run(self, path: str) -> None:
+ """
+ Run the given script with the appropriate runtime.
+
+ :param path: the path to the script
+ """
+ raise NotImplementedError
+
+ def should_run(self, script_file: str) -> bool:
+ """
+ Checks whether the given file should be run with this script runner. In case multiple runners
+ evaluate this condition to true on the same file (ideally this doesn't happen), the first one
+ loaded will be used, which is potentially nondeterministic.
+
+ :param script_file: the script file to run
+ :return: True if this runner should be used, False otherwise
+ """
+ for suffix in self.suffixes:
+ if script_file.endswith(suffix):
+ return True
+ return False
+
+
+class ShellScriptRunner(ScriptRunner):
+ """
+ Runner that interprets scripts as shell scripts and calls them directly.
+ """
+
+ name = "sh"
+ suffixes = [".sh"]
+
+ def run(self, path: str) -> None:
+ exit_code = subprocess.call(args=[], executable=path)
+ if exit_code != 0:
+ raise OSError("Script %s returned a non-zero exit code %s" % (path, exit_code))
+
+
+class PythonScriptRunner(ScriptRunner):
+ """
+ Runner that uses ``exec`` to run a python script.
+ """
+
+ name = "py"
+ suffixes = [".py"]
+
+ def run(self, path: str) -> None:
+ with open(path, "rb") as fd:
+ exec(fd.read(), {})
+
+
+class InitScriptManager:
+ _stage_directories: Dict[Stage, str] = {
+ Stage.BOOT: "boot.d",
+ Stage.START: "start.d",
+ Stage.READY: "ready.d",
+ Stage.SHUTDOWN: "shutdown.d",
+ }
+
+ script_root: str
+ stage_completed: Dict[Stage, bool]
+
+ def __init__(self, script_root: str):
+ self.script_root = script_root
+ self.stage_completed = dict.fromkeys(Stage, False)
+ self.runner_manager: PluginManager[ScriptRunner] = PluginManager(ScriptRunner.namespace)
+
+ @cached_property
+ def scripts(self) -> Dict[Stage, List[Script]]:
+ return self._find_scripts()
+
+ def get_script_runner(self, script_file: str) -> Optional[ScriptRunner]:
+ runners = self.runner_manager.load_all()
+ for runner in runners:
+ if runner.should_run(script_file):
+ return runner
+ return None
+
+ def has_script_runner(self, script_file: str) -> bool:
+ return self.get_script_runner(script_file) is not None
+
+ def run_stage(self, stage: Stage) -> List[Script]:
+ """
+ Runs all scripts in the given stage.
+
+ :param stage: the stage to run
+ :return: the scripts that were in the stage
+ """
+ scripts = self.scripts.get(stage, [])
+
+ if self.stage_completed[stage]:
+ LOG.debug("Stage %s already completed, skipping", stage)
+ return scripts
+
+ try:
+ for script in scripts:
+ LOG.debug("Running %s script %s", script.stage, script.path)
+
+ env_original = os.environ.copy()
+
+ try:
+ script.state = State.RUNNING
+ runner = self.get_script_runner(script.path)
+ runner.run(script.path)
+ except Exception as e:
+ script.state = State.ERROR
+ if LOG.isEnabledFor(logging.DEBUG):
+ LOG.exception("Error while running script %s", script)
+ else:
+ LOG.error("Error while running script %s: %s", script, e)
+ else:
+ script.state = State.SUCCESSFUL
+ finally:
+ # Discard env variables overridden in startup script that may cause side-effects
+ for env_var in (
+ "AWS_ACCESS_KEY_ID",
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SESSION_TOKEN",
+ "AWS_DEFAULT_REGION",
+ "AWS_PROFILE",
+ "AWS_REGION",
+ ):
+ if env_var in env_original:
+ os.environ[env_var] = env_original[env_var]
+ else:
+ os.environ.pop(env_var, None)
+ finally:
+ self.stage_completed[stage] = True
+
+ return scripts
+
+ def _find_scripts(self) -> Dict[Stage, List[Script]]:
+ scripts = {}
+
+ if self.script_root is None:
+ LOG.debug("Unable to discover init scripts as script_root is None")
+ return {}
+
+ for stage in Stage:
+ scripts[stage] = []
+
+ stage_dir = self._stage_directories[stage]
+ if not stage_dir:
+ continue
+
+ stage_path = os.path.join(self.script_root, stage_dir)
+ if not os.path.isdir(stage_path):
+ continue
+
+ for root, dirs, files in os.walk(stage_path, topdown=True):
+ # from the docs: "When topdown is true, the caller can modify the dirnames list in-place"
+ dirs.sort()
+ files.sort()
+ for file in files:
+ script_path = os.path.abspath(os.path.join(root, file))
+ if not os.path.isfile(script_path):
+ continue
+
+ # only add the script if there's a runner for it
+ if not self.has_script_runner(script_path):
+ LOG.debug("No runner available for script %s", script_path)
+ continue
+
+ scripts[stage].append(Script(path=script_path, stage=stage))
+ LOG.debug("Init scripts discovered: %s", scripts)
+
+ return scripts
+
+
+# runtime integration
+
+
+@singleton_factory
+def init_script_manager() -> InitScriptManager:
+ from localstack import config
+
+ return InitScriptManager(script_root=config.dirs.init)
+
+
+@hooks.on_infra_start()
+def _run_init_scripts_on_start():
+ # this is a hack since we currently cannot know whether boot scripts have been executed or not
+ init_script_manager().stage_completed[Stage.BOOT] = True
+ _run_and_log(Stage.START)
+
+
+@hooks.on_infra_ready()
+def _run_init_scripts_on_ready():
+ _run_and_log(Stage.READY)
+
+
+@hooks.on_infra_shutdown()
+def _run_init_scripts_on_shutdown():
+ _run_and_log(Stage.SHUTDOWN)
+
+
+def _run_and_log(stage: Stage):
+ from localstack.utils.analytics import log
+
+ then = time.time()
+ scripts = init_script_manager().run_stage(stage)
+ took = (time.time() - then) * 1000
+
+ if scripts:
+ log.event("run_init", {"stage": stage.name, "scripts": len(scripts), "duration": took})
+
+
+def main():
+ """
+ Run the init scripts for a particular stage. For example, to run all boot scripts run::
+
+ python -m localstack.runtime.init BOOT
+
+ The __main__ entrypoint is currently mainly used for the docker-entrypoint.sh. Other stages
+ are executed from runtime hooks.
+ """
+ import sys
+
+ stage = Stage[sys.argv[1]]
+ init_script_manager().run_stage(stage)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/localstack-core/localstack/runtime/legacy.py b/localstack-core/localstack/runtime/legacy.py
new file mode 100644
index 0000000000000..2a2f54c562929
--- /dev/null
+++ b/localstack-core/localstack/runtime/legacy.py
@@ -0,0 +1,17 @@
+"""Adapter code for the legacy runtime to make sure the new runtime is compatible with the old one,
+and at the same time doesn't need ``localstack.services.infra``, which imports AWS-specific modules."""
+
+import logging
+import os
+import signal
+
+LOG = logging.getLogger(__name__)
+
+
+def signal_supervisor_restart():
+ # TODO: we should think about moving the localstack-supervisor into a script in the runtime,
+ # and make `signal_supervisor_restart` part of the supervisor code.
+ if pid := os.environ.get("SUPERVISOR_PID"):
+ os.kill(int(pid), signal.SIGUSR1)
+ else:
+ LOG.warning("could not signal supervisor to restart localstack")
diff --git a/localstack-core/localstack/runtime/main.py b/localstack-core/localstack/runtime/main.py
new file mode 100644
index 0000000000000..3a0357e230ad0
--- /dev/null
+++ b/localstack-core/localstack/runtime/main.py
@@ -0,0 +1,93 @@
+"""This is the entrypoint used to start the localstack runtime. It starts the infrastructure and also
+manages the interaction with the operating system - mostly signal handlers for now."""
+
+import signal
+import sys
+import traceback
+
+from localstack import config, constants
+from localstack.runtime.exceptions import LocalstackExit
+
+
+def print_runtime_information(in_docker: bool = False):
+ # FIXME: this is legacy code from the old CLI, reconcile with new CLI and runtime output
+ from localstack.utils.container_networking import get_main_container_name
+ from localstack.utils.container_utils.container_client import ContainerException
+ from localstack.utils.docker_utils import DOCKER_CLIENT
+
+ print()
+ print(f"LocalStack version: {constants.VERSION}")
+ if in_docker:
+ try:
+ container_name = get_main_container_name()
+ print("LocalStack Docker container name: %s" % container_name)
+ inspect_result = DOCKER_CLIENT.inspect_container(container_name)
+ container_id = inspect_result["Id"]
+ print("LocalStack Docker container id: %s" % container_id[:12])
+ image_details = DOCKER_CLIENT.inspect_image(inspect_result["Image"])
+ digests = image_details.get("RepoDigests") or ["Unavailable"]
+ print("LocalStack Docker image sha: %s" % digests[0])
+ except ContainerException:
+ print(
+ "LocalStack Docker container info: Failed to inspect the LocalStack docker container. "
+ "This is likely because the docker socket was not mounted into the container. "
+ "Without access to the docker socket, LocalStack will not function properly. Please "
+ "consult the LocalStack documentation on how to correctly start up LocalStack. ",
+ end="",
+ )
+ if config.DEBUG:
+ print("Docker debug information:")
+ traceback.print_exc()
+ else:
+ print(
+ "You can run LocalStack with `DEBUG=1` to get more information about the error."
+ )
+
+ if config.LOCALSTACK_BUILD_DATE:
+ print("LocalStack build date: %s" % config.LOCALSTACK_BUILD_DATE)
+
+ if config.LOCALSTACK_BUILD_GIT_HASH:
+ print("LocalStack build git hash: %s" % config.LOCALSTACK_BUILD_GIT_HASH)
+
+ print()
+
+
+def main():
+ from localstack.logging.setup import setup_logging_from_config
+ from localstack.runtime import current
+
+ try:
+ setup_logging_from_config()
+ runtime = current.initialize_runtime()
+ except Exception as e:
+ sys.stdout.write(f"ERROR: The LocalStack Runtime could not be initialized: {e}\n")
+ sys.stdout.flush()
+ raise
+
+ # TODO: where should this go?
+ print_runtime_information()
+
+ # signal handler to make sure SIGTERM properly shuts down localstack
+ def _terminate_localstack(sig: int, frame):
+ sys.stdout.write(f"Localstack runtime received signal {sig}\n")
+ sys.stdout.flush()
+ runtime.exit(0)
+
+ signal.signal(signal.SIGINT, _terminate_localstack)
+ signal.signal(signal.SIGTERM, _terminate_localstack)
+
+ try:
+ runtime.run()
+ except LocalstackExit as e:
+ sys.stdout.write(f"Localstack returning with exit code {e.code}. Reason: {e}")
+ sys.exit(e.code)
+ except Exception as e:
+ sys.stdout.write(f"ERROR: the LocalStack runtime exited unexpectedly: {e}\n")
+ sys.stdout.flush()
+ raise
+
+ sys.exit(runtime.exit_code)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/localstack-core/localstack/runtime/patches.py b/localstack-core/localstack/runtime/patches.py
new file mode 100644
index 0000000000000..4772a480bfee1
--- /dev/null
+++ b/localstack-core/localstack/runtime/patches.py
@@ -0,0 +1,70 @@
+"""
+System-wide patches that should be applied.
+"""
+
+from localstack.runtime import hooks
+from localstack.utils.patch import patch
+
+
+def patch_thread_pool():
+ """
+ This patch to ThreadPoolExecutor makes the executor remove the threads it creates from the global
+ ``_thread_queues`` of ``concurrent.futures.thread``, which joins all created threads at python exit and
+ will block interpreter shutdown if any threads are still running, even if they are daemon threads.
+ """
+
+ import concurrent.futures.thread
+
+ @patch(concurrent.futures.thread.ThreadPoolExecutor._adjust_thread_count)
+ def _adjust_thread_count(fn, self) -> None:
+ fn(self)
+
+ for t in self._threads:
+ if not t.daemon:
+ continue
+ try:
+ del concurrent.futures.thread._threads_queues[t]
+ except KeyError:
+ pass
+
+
+def patch_urllib3_connection_pool(**constructor_kwargs):
+ """
+ Override the default parameters of HTTPConnectionPool, e.g., set the pool size via maxsize=16
+ """
+ try:
+ from urllib3 import connectionpool, poolmanager
+
+ class MyHTTPSConnectionPool(connectionpool.HTTPSConnectionPool):
+ def __init__(self, *args, **kwargs):
+ kwargs.update(constructor_kwargs)
+ super(MyHTTPSConnectionPool, self).__init__(*args, **kwargs)
+
+ poolmanager.pool_classes_by_scheme["https"] = MyHTTPSConnectionPool
+
+ class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool):
+ def __init__(self, *args, **kwargs):
+ kwargs.update(constructor_kwargs)
+ super(MyHTTPConnectionPool, self).__init__(*args, **kwargs)
+
+ poolmanager.pool_classes_by_scheme["http"] = MyHTTPConnectionPool
+ except Exception:
+ pass
+
+
+_applied = False
+
+
+@hooks.on_runtime_start(priority=100) # apply patches earlier than other hooks
+def apply_runtime_patches():
+ # FIXME: find a better way to apply system-wide patches
+ global _applied
+ if _applied:
+ return
+ _applied = True
+
+ from localstack.http.duplex_socket import enable_duplex_socket
+
+ patch_urllib3_connection_pool(maxsize=128)
+ patch_thread_pool()
+ enable_duplex_socket()
diff --git a/localstack-core/localstack/runtime/runtime.py b/localstack-core/localstack/runtime/runtime.py
new file mode 100644
index 0000000000000..1e5d4e6ab5b21
--- /dev/null
+++ b/localstack-core/localstack/runtime/runtime.py
@@ -0,0 +1,203 @@
+import logging
+import os
+import threading
+
+from plux import PluginManager
+
+from localstack import config, constants
+from localstack.runtime import events, hooks
+from localstack.utils import files, functions, net, sync, threads
+
+from .components import Components
+
+LOG = logging.getLogger(__name__)
+
+
+class LocalstackRuntime:
+ """
+ The localstack runtime. It has the following responsibilities:
+
+ - Manage localstack filesystem directories
+ - Execute runtime lifecycle hook plugins from ``localstack.runtime.hooks``.
+ - Manage the localstack SSL certificate
+ - Serve the gateway (It uses a ``RuntimeServer`` to serve a ``Gateway`` instance coming from the
+ ``Components`` factory.)
+ """
+
+ def __init__(self, components: Components):
+ self.components = components
+
+ # at some point, far far in the future, we should no longer access a global config object, but rather
+ # the one from the current runtime. This will allow us to truly instantiate multiple localstack
+ # runtime instances in one process, which can be useful for many different things. but there is too
+        # much global state at the moment to think about this seriously. however, this assignment here can
+ # serve as a reminder to avoid global state in general.
+ self.config = config
+
+ # TODO: move away from `localstack.runtime.events` and instantiate new `threading.Event()` here
+ # instead
+ self.starting = events.infra_starting
+ self.ready = events.infra_ready
+ self.stopping = events.infra_stopping
+ self.stopped = events.infra_stopped
+ self.exit_code = 0
+ self._lifecycle_lock = threading.RLock()
+
+ def run(self):
+ """
+ Start the main control loop of the runtime and block the thread. This will initialize the
+ filesystem, run all lifecycle hooks, initialize the gateway server, and then serve the
+ ``RuntimeServer`` until ``shutdown()`` is called.
+ """
+ # indicates to the environment that this is an "infra process" (old terminology referring to the
+ # localstack runtime). this is necessary for disabling certain hooks that may run in the context of
+ # the CLI host mode. TODO: should not be needed over time.
+ os.environ[constants.LOCALSTACK_INFRA_PROCESS] = "1"
+
+ self._init_filesystem()
+ self._on_starting()
+ self._init_gateway_server()
+
+ # since we are blocking the main thread with the runtime server, we need to run the monitor that
+ # prints the ready marker asynchronously. this is different from how the runtime was started in the
+ # past, where the server was running in a thread.
+ # TODO: ideally we pass down a `shutdown` event that can be waited on so we can cancel the thread
+ # if the runtime shuts down beforehand
+ threading.Thread(target=self._run_ready_monitor, daemon=True).start()
+
+ # run the main control loop of the server and block execution
+ try:
+ self.components.runtime_server.run()
+ finally:
+ self._on_return()
+
+ def exit(self, code: int = 0):
+ """
+ Sets the exit code and runs ``shutdown``. It does not actually call ``sys.exit``, this is for the
+ caller to do.
+
+ :param code: the exit code to be set
+ """
+ self.exit_code = code
+ # we don't know yet why, but shutdown does not work on the main thread
+ threading.Thread(target=self.shutdown, name="Runtime-Shutdown").start()
+
+ def shutdown(self):
+ """
+ Initiates an orderly shutdown of the runtime by stopping the main control loop of the
+ ``RuntimeServer``. The shutdown hooks are actually called by the main control loop (in the main
+ thread) after it returns.
+ """
+ with self._lifecycle_lock:
+ if self.stopping.is_set():
+ return
+ self.stopping.set()
+
+ LOG.debug("[shutdown] Running shutdown hooks ...")
+ functions.call_safe(
+ hooks.on_runtime_shutdown.run,
+ exception_message="[shutdown] error calling shutdown hook",
+ )
+ LOG.debug("[shutdown] Shutting down runtime server ...")
+ self.components.runtime_server.shutdown()
+
+ def is_ready(self) -> bool:
+ return self.ready.is_set()
+
+ def _init_filesystem(self):
+ self._clear_tmp_directory()
+ self.config.dirs.mkdirs()
+
+ def _init_gateway_server(self):
+ from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+
+ install_predefined_cert_if_available()
+ serial_number = self.config.GATEWAY_LISTEN[0].port
+ _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number)
+ ssl_creds = (cert_file_name, key_file_name)
+
+ self.components.runtime_server.register(
+ self.components.gateway, self.config.GATEWAY_LISTEN, ssl_creds
+ )
+
+ def _on_starting(self):
+ self.starting.set()
+ hooks.on_runtime_start.run()
+
+ def _on_ready(self):
+ hooks.on_runtime_ready.run()
+ print(constants.READY_MARKER_OUTPUT, flush=True)
+ self.ready.set()
+
+ def _on_return(self):
+ LOG.debug("[shutdown] Cleaning up resources ...")
+ self._cleanup_resources()
+ self.stopped.set()
+ LOG.debug("[shutdown] Completed, bye!")
+
+ def _run_ready_monitor(self):
+ self._wait_for_gateway()
+ self._on_ready()
+
+ def _wait_for_gateway(self):
+ host_and_port = self.config.GATEWAY_LISTEN[0]
+
+ if not sync.poll_condition(
+ lambda: net.is_port_open(host_and_port.port), timeout=15, interval=0.3
+ ):
+ if LOG.isEnabledFor(logging.DEBUG):
+ # make another call with quiet=False to print detailed error logs
+ net.is_port_open(host_and_port.port, quiet=False)
+ raise TimeoutError(f"gave up waiting for gateway server to start on {host_and_port}")
+
+ def _clear_tmp_directory(self):
+ if self.config.CLEAR_TMP_FOLDER:
+ # try to clear temp dir on startup
+ try:
+ files.rm_rf(self.config.dirs.tmp)
+ except PermissionError as e:
+ LOG.error(
+ "unable to delete temp folder %s: %s, please delete manually or you will "
+ "keep seeing these errors.",
+ self.config.dirs.tmp,
+ e,
+ )
+
+ def _cleanup_resources(self):
+ threads.cleanup_threads_and_processes()
+ self._clear_tmp_directory()
+
+
+def create_from_environment() -> LocalstackRuntime:
+ """
+ Creates a new runtime instance from the current environment. It uses a plugin manager to resolve the
+ necessary components from the ``localstack.runtime.components`` plugin namespace to start the runtime.
+
+ :return: a new LocalstackRuntime instance
+ """
+ hooks.on_runtime_create.run()
+
+ plugin_manager = PluginManager(Components.namespace)
+ if config.RUNTIME_COMPONENTS:
+ try:
+ component = plugin_manager.load(config.RUNTIME_COMPONENTS)
+ return LocalstackRuntime(component)
+ except Exception as e:
+ raise ValueError(
+ f"Could not load runtime components from config RUNTIME_COMPONENTS={config.RUNTIME_COMPONENTS}: {e}."
+ ) from e
+ components = plugin_manager.load_all()
+
+ if not components:
+ raise ValueError(
+ f"No component plugins found in namespace {Components.namespace}. Are entry points created "
+ f"correctly?"
+ )
+
+ if len(components) > 1:
+ LOG.warning(
+ "There are more than one component plugins, using the first one which is %s",
+ components[0].name,
+ )
+
+ return LocalstackRuntime(components[0])
diff --git a/localstack-core/localstack/runtime/server/__init__.py b/localstack-core/localstack/runtime/server/__init__.py
new file mode 100644
index 0000000000000..808f22795246a
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/__init__.py
@@ -0,0 +1,5 @@
+from localstack.runtime.server.core import RuntimeServer
+
+__all__ = [
+ "RuntimeServer",
+]
diff --git a/localstack-core/localstack/runtime/server/core.py b/localstack-core/localstack/runtime/server/core.py
new file mode 100644
index 0000000000000..137f276f3d496
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/core.py
@@ -0,0 +1,51 @@
+from plux import Plugin
+from rolo.gateway import Gateway
+
+from localstack import config
+
+
+class RuntimeServer:
+ """
+ The main network IO loop of LocalStack. This could be twisted, hypercorn, or any other server
+ implementation.
+ """
+
+ def register(
+ self,
+ gateway: Gateway,
+ listen: list[config.HostAndPort],
+ ssl_creds: tuple[str, str] | None = None,
+ ):
+ """
+ Registers the Gateway and the port configuration into the server. Some servers like ``twisted`` or
+ ``hypercorn`` support multiple calls to ``register``, allowing you to serve several Gateways
+ through a single event loop.
+
+ :param gateway: the gateway to serve
+ :param listen: the host and port configuration
+ :param ssl_creds: ssl credentials (certificate file path, key file path)
+ """
+ raise NotImplementedError
+
+ def run(self):
+ """
+ Run the server and block the thread.
+ """
+ raise NotImplementedError
+
+ def shutdown(self):
+ """
+ Shutdown the running server.
+ """
+ raise NotImplementedError
+
+
+class RuntimeServerPlugin(Plugin):
+ """
+    Plugin that serves as a factory for specific ``RuntimeServer`` implementations.
+ """
+
+ namespace = "localstack.runtime.server"
+
+ def load(self, *args, **kwargs) -> RuntimeServer:
+ raise NotImplementedError
diff --git a/localstack-core/localstack/runtime/server/hypercorn.py b/localstack-core/localstack/runtime/server/hypercorn.py
new file mode 100644
index 0000000000000..ce15ea3d043e0
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/hypercorn.py
@@ -0,0 +1,68 @@
+import asyncio
+import threading
+
+from hypercorn import Config
+from hypercorn.asyncio import serve
+from rolo.gateway import Gateway
+from rolo.gateway.asgi import AsgiGateway
+
+from localstack import config
+from localstack.logging.setup import setup_hypercorn_logger
+
+from .core import RuntimeServer
+
+
+class HypercornRuntimeServer(RuntimeServer):
+ def __init__(self):
+ self.loop = asyncio.get_event_loop()
+
+ self._close = asyncio.Event()
+ self._closed = threading.Event()
+
+ self._futures = []
+
+ def register(
+ self,
+ gateway: Gateway,
+ listen: list[config.HostAndPort],
+ ssl_creds: tuple[str, str] | None = None,
+ ):
+ hypercorn_config = Config()
+ hypercorn_config.h11_pass_raw_headers = True
+ hypercorn_config.bind = [str(host_and_port) for host_and_port in listen]
+ # hypercorn_config.use_reloader = use_reloader
+
+ setup_hypercorn_logger(hypercorn_config)
+
+ if ssl_creds:
+ cert_file_name, key_file_name = ssl_creds
+ hypercorn_config.certfile = cert_file_name
+ hypercorn_config.keyfile = key_file_name
+
+ app = AsgiGateway(gateway, event_loop=self.loop)
+
+ future = asyncio.run_coroutine_threadsafe(
+ serve(app, hypercorn_config, shutdown_trigger=self._shutdown_trigger),
+ self.loop,
+ )
+ self._futures.append(future)
+
+ def run(self):
+ self.loop.run_forever()
+
+ def shutdown(self):
+ self._close.set()
+ asyncio.run_coroutine_threadsafe(self._set_closed(), self.loop)
+ # TODO: correctly wait for all hypercorn serve coroutines to finish
+ asyncio.run_coroutine_threadsafe(self.loop.shutdown_asyncgens(), self.loop)
+ self.loop.shutdown_default_executor()
+ self.loop.stop()
+
+ async def _wait_server_stopped(self):
+ self._closed.set()
+
+ async def _set_closed(self):
+ self._close.set()
+
+ async def _shutdown_trigger(self):
+ await self._close.wait()
diff --git a/localstack-core/localstack/runtime/server/plugins.py b/localstack-core/localstack/runtime/server/plugins.py
new file mode 100644
index 0000000000000..95746e110375d
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/plugins.py
@@ -0,0 +1,19 @@
+from localstack.runtime.server.core import RuntimeServer, RuntimeServerPlugin
+
+
+class TwistedRuntimeServerPlugin(RuntimeServerPlugin):
+ name = "twisted"
+
+ def load(self, *args, **kwargs) -> RuntimeServer:
+ from .twisted import TwistedRuntimeServer
+
+ return TwistedRuntimeServer()
+
+
+class HypercornRuntimeServerPlugin(RuntimeServerPlugin):
+ name = "hypercorn"
+
+ def load(self, *args, **kwargs) -> RuntimeServer:
+ from .hypercorn import HypercornRuntimeServer
+
+ return HypercornRuntimeServer()
diff --git a/localstack-core/localstack/runtime/server/twisted.py b/localstack-core/localstack/runtime/server/twisted.py
new file mode 100644
index 0000000000000..eba02ae16422c
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/twisted.py
@@ -0,0 +1,57 @@
+from rolo.gateway import Gateway
+from rolo.serving.twisted import TwistedGateway
+from twisted.internet import endpoints, reactor, ssl
+
+from localstack import config
+from localstack.aws.serving.twisted import TLSMultiplexerFactory, stop_thread_pool
+from localstack.utils import patch
+
+from .core import RuntimeServer
+
+
+class TwistedRuntimeServer(RuntimeServer):
+ def __init__(self):
+ self.thread_pool = None
+
+ def register(
+ self,
+ gateway: Gateway,
+ listen: list[config.HostAndPort],
+ ssl_creds: tuple[str, str] | None = None,
+ ):
+ # setup twisted webserver Site
+ site = TwistedGateway(gateway)
+
+ # configure ssl
+ if ssl_creds:
+ cert_file_name, key_file_name = ssl_creds
+ context_factory = ssl.DefaultOpenSSLContextFactory(key_file_name, cert_file_name)
+ context_factory.getContext().use_certificate_chain_file(cert_file_name)
+ protocol_factory = TLSMultiplexerFactory(context_factory, False, site)
+ else:
+ protocol_factory = site
+
+ # add endpoint for each host/port combination
+ for host_and_port in listen:
+ if config.is_ipv6_address(host_and_port.host):
+ endpoint = endpoints.TCP6ServerEndpoint(
+ reactor, host_and_port.port, interface=host_and_port.host
+ )
+ else:
+ # TODO: interface = host?
+ endpoint = endpoints.TCP4ServerEndpoint(reactor, host_and_port.port)
+ endpoint.listen(protocol_factory)
+
+ def run(self):
+ reactor.suggestThreadPoolSize(config.GATEWAY_WORKER_COUNT)
+ self.thread_pool = reactor.getThreadPool()
+ patch.patch(self.thread_pool.stop)(stop_thread_pool)
+
+ # we don't need signal handlers, since all they do is call ``reactor`` stop, which we expect the
+ # caller to do via ``shutdown``.
+ return reactor.run(installSignalHandlers=False)
+
+ def shutdown(self):
+ if self.thread_pool:
+ self.thread_pool.stop(timeout=10)
+ reactor.stop()
diff --git a/localstack-core/localstack/runtime/shutdown.py b/localstack-core/localstack/runtime/shutdown.py
new file mode 100644
index 0000000000000..a64dab86ef930
--- /dev/null
+++ b/localstack-core/localstack/runtime/shutdown.py
@@ -0,0 +1,73 @@
+import logging
+from typing import Any, Callable
+
+from localstack.runtime import hooks
+from localstack.utils.functions import call_safe
+
+LOG = logging.getLogger(__name__)
+
+SERVICE_SHUTDOWN_PRIORITY = -10
+"""Shutdown hook priority for shutting down service plugins."""
+
+
+class ShutdownHandlers:
+ """
+ Register / unregister shutdown handlers. All registered shutdown handlers should execute as fast as possible.
+ Blocking shutdown handlers will block infra shutdown.
+ """
+
+ def __init__(self):
+ self._callbacks = []
+
+ def register(self, shutdown_handler: Callable[[], Any]) -> None:
+ """
+ Register shutdown handler. Handler should not block or take more than a couple seconds.
+
+ :param shutdown_handler: Callable without parameters
+ """
+ self._callbacks.append(shutdown_handler)
+
+ def unregister(self, shutdown_handler: Callable[[], Any]) -> None:
+ """
+ Unregister a handler. Idempotent operation.
+
+ :param shutdown_handler: Shutdown handler which was previously registered
+ """
+ try:
+ self._callbacks.remove(shutdown_handler)
+ except ValueError:
+ pass
+
+ def run(self) -> None:
+ """
+ Execute shutdown handlers in reverse order of registration.
+ Should only be called once, on shutdown.
+ """
+ for callback in reversed(list(self._callbacks)):
+ call_safe(callback)
+
+
+SHUTDOWN_HANDLERS = ShutdownHandlers()
+"""Shutdown handlers run with default priority in an on_infra_shutdown hook."""
+
+ON_AFTER_SERVICE_SHUTDOWN_HANDLERS = ShutdownHandlers()
+"""Shutdown handlers that are executed after all services have been shut down."""
+
+
+@hooks.on_infra_shutdown()
+def run_shutdown_handlers():
+ SHUTDOWN_HANDLERS.run()
+
+
+@hooks.on_infra_shutdown(priority=SERVICE_SHUTDOWN_PRIORITY)
+def shutdown_services():
+ # TODO: this belongs into the shutdown procedure of a `Platform` or `RuntimeContainer` class.
+ from localstack.services.plugins import SERVICE_PLUGINS
+
+ LOG.info("[shutdown] Stopping all services")
+ SERVICE_PLUGINS.stop_all_services()
+
+
+@hooks.on_infra_shutdown(priority=SERVICE_SHUTDOWN_PRIORITY - 10)
+def run_on_after_service_shutdown_handlers():
+ ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.run()
diff --git a/localstack/services/sqs/__init__.py b/localstack-core/localstack/services/__init__.py
similarity index 100%
rename from localstack/services/sqs/__init__.py
rename to localstack-core/localstack/services/__init__.py
diff --git a/localstack/utils/__init__.py b/localstack-core/localstack/services/acm/__init__.py
similarity index 100%
rename from localstack/utils/__init__.py
rename to localstack-core/localstack/services/acm/__init__.py
diff --git a/localstack-core/localstack/services/acm/provider.py b/localstack-core/localstack/services/acm/provider.py
new file mode 100644
index 0000000000000..7425b88832e6b
--- /dev/null
+++ b/localstack-core/localstack/services/acm/provider.py
@@ -0,0 +1,136 @@
+from moto import settings as moto_settings
+from moto.acm import models as acm_models
+
+from localstack.aws.api import RequestContext, handler
+from localstack.aws.api.acm import (
+ AcmApi,
+ ListCertificatesRequest,
+ ListCertificatesResponse,
+ RequestCertificateRequest,
+ RequestCertificateResponse,
+)
+from localstack.services import moto
+from localstack.utils.patch import patch
+
+# reduce the validation wait time from 60 (default) to 10 seconds
+moto_settings.ACM_VALIDATION_WAIT = min(10, moto_settings.ACM_VALIDATION_WAIT)
+
+
+@patch(acm_models.CertBundle.describe)
+def describe(describe_orig, self):
+ # TODO fix! Terrible hack (for parity). Moto adds certain required fields only if status is PENDING_VALIDATION.
+ cert_status = self.status
+ self.status = "PENDING_VALIDATION"
+ try:
+ result = describe_orig(self)
+ finally:
+ self.status = cert_status
+
+ cert = result.get("Certificate", {})
+ cert["Status"] = cert_status
+ sans = cert.setdefault("SubjectAlternativeNames", [])
+ sans_summaries = cert.setdefault("SubjectAlternativeNameSummaries", sans)
+
+ # add missing attributes in ACM certs that cause Terraform to fail
+ addenda = {
+ "RenewalEligibility": "INELIGIBLE",
+ "KeyUsages": [{"Name": "DIGITAL_SIGNATURE"}, {"Name": "KEY_ENCIPHERMENT"}],
+ "ExtendedKeyUsages": [],
+ "Options": {"CertificateTransparencyLoggingPreference": "ENABLED"},
+ }
+ addenda["DomainValidationOptions"] = options = cert.get("DomainValidationOptions")
+ if not options:
+ options = addenda["DomainValidationOptions"] = [
+ {"ValidationMethod": cert.get("ValidationMethod")}
+ ]
+
+ for option in options:
+ option["DomainName"] = domain_name = option.get("DomainName") or cert.get("DomainName")
+ validation_domain = option.get("ValidationDomain") or f"test.{domain_name.lstrip('*.')}"
+ option["ValidationDomain"] = validation_domain
+ option["ValidationMethod"] = option.get("ValidationMethod") or "DNS"
+ status = option.get("ValidationStatus")
+ option["ValidationStatus"] = (
+ "SUCCESS" if (status is None or cert_status == "ISSUED") else status
+ )
+ if option["ValidationMethod"] == "EMAIL":
+ option["ValidationEmails"] = option.get("ValidationEmails") or [
+ f"admin@{self.common_name}"
+ ]
+ test_record = {
+ "Name": validation_domain,
+ "Type": "CNAME",
+ "Value": "test123",
+ }
+ option["ResourceRecord"] = option.get("ResourceRecord") or test_record
+ option["ResourceRecord"]["Name"] = option["ResourceRecord"]["Name"].replace(".*.", ".")
+
+ for key, value in addenda.items():
+ if not cert.get(key):
+ cert[key] = value
+ cert["Serial"] = str(cert.get("Serial") or "")
+
+ if cert.get("KeyAlgorithm") in ["RSA_1024", "RSA_2048"]:
+ cert["KeyAlgorithm"] = cert["KeyAlgorithm"].replace("RSA_", "RSA-")
+
+ # add subject alternative names
+ if cert["DomainName"] not in sans:
+ sans.append(cert["DomainName"])
+ if cert["DomainName"] not in sans_summaries:
+ sans_summaries.append(cert["DomainName"])
+
+ if "HasAdditionalSubjectAlternativeNames" not in cert:
+ cert["HasAdditionalSubjectAlternativeNames"] = False
+
+ if not cert.get("ExtendedKeyUsages"):
+ cert["ExtendedKeyUsages"] = [
+ {"Name": "TLS_WEB_SERVER_AUTHENTICATION", "OID": "1.3.6.1.0.1.2.3.0"},
+ {"Name": "TLS_WEB_CLIENT_AUTHENTICATION", "OID": "1.3.6.1.0.1.2.3.4"},
+ ]
+
+ # remove attributes prior to validation
+ if not cert.get("Status") == "ISSUED":
+ attrs = ["CertificateAuthorityArn", "IssuedAt", "NotAfter", "NotBefore", "Serial"]
+ for attr in attrs:
+ cert.pop(attr, None)
+ cert["KeyUsages"] = []
+ cert["ExtendedKeyUsages"] = []
+
+ return result
+
+
+class AcmProvider(AcmApi):
+ @handler("RequestCertificate", expand=False)
+ def request_certificate(
+ self,
+ context: RequestContext,
+ request: RequestCertificateRequest,
+ ) -> RequestCertificateResponse:
+ response: RequestCertificateResponse = moto.call_moto(context)
+
+ cert_arn = response["CertificateArn"]
+ backend = acm_models.acm_backends[context.account_id][context.region]
+ cert = backend._certificates[cert_arn]
+ if not hasattr(cert, "domain_validation_options"):
+ cert.domain_validation_options = request.get("DomainValidationOptions")
+
+ return response
+
+ @handler("ListCertificates", expand=False)
+ def list_certificates(
+ self,
+ context: RequestContext,
+ request: ListCertificatesRequest,
+ ) -> ListCertificatesResponse:
+ response = moto.call_moto(context)
+ summaries = response.get("CertificateSummaryList") or []
+ for summary in summaries:
+ if "KeyUsages" in summary:
+ summary["KeyUsages"] = [
+ k["Name"] if isinstance(k, dict) else k for k in summary["KeyUsages"]
+ ]
+ if "ExtendedKeyUsages" in summary:
+ summary["ExtendedKeyUsages"] = [
+ k["Name"] if isinstance(k, dict) else k for k in summary["ExtendedKeyUsages"]
+ ]
+ return response
diff --git a/localstack/utils/analytics/__init__.py b/localstack-core/localstack/services/apigateway/__init__.py
similarity index 100%
rename from localstack/utils/analytics/__init__.py
rename to localstack-core/localstack/services/apigateway/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/analytics.py b/localstack-core/localstack/services/apigateway/analytics.py
new file mode 100644
index 0000000000000..d01d93a943f65
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/analytics.py
@@ -0,0 +1,5 @@
+from localstack.utils.analytics.metrics import LabeledCounter
+
+invocation_counter = LabeledCounter(
+ namespace="apigateway", name="rest_api_execute", labels=["invocation_type"]
+)
diff --git a/localstack-core/localstack/services/apigateway/exporter.py b/localstack-core/localstack/services/apigateway/exporter.py
new file mode 100644
index 0000000000000..0706e794c1651
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/exporter.py
@@ -0,0 +1,341 @@
+import abc
+import json
+from typing import Type
+
+from apispec import APISpec
+
+from localstack.aws.api.apigateway import ListOfModel
+from localstack.aws.connect import connect_to
+from localstack.utils.time import TIMESTAMP_FORMAT_TZ, timestamp
+
+from .helpers import OpenAPIExt
+
+# TODO:
+# - handle more extensions
+# see the list in OpenAPIExt
+# currently handled:
+# - x-amazon-apigateway-integration
+#
+
+
+class _BaseOpenApiExporter(abc.ABC):
+ VERSION = None
+
+ def __init__(self):
+ self.export_formats = {"application/json": "to_dict", "application/yaml": "to_yaml"}
+
+ def _add_models(self, spec: APISpec, models: ListOfModel, base_path: str):
+ for model in models:
+ model_def = json.loads(model["schema"])
+ self._resolve_refs(model_def, base_path)
+ spec.components.schema(
+ component_id=model["name"],
+ component=model_def,
+ )
+
+ def _resolve_refs(self, schema: dict, base_path: str):
+ if "$ref" in schema:
+ schema["$ref"] = f"{base_path}/{schema['$ref'].rsplit('/', maxsplit=1)[-1]}"
+ for value in schema.values():
+ if isinstance(value, dict):
+ self._resolve_refs(value, base_path)
+
+ @staticmethod
+ def _get_integration(method_integration: dict) -> dict:
+ fields = {
+ "type",
+ "passthroughBehavior",
+ "requestParameters",
+ "requestTemplates",
+ "httpMethod",
+ "uri",
+ }
+ integration = {k: v for k, v in method_integration.items() if k in fields}
+ integration["type"] = integration["type"].lower()
+ integration["passthroughBehavior"] = integration["passthroughBehavior"].lower()
+ if responses := method_integration.get("integrationResponses"):
+ integration["responses"] = {"default": responses.get("200")}
+ return integration
+
+ @abc.abstractmethod
+ def export(
+ self,
+ api_id: str,
+ stage: str,
+ export_format: str,
+ with_extension: bool,
+ account_id: str,
+ region_name: str,
+ ) -> str | dict: ...
+
+ @abc.abstractmethod
+ def _add_paths(self, spec: APISpec, resources: dict, with_extension: bool):
+ """
+        This method iterates over the different REST resources and their methods to add the APISpec paths using the
+ `apispec` module.
+ The path format is different between Swagger (OpenAPI 2.0) and OpenAPI 3.0
+ :param spec: an APISpec object representing the exported API Gateway REST API
+ :param resources: the API Gateway REST API resources (methods, methods integrations, responses...)
+ :param with_extension: flag to add the custom OpenAPI extension `apigateway`, allowing to properly import
+ integrations for example, or authorizers. (all the `x-amazon` fields contained in `OpenAPIExt`).
+ :return: None
+ """
+ ...
+
+
+class _OpenApiSwaggerExporter(_BaseOpenApiExporter):
+ VERSION = "2.0"
+
+ def _add_paths(self, spec, resources, with_extension):
+ for item in resources.get("items"):
+ path = item.get("path")
+ for method, method_config in item.get("resourceMethods", {}).items():
+ method = method.lower()
+
+ method_integration = method_config.get("methodIntegration", {})
+ integration_responses = method_integration.get("integrationResponses", {})
+ method_responses = method_config.get("methodResponses")
+ responses = {}
+ produces = set()
+ for status_code, values in method_responses.items():
+ response = {"description": f"{status_code} response"}
+ if response_parameters := values.get("responseParameters"):
+ headers = {}
+ for parameter in response_parameters:
+ in_, name = parameter.removeprefix("method.response.").split(".")
+ # TODO: other type?
+ if in_ == "header":
+ headers[name] = {"type": "string"}
+
+ if headers:
+ response["headers"] = headers
+ if response_models := values.get("responseModels"):
+ for content_type, model_name in response_models.items():
+ produces.add(content_type)
+ response["schema"] = model_name
+ if integration_response := integration_responses.get(status_code, {}):
+ produces.update(integration_response.get("responseTemplates", {}).keys())
+
+ responses[status_code] = response
+
+ request_parameters = method_config.get("requestParameters", {})
+ parameters = []
+ for parameter, required in request_parameters.items():
+ in_, name = parameter.removeprefix("method.request.").split(".")
+ in_ = in_ if in_ != "querystring" else "query"
+ parameters.append(
+ {"name": name, "in": in_, "required": required, "type": "string"}
+ )
+
+ request_models = method_config.get("requestModels", {})
+ for model_name in request_models.values():
+ parameter = {
+ "in": "body",
+ "name": model_name,
+ "required": True,
+ "schema": {"$ref": f"#/definitions/{model_name}"},
+ }
+ parameters.append(parameter)
+
+ method_operations = {"responses": responses}
+ if parameters:
+ method_operations["parameters"] = parameters
+ if produces:
+ method_operations["produces"] = list(produces)
+ if content_types := request_models | method_integration.get("requestTemplates", {}):
+ method_operations["consumes"] = list(content_types.keys())
+ if operation_name := method_config.get("operationName"):
+ method_operations["operationId"] = operation_name
+ if with_extension and method_integration:
+ method_operations[OpenAPIExt.INTEGRATION] = self._get_integration(
+ method_integration
+ )
+
+ spec.path(path=path, operations={method: method_operations})
+
+ def export(
+ self,
+ api_id: str,
+ stage: str,
+ export_format: str,
+ with_extension: bool,
+ account_id: str,
+ region_name: str,
+ ) -> str:
+ """
+ https://github.com/OAI/OpenAPI-Specification/blob/main/versions/2.0.md
+ """
+ apigateway_client = connect_to(
+ aws_access_key_id=account_id, region_name=region_name
+ ).apigateway
+
+ rest_api = apigateway_client.get_rest_api(restApiId=api_id)
+ resources = apigateway_client.get_resources(restApiId=api_id)
+ models = apigateway_client.get_models(restApiId=api_id)
+
+ info = {}
+ if (description := rest_api.get("description")) is not None:
+ info["description"] = description
+
+ spec = APISpec(
+ title=rest_api.get("name"),
+ version=rest_api.get("version")
+ or timestamp(rest_api.get("createdDate"), format=TIMESTAMP_FORMAT_TZ),
+ info=info,
+ openapi_version=self.VERSION,
+ basePath=f"/{stage}",
+ schemes=["https"],
+ )
+
+ self._add_paths(spec, resources, with_extension)
+ self._add_models(spec, models["items"], "#/definitions")
+
+ response = getattr(spec, self.export_formats.get(export_format))()
+ if (
+ with_extension
+ and isinstance(response, dict)
+ and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None
+ ):
+ response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types
+
+ return response
+
+
+class _OpenApiOAS30Exporter(_BaseOpenApiExporter):
+ VERSION = "3.0.1"
+
+ def _add_paths(self, spec, resources, with_extension):
+ for item in resources.get("items"):
+ path = item.get("path")
+ for method, method_config in item.get("resourceMethods", {}).items():
+ method = method.lower()
+
+ method_integration = method_config.get("methodIntegration", {})
+ integration_responses = method_integration.get("integrationResponses", {})
+ method_responses = method_config.get("methodResponses")
+ responses = {}
+ produces = set()
+ for status_code, values in method_responses.items():
+ response = {"description": f"{status_code} response"}
+ content = {}
+ if response_parameters := values.get("responseParameters"):
+ headers = {}
+ for parameter in response_parameters:
+ in_, name = parameter.removeprefix("method.response.").split(".")
+ # TODO: other type? query?
+ if in_ == "header":
+ headers[name] = {"schema": {"type": "string"}}
+
+ if headers:
+ response["headers"] = headers
+ if response_models := values.get("responseModels"):
+ for content_type, model_name in response_models.items():
+ content[content_type] = {
+ "schema": {"$ref": f"#/components/schemas/{model_name}"}
+ }
+ if integration_response := integration_responses.get(status_code, {}):
+ produces.update(integration_response.get("responseTemplates", {}).keys())
+
+ response["content"] = content
+ responses[status_code] = response
+
+ request_parameters = method_config.get("requestParameters", {})
+ parameters = []
+ for parameter, required in request_parameters.items():
+ in_, name = parameter.removeprefix("method.request.").split(".")
+ in_ = in_ if in_ != "querystring" else "query"
+ parameters.append({"name": name, "in": in_, "schema": {"type": "string"}})
+
+ request_body = {"content": {}}
+ request_models = method_config.get("requestModels", {})
+ for content_type, model_name in request_models.items():
+ request_body["content"][content_type] = {
+ "schema": {"$ref": f"#/components/schemas/{model_name}"},
+ }
+ request_body["required"] = True
+
+ method_operations = {"responses": responses}
+ if parameters:
+ method_operations["parameters"] = parameters
+ if request_body["content"]:
+ method_operations["requestBody"] = request_body
+ if operation_name := method_config.get("operationName"):
+ method_operations["operationId"] = operation_name
+ if with_extension and method_integration:
+ method_operations[OpenAPIExt.INTEGRATION] = self._get_integration(
+ method_integration
+ )
+
+ spec.path(path=path, operations={method: method_operations})
+
+ def export(
+ self,
+ api_id: str,
+ stage: str,
+ export_format: str,
+ with_extension: bool,
+ account_id: str,
+ region_name: str,
+ ) -> str:
+ """
+        https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.1.md
+ """
+ apigateway_client = connect_to(
+ aws_access_key_id=account_id, region_name=region_name
+ ).apigateway
+
+ rest_api = apigateway_client.get_rest_api(restApiId=api_id)
+ resources = apigateway_client.get_resources(restApiId=api_id)
+ models = apigateway_client.get_models(restApiId=api_id)
+
+ info = {}
+
+ if (description := rest_api.get("description")) is not None:
+ info["description"] = description
+
+ spec = APISpec(
+ title=rest_api.get("name"),
+ version=rest_api.get("version")
+ or timestamp(rest_api.get("createdDate"), format=TIMESTAMP_FORMAT_TZ),
+ info=info,
+ openapi_version=self.VERSION,
+ servers=[{"variables": {"basePath": {"default": stage}}}],
+ )
+
+ self._add_paths(spec, resources, with_extension)
+ self._add_models(spec, models["items"], "#/components/schemas")
+
+ response = getattr(spec, self.export_formats.get(export_format))()
+ if isinstance(response, dict):
+ if "components" not in response:
+ response["components"] = {}
+
+ if (
+ with_extension
+ and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None
+ ):
+ response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types
+
+ return response
+
+
+class OpenApiExporter:
+ exporters: dict[str, Type[_BaseOpenApiExporter]]
+
+ def __init__(self):
+ self.exporters = {"swagger": _OpenApiSwaggerExporter, "oas30": _OpenApiOAS30Exporter}
+
+ def export_api(
+ self,
+ api_id: str,
+ stage: str,
+ export_type: str,
+ account_id: str,
+ region_name: str,
+ export_format: str = "application/json",
+ with_extension=False,
+ ) -> str:
+ exporter = self.exporters.get(export_type)()
+ return exporter.export(
+ api_id, stage, export_format, with_extension, account_id, region_name
+ )
diff --git a/localstack-core/localstack/services/apigateway/helpers.py b/localstack-core/localstack/services/apigateway/helpers.py
new file mode 100644
index 0000000000000..8e69a9218e6e2
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/helpers.py
@@ -0,0 +1,1010 @@
+import contextlib
+import copy
+import hashlib
+import json
+import logging
+from typing import List, Optional, TypedDict, Union
+from urllib import parse as urlparse
+
+from jsonpatch import apply_patch
+from jsonpointer import JsonPointerException
+from moto.apigateway import models as apigw_models
+from moto.apigateway.models import APIGatewayBackend, Integration, Resource
+from moto.apigateway.models import RestAPI as MotoRestAPI
+from moto.apigateway.utils import ApigwAuthorizerIdentifier, ApigwResourceIdentifier
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.aws.api.apigateway import (
+ Authorizer,
+ ConnectionType,
+ DocumentationPart,
+ DocumentationPartLocation,
+ IntegrationType,
+ Model,
+ NotFoundException,
+ PutRestApiRequest,
+ RequestValidator,
+)
+from localstack.constants import (
+ APPLICATION_JSON,
+ AWS_REGION_US_EAST_1,
+ DEFAULT_AWS_ACCOUNT_ID,
+ PATH_USER_REQUEST,
+)
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.models import (
+ ApiGatewayStore,
+ RestApiContainer,
+ apigateway_stores,
+)
+from localstack.utils import common
+from localstack.utils.json import parse_json_or_yaml
+from localstack.utils.strings import short_uid, to_bytes, to_str
+from localstack.utils.urls import localstack_host
+
+LOG = logging.getLogger(__name__)
+
+REQUEST_TIME_DATE_FORMAT = "%d/%b/%Y:%H:%M:%S %z"
+
+INVOKE_TEST_LOG_TEMPLATE = """Execution log for request {request_id}
+ {formatted_date} : Starting execution for request: {request_id}
+ {formatted_date} : HTTP Method: {http_method}, Resource Path: {resource_path}
+ {formatted_date} : Method request path: {request_path}
+ {formatted_date} : Method request query string: {query_string}
+ {formatted_date} : Method request headers: {request_headers}
+ {formatted_date} : Method request body before transformations: {request_body}
+ {formatted_date} : Method response body after transformations: {response_body}
+ {formatted_date} : Method response headers: {response_headers}
+ {formatted_date} : Successfully completed execution
+ {formatted_date} : Method completed with status: {status_code}
+ """
+
+EMPTY_MODEL = "Empty"
+ERROR_MODEL = "Error"
+
+
+# TODO: we could actually parse the schema to get TypedDicts with the proper schema/types for each property
+class OpenAPIExt:
+ """
+ Represents the specific OpenAPI extensions for API Gateway
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html
+ """
+
+ ANY_METHOD = "x-amazon-apigateway-any-method"
+ CORS = "x-amazon-apigateway-cors"
+ API_KEY_SOURCE = "x-amazon-apigateway-api-key-source"
+ AUTH = "x-amazon-apigateway-auth"
+ AUTHORIZER = "x-amazon-apigateway-authorizer"
+ AUTHTYPE = "x-amazon-apigateway-authtype"
+ BINARY_MEDIA_TYPES = "x-amazon-apigateway-binary-media-types"
+ DOCUMENTATION = "x-amazon-apigateway-documentation"
+ ENDPOINT_CONFIGURATION = "x-amazon-apigateway-endpoint-configuration"
+ GATEWAY_RESPONSES = "x-amazon-apigateway-gateway-responses"
+ IMPORTEXPORT_VERSION = "x-amazon-apigateway-importexport-version"
+ INTEGRATION = "x-amazon-apigateway-integration"
+ INTEGRATIONS = "x-amazon-apigateway-integrations" # used in components
+ MINIMUM_COMPRESSION_SIZE = "x-amazon-apigateway-minimum-compression-size"
+ POLICY = "x-amazon-apigateway-policy"
+ REQUEST_VALIDATOR = "x-amazon-apigateway-request-validator"
+ REQUEST_VALIDATORS = "x-amazon-apigateway-request-validators"
+ TAG_VALUE = "x-amazon-apigateway-tag-value"
+
+
+class AuthorizerConfig(TypedDict):
+ authorizer: Authorizer
+ authorization_scopes: Optional[list[str]]
+
+
+# TODO: make the CRUD operations in this file generic for the different model types (authorizers, validators, ...)
+
+
+def get_apigateway_store(context: RequestContext) -> ApiGatewayStore:
+ return apigateway_stores[context.account_id][context.region]
+
+
+def get_apigateway_store_for_invocation(context: ApiInvocationContext) -> ApiGatewayStore:
+ account_id = context.account_id or DEFAULT_AWS_ACCOUNT_ID
+ region_name = context.region_name or AWS_REGION_US_EAST_1
+ return apigateway_stores[account_id][region_name]
+
+
+def get_moto_backend(account_id: str, region: str) -> APIGatewayBackend:
+ return apigw_models.apigateway_backends[account_id][region]
+
+
+def get_moto_rest_api(context: RequestContext, rest_api_id: str) -> MotoRestAPI:
+ moto_backend = apigw_models.apigateway_backends[context.account_id][context.region]
+ if rest_api := moto_backend.apis.get(rest_api_id):
+ return rest_api
+ else:
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+
+def get_rest_api_container(context: RequestContext, rest_api_id: str) -> RestApiContainer:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+ return rest_api_container
+
+
+class OpenAPISpecificationResolver:
+ def __init__(self, document: dict, rest_api_id: str, allow_recursive=True):
+ self.document = document
+ self.allow_recursive = allow_recursive
+ # cache which maps known refs to part of the document
+ self._cache = {}
+ self._refpaths = ["#"]
+ host_definition = localstack_host()
+ self._base_url = f"{config.get_protocol()}://apigateway.{host_definition.host_and_port()}/restapis/{rest_api_id}/models/"
+
+ def _is_ref(self, item) -> bool:
+ return isinstance(item, dict) and "$ref" in item
+
+ def _is_internal_ref(self, refpath) -> bool:
+ return str(refpath).startswith("#/")
+
+ @property
+ def current_path(self):
+ return self._refpaths[-1]
+
+ @contextlib.contextmanager
+ def _pathctx(self, refpath: str):
+ if not self._is_internal_ref(refpath):
+ refpath = "/".join((self.current_path, refpath))
+
+ self._refpaths.append(refpath)
+ yield
+ self._refpaths.pop()
+
+ def _resolve_refpath(self, refpath: str) -> dict:
+ if refpath in self._refpaths and not self.allow_recursive:
+ raise Exception("recursion detected with allow_recursive=False")
+
+        # We don't resolve the Model definition, we will return an absolute reference to the model like AWS
+ # When validating the schema, we will need to resolve the $ref there
+ # Because if we resolved all $ref in schema, it can lead to circular references in complex schemas
+ if self.current_path.startswith("#/definitions") or self.current_path.startswith(
+ "#/components/schemas"
+ ):
+ return {"$ref": f"{self._base_url}{refpath.rsplit('/', maxsplit=1)[-1]}"}
+
+ # We should not resolve the Model either, because we need its name to set it to the Request/ResponseModels,
+ # it just makes our job more difficult to retrieve the Model name
+ # We still need to verify that the ref exists
+ is_schema = self.current_path.endswith("schema")
+
+ if refpath in self._cache and not is_schema:
+ return self._cache.get(refpath)
+
+ with self._pathctx(refpath):
+ if self._is_internal_ref(self.current_path):
+ cur = self.document
+ else:
+ raise NotImplementedError("External references not yet supported.")
+
+ for step in self.current_path.split("/")[1:]:
+ cur = cur.get(step)
+
+ self._cache[self.current_path] = cur
+
+ if is_schema:
+ # If the $ref doesn't exist in our schema, return None, otherwise return the ref
+ return {"$ref": refpath} if cur else None
+
+ return cur
+
+ def _namespaced_resolution(self, namespace: str, data: Union[dict, list]) -> Union[dict, list]:
+ with self._pathctx(namespace):
+ return self._resolve_references(data)
+
+ def _resolve_references(self, data) -> Union[dict, list]:
+ if self._is_ref(data):
+ return self._resolve_refpath(data["$ref"])
+
+ if isinstance(data, dict):
+ for k, v in data.items():
+ data[k] = self._namespaced_resolution(k, v)
+ elif isinstance(data, list):
+ for i, v in enumerate(data):
+ data[i] = self._namespaced_resolution(str(i), v)
+
+ return data
+
+ def resolve_references(self) -> dict:
+ return self._resolve_references(self.document)
+
+
+class ModelResolver:
+ """
+ This class allows a Model to use recursive and circular references to other Models.
+ To be able to JSON dump Models, AWS will not resolve Models but will use their absolute $ref instead.
+ When validating, we need to resolve those references, using JSON schema tricks to allow recursion.
+ See: https://json-schema.org/understanding-json-schema/structuring.html#recursion
+
+ To allow a simpler structure, we're not replacing directly the reference with the schema, but instead create
+ a map of all used schema in $defs, as advised on JSON schema:
+ See: https://json-schema.org/understanding-json-schema/structuring.html#defs
+
+ This allows us to not render every sub schema/models, but instead keep a clean map of used schemas.
+ """
+
+ def __init__(self, rest_api_container: RestApiContainer, model_name: str):
+ self.rest_api_container = rest_api_container
+ self.model_name = model_name
+ self._deps = {}
+ self._current_resolving_name = None
+
+ @contextlib.contextmanager
+ def _resolving_ctx(self, current_resolving_name: str):
+ self._current_resolving_name = current_resolving_name
+ yield
+ self._current_resolving_name = None
+
+ def resolve_model(self, model: dict) -> dict | None:
+ resolved_model = copy.deepcopy(model)
+ model_names = set()
+
+ def _look_for_ref(sub_model):
+ for key, value in sub_model.items():
+ if key == "$ref":
+ ref_name = value.rsplit("/", maxsplit=1)[-1]
+ if ref_name == self.model_name:
+ # if we reference our main Model, use the # for recursive access
+ sub_model[key] = "#"
+ continue
+ # otherwise, this Model will be available in $defs
+ sub_model[key] = f"#/$defs/{ref_name}"
+
+ if ref_name != self._current_resolving_name:
+ # add the ref to the next ref to resolve and to $deps
+ model_names.add(ref_name)
+
+ elif isinstance(value, dict):
+ _look_for_ref(value)
+ elif isinstance(value, list):
+ for val in value:
+ if isinstance(val, dict):
+ _look_for_ref(val)
+
+ if isinstance(resolved_model, dict):
+ _look_for_ref(resolved_model)
+
+ if model_names:
+ for ref_model_name in model_names:
+ if ref_model_name in self._deps:
+ continue
+
+ def_resolved, was_resolved = self._get_resolved_submodel(model_name=ref_model_name)
+
+ if not def_resolved:
+ LOG.debug(
+ "Failed to resolve submodel %s for model %s",
+ ref_model_name,
+ self._current_resolving_name,
+ )
+ return
+ # if the ref was already resolved, we copy the result to not alter the already resolved schema
+ if was_resolved:
+ def_resolved = copy.deepcopy(def_resolved)
+
+ self._remove_self_ref(def_resolved)
+
+ if "$deps" in def_resolved:
+ # this will happen only if the schema was already resolved, otherwise the deps would be in _deps
+ # remove own definition in case of recursive / circular Models
+ def_resolved["$defs"].pop(self.model_name, None)
+ # remove the $defs from the schema, we don't want nested $defs
+ def_resolved_defs = def_resolved.pop("$defs")
+ # merge the resolved sub model $defs to the main schema
+ self._deps.update(def_resolved_defs)
+
+ # add the dependencies to the global $deps
+ self._deps[ref_model_name] = def_resolved
+
+ return resolved_model
+
+ def _remove_self_ref(self, resolved_schema: dict):
+ for key, value in resolved_schema.items():
+ if key == "$ref":
+ ref_name = value.rsplit("/", maxsplit=1)[-1]
+ if ref_name == self.model_name:
+ resolved_schema[key] = "#"
+
+ elif isinstance(value, dict):
+ self._remove_self_ref(value)
+
+ def get_resolved_model(self) -> dict | None:
+ if not (resolved_model := self.rest_api_container.resolved_models.get(self.model_name)):
+ model = self.rest_api_container.models.get(self.model_name)
+ if not model:
+ return None
+ schema = json.loads(model["schema"])
+ resolved_model = self.resolve_model(schema)
+ if not resolved_model:
+ return None
+ # attach the resolved dependencies of the schema
+ if self._deps:
+ resolved_model["$defs"] = self._deps
+ self.rest_api_container.resolved_models[self.model_name] = resolved_model
+
+ return resolved_model
+
+ def _get_resolved_submodel(self, model_name: str) -> tuple[dict | None, bool | None]:
+ was_resolved = True
+ if not (resolved_model := self.rest_api_container.resolved_models.get(model_name)):
+ was_resolved = False
+ model = self.rest_api_container.models.get(model_name)
+ if not model:
+ LOG.warning(
+ "Error while validating the request body, could not the find the Model: '%s'",
+ model_name,
+ )
+ return None, was_resolved
+ schema = json.loads(model["schema"])
+
+ with self._resolving_ctx(model_name):
+ resolved_model = self.resolve_model(schema)
+
+ return resolved_model, was_resolved
+
+
+def resolve_references(data: dict, rest_api_id, allow_recursive=True) -> dict:
+ resolver = OpenAPISpecificationResolver(
+ data, allow_recursive=allow_recursive, rest_api_id=rest_api_id
+ )
+ return resolver.resolve_references()
+
+
+# ---------------
+# UTIL FUNCTIONS
+# ---------------
+
+
+def path_based_url(api_id: str, stage_name: str, path: str) -> str:
+ """Return URL for inbound API gateway for given API ID, stage name, and path"""
+ pattern = "%s/restapis/{api_id}/{stage_name}/%s{path}" % (
+ config.external_service_url(),
+ PATH_USER_REQUEST,
+ )
+ return pattern.format(api_id=api_id, stage_name=stage_name, path=path)
+
+
+def localstack_path_based_url(api_id: str, stage_name: str, path: str) -> str:
+ """Return URL for inbound API gateway for given API ID, stage name, and path on the _aws namespace"""
+ return f"{config.external_service_url()}/_aws/execute-api/{api_id}/{stage_name}{path}"
+
+
+def host_based_url(rest_api_id: str, path: str, stage_name: str = None):
+ """Return URL for inbound API gateway for given API ID, stage name, and path with custom dns
+ format"""
+ pattern = "{endpoint}{stage}{path}"
+ stage = stage_name and f"/{stage_name}" or ""
+ return pattern.format(endpoint=get_execute_api_endpoint(rest_api_id), stage=stage, path=path)
+
+
+def get_execute_api_endpoint(api_id: str, protocol: str | None = None) -> str:
+ host = localstack_host()
+ protocol = protocol or config.get_protocol()
+ return f"{protocol}://{api_id}.execute-api.{host.host_and_port()}"
+
+
+def apply_json_patch_safe(subject, patch_operations, in_place=True, return_list=False):
+ """Apply JSONPatch operations, using some customizations for compatibility with API GW
+ resources."""
+
+ results = []
+ patch_operations = (
+ [patch_operations] if isinstance(patch_operations, dict) else patch_operations
+ )
+ for operation in patch_operations:
+ try:
+ # special case: for "replace" operations, assume "" as the default value
+ if operation["op"] == "replace" and operation.get("value") is None:
+ operation["value"] = ""
+
+ if operation["op"] != "remove" and operation.get("value") is None:
+ LOG.info('Missing "value" in JSONPatch operation for %s: %s', subject, operation)
+ continue
+
+ if operation["op"] == "add":
+ path = operation["path"]
+ target = subject.get(path.strip("/"))
+ target = target or common.extract_from_jsonpointer_path(subject, path)
+ if not isinstance(target, list):
+ # for `add` operation, if the target does not exist, set it to an empty dict (default behaviour)
+ # previous behaviour was an empty list. Revisit this if issues arise.
+ # TODO: we are assigning a value, even if not `in_place=True`
+ common.assign_to_path(subject, path, value={}, delimiter="/")
+
+ target = common.extract_from_jsonpointer_path(subject, path)
+ if isinstance(target, list) and not path.endswith("/-"):
+ # if "path" is an attribute name pointing to an array in "subject", and we're running
+ # an "add" operation, then we should use the standard-compliant notation "/path/-"
+ operation["path"] = f"{path}/-"
+
+ if operation["op"] == "remove":
+ path = operation["path"]
+ common.assign_to_path(subject, path, value={}, delimiter="/")
+
+ result = apply_patch(subject, [operation], in_place=in_place)
+ if not in_place:
+ subject = result
+ results.append(result)
+ except JsonPointerException:
+ pass # path cannot be found - ignore
+ except Exception as e:
+ if "non-existent object" in str(e):
+ if operation["op"] == "replace":
+ # fall back to an ADD operation if the REPLACE fails
+ operation["op"] = "add"
+ result = apply_patch(subject, [operation], in_place=in_place)
+ results.append(result)
+ continue
+ if operation["op"] == "remove" and isinstance(subject, dict):
+ result = subject.pop(operation["path"], None)
+ results.append(result)
+ continue
+ raise
+ if return_list:
+ return results
+ return (results or [subject])[-1]
+
+
+def add_documentation_parts(rest_api_container, documentation):
+ for doc_part in documentation.get("documentationParts", []):
+ entity_id = short_uid()[:6]
+ location = doc_part["location"]
+ rest_api_container.documentation_parts[entity_id] = DocumentationPart(
+ id=entity_id,
+ location=DocumentationPartLocation(
+ type=location.get("type"),
+ path=location.get("path", "/")
+ if location.get("type") not in ["API", "MODEL"]
+ else None,
+ method=location.get("method"),
+ statusCode=location.get("statusCode"),
+ name=location.get("name"),
+ ),
+ properties=doc_part["properties"],
+ )
+
+
+def import_api_from_openapi_spec(
+ rest_api: MotoRestAPI, context: RequestContext, request: PutRestApiRequest
+) -> tuple[MotoRestAPI, list[str]]:
+ """Import an API from an OpenAPI spec document"""
+ body = parse_json_or_yaml(to_str(request["body"].read()))
+
+ warnings = []
+
+    # TODO There is an issue with the botocore specs so the parameters don't get populated as they should
+    # Once this is fixed we can uncomment the code below instead of taking the parameters from the context request
+ # query_params = request.get("parameters") or {}
+ query_params: dict = context.request.values.to_dict()
+
+ resolved_schema = resolve_references(copy.deepcopy(body), rest_api_id=rest_api.id)
+ account_id = context.account_id
+ region_name = context.region
+
+ # TODO:
+ # 1. validate the "mode" property of the spec document, "merge" or "overwrite", and properly apply it
+ # for now, it only considers it for the binaryMediaTypes
+ # 2. validate the document type, "swagger" or "openapi"
+ mode = request.get("mode", "merge")
+
+ rest_api.version = (
+ str(version) if (version := resolved_schema.get("info", {}).get("version")) else None
+ )
+    # XXX for some reason this makes cf tests fail, that's why it is commented out.
+ # test_cfn_handle_serverless_api_resource
+ # rest_api.name = resolved_schema.get("info", {}).get("title")
+ rest_api.description = resolved_schema.get("info", {}).get("description")
+
+ # authorizers map to avoid duplication
+ authorizers = {}
+
+ store = get_apigateway_store(context=context)
+ rest_api_container = store.rest_apis[rest_api.id]
+
+ def is_api_key_required(path_payload: dict) -> bool:
+ # TODO: consolidate and refactor with `create_authorizer`, duplicate logic for now
+ if not (security_schemes := path_payload.get("security")):
+ return False
+
+ for security_scheme in security_schemes:
+ for security_scheme_name in security_scheme.keys():
+ # $.securityDefinitions is Swagger 2.0
+ # $.components.SecuritySchemes is OpenAPI 3.0
+ security_definitions = resolved_schema.get(
+ "securityDefinitions"
+ ) or resolved_schema.get("components", {}).get("securitySchemes", {})
+ if security_scheme_name in security_definitions:
+ security_config = security_definitions.get(security_scheme_name)
+ if (
+ OpenAPIExt.AUTHORIZER not in security_config
+ and security_config.get("type") == "apiKey"
+ and security_config.get("name", "").lower() == "x-api-key"
+ ):
+ return True
+ return False
+
    def create_authorizers(security_schemes: dict) -> None:
        """Create an Authorizer for every security scheme that carries the
        ``x-amazon-apigateway-authorizer`` extension.

        Populates both the API container's authorizer store and the local
        ``authorizers`` map (keyed by security scheme name), which is later
        consulted by ``get_authorizer``.
        """
        for security_scheme_name, security_config in security_schemes.items():
            # only schemes with the AWS authorizer extension produce an Authorizer resource
            aws_apigateway_authorizer = security_config.get(OpenAPIExt.AUTHORIZER, {})
            if not aws_apigateway_authorizer:
                continue

            # idempotency: skip schemes we already created an authorizer for
            if security_scheme_name in authorizers:
                continue

            authorizer_type = aws_apigateway_authorizer.get("type", "").upper()
            # TODO: do we need validation of resources here?
            authorizer = Authorizer(
                id=ApigwAuthorizerIdentifier(
                    account_id, region_name, security_scheme_name
                ).generate(),
                name=security_scheme_name,
                type=authorizer_type,
                authorizerResultTtlInSeconds=aws_apigateway_authorizer.get(
                    "authorizerResultTtlInSeconds", None
                ),
            )
            # optional fields are only set on the authorizer when present in the spec
            if provider_arns := aws_apigateway_authorizer.get("providerARNs"):
                authorizer["providerARNs"] = provider_arns
            if auth_type := security_config.get(OpenAPIExt.AUTHTYPE):
                authorizer["authType"] = auth_type
            if authorizer_uri := aws_apigateway_authorizer.get("authorizerUri"):
                authorizer["authorizerUri"] = authorizer_uri
            if authorizer_credentials := aws_apigateway_authorizer.get("authorizerCredentials"):
                authorizer["authorizerCredentials"] = authorizer_credentials
            if authorizer_type in ("TOKEN", "COGNITO_USER_POOLS"):
                # for these types the identity source is derived from the scheme's header name
                header_name = security_config.get("name")
                authorizer["identitySource"] = f"method.request.header.{header_name}"
            elif identity_source := aws_apigateway_authorizer.get("identitySource"):
                # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-authorizer.html
                # Applicable for the authorizer of the request and jwt type only
                authorizer["identitySource"] = identity_source
            if identity_validation_expression := aws_apigateway_authorizer.get(
                "identityValidationExpression"
            ):
                authorizer["identityValidationExpression"] = identity_validation_expression

            rest_api_container.authorizers[authorizer["id"]] = authorizer

            authorizers[security_scheme_name] = authorizer
+
+ def get_authorizer(path_payload: dict) -> Optional[AuthorizerConfig]:
+ if not (security_schemes := path_payload.get("security")):
+ return None
+
+ for security_scheme in security_schemes:
+ for security_scheme_name, scopes in security_scheme.items():
+ if authorizer := authorizers.get(security_scheme_name):
+ return AuthorizerConfig(authorizer=authorizer, authorization_scopes=scopes)
+
    def get_or_create_path(abs_path: str, base_path: str):
        """Recursively ensure a `Resource` exists for every segment of ``abs_path``.

        Parent resources are created first (depth-first recursion); an existing
        resource with the same path part and parent is reused. Returns the
        resource for the final path segment.
        """
        parts = abs_path.rstrip("/").replace("//", "/").split("/")
        parent_id = ""
        if len(parts) > 1:
            # resolve/create the parent resource first so the child can link to it
            parent_path = "/".join(parts[:-1])
            parent = get_or_create_path(parent_path, base_path=base_path)
            parent_id = parent.id
        # reuse an already-registered resource with the same path part and parent
        if existing := [
            r
            for r in rest_api.resources.values()
            if r.path_part == (parts[-1] or "/") and (r.parent_id or "") == (parent_id or "")
        ]:
            return existing[0]

        # construct relative path (without base path), then add field resources for this path
        rel_path = abs_path.removeprefix(base_path)
        return add_path_methods(rel_path, parts, parent_id=parent_id)
+
    def add_path_methods(rel_path: str, parts: List[str], parent_id=""):
        """Create a `Resource` for ``rel_path`` and populate it from the OpenAPI document:
        methods, request parameters/models, validators, method responses, the integration
        and its integration responses.

        :param rel_path: path relative to the API base path (e.g. "/pets/{id}")
        :param parts: the absolute path split on "/"; the last element is this resource's path part
        :param parent_id: id of the parent `Resource`, empty string for the root
        :return: the created `Resource`
        """
        rel_path = rel_path or "/"
        child_id = ApigwResourceIdentifier(account_id, region_name, parent_id, rel_path).generate()

        # Create a `Resource` for the passed `rel_path`
        resource = Resource(
            account_id=rest_api.account_id,
            resource_id=child_id,
            region_name=rest_api.region_name,
            api_id=rest_api.id,
            path_part=parts[-1] or "/",
            parent_id=parent_id,
        )

        paths_dict = resolved_schema["paths"]
        method_paths = paths_dict.get(rel_path, {})
        # Iterate over each field of the `path` to try to find the methods defined
        for field, field_schema in method_paths.items():
            # non-operation fields (and non-dict values) are not HTTP methods — skip them
            if field in [
                "parameters",
                "servers",
                "description",
                "summary",
                "$ref",
            ] or not isinstance(field_schema, dict):
                LOG.warning("Ignoring unsupported field %s in path %s", field, rel_path)
                # TODO: check if we should skip parameters, those are global parameters applied to every routes but
                #  can be overridden at the operation level
                continue

            method_name = field.upper()
            if method_name == OpenAPIExt.ANY_METHOD.upper():
                method_name = "ANY"

            # Create the `Method` resource for each method path
            method_resource = create_method_resource(resource, method_name, field_schema)

            # Get the `Method` requestParameters and requestModels
            request_parameters_schema = field_schema.get("parameters", [])
            request_parameters = {}
            request_models = {}
            if request_parameters_schema:
                for req_param_data in request_parameters_schema:
                    # For Swagger 2.0, possible values for `in` from the specs are "query", "header", "path",
                    # "formData" or "body".
                    # For OpenAPI 3.0, values are "query", "header", "path" or "cookie".
                    # Only "path", "header" and "query" are supported in API Gateway for requestParameters
                    # "body" is mapped to a requestModel
                    param_location = req_param_data.get("in")
                    param_name = req_param_data.get("name")
                    param_required = req_param_data.get("required", False)
                    if param_location in ("query", "header", "path"):
                        if param_location == "query":
                            param_location = "querystring"

                        request_parameters[f"method.request.{param_location}.{param_name}"] = (
                            param_required
                        )

                    elif param_location == "body":
                        request_models = {APPLICATION_JSON: param_name}

                    else:
                        # unsupported locations (e.g. "formData", "cookie") are dropped with a warning
                        LOG.warning(
                            "Ignoring unsupported requestParameters/requestModels location value for %s: %s",
                            param_name,
                            param_location,
                        )
                        continue

            # this replaces 'body' in Parameters for OpenAPI 3.0, a requestBody Object
            # https://swagger.io/specification/v3/#request-body-object
            if request_models_schema := field_schema.get("requestBody"):
                model_ref = None
                for content_type, media_type in request_models_schema.get("content", {}).items():
                    # we're iterating over the Media Type object:
                    # https://swagger.io/specification/v3/#media-type-object
                    if content_type == APPLICATION_JSON:
                        model_ref = media_type.get("schema", {}).get("$ref")
                        continue
                    LOG.warning(
                        "Found '%s' content-type for the MethodResponse model for path '%s' and method '%s', not adding the model as currently not supported",
                        content_type,
                        rel_path,
                        method_name,
                    )
                if model_ref:
                    # the model name is the last segment of the JSON pointer ($ref)
                    model_schema = model_ref.rsplit("/", maxsplit=1)[-1]
                    request_models = {APPLICATION_JSON: model_schema}

            method_resource.request_models = request_models or None

            # check if there's a request validator set in the method
            request_validator_name = field_schema.get(
                OpenAPIExt.REQUEST_VALIDATOR, default_req_validator_name
            )
            if request_validator_name:
                if not (
                    req_validator_id := request_validator_name_id_map.get(request_validator_name)
                ):
                    # Might raise an exception here if we properly validate the template
                    LOG.warning(
                        "A validator ('%s') was referenced for %s.(%s), but is not defined",
                        request_validator_name,
                        rel_path,
                        method_name,
                    )
                # NOTE(review): when the lookup above fails, this still assigns None — confirm intended
                method_resource.request_validator_id = req_validator_id

            # we check if there's a path parameter, AWS adds the requestParameter automatically
            resource_path_part = parts[-1].strip("/")
            if is_variable_path(resource_path_part) and not is_greedy_path(resource_path_part):
                path_parameter = resource_path_part[1:-1]  # remove the curly braces
                request_parameters[f"method.request.path.{path_parameter}"] = True

            method_resource.request_parameters = request_parameters or None

            # Create the `MethodResponse` for the previously created `Method`
            method_responses = field_schema.get("responses", {})
            for method_status_code, method_response in method_responses.items():
                method_status_code = str(method_status_code)
                method_response_model = None
                model_ref = None
                # separating the two different versions, Swagger (2.0) and OpenAPI 3.0
                if "schema" in method_response:  # this is Swagger
                    model_ref = method_response["schema"].get("$ref")
                elif "content" in method_response:  # this is OpenAPI 3.0
                    for content_type, media_type in method_response["content"].items():
                        # we're iterating over the Media Type object:
                        # https://swagger.io/specification/v3/#media-type-object
                        if content_type == APPLICATION_JSON:
                            model_ref = media_type.get("schema", {}).get("$ref")
                            continue
                        LOG.warning(
                            "Found '%s' content-type for the MethodResponse model for path '%s' and method '%s', not adding the model as currently not supported",
                            content_type,
                            rel_path,
                            method_name,
                        )

                if model_ref:
                    model_schema = model_ref.rsplit("/", maxsplit=1)[-1]

                    method_response_model = {APPLICATION_JSON: model_schema}

                method_response_parameters = {}
                if response_param_headers := method_response.get("headers"):
                    for header, header_info in response_param_headers.items():
                        # TODO: make use of `header_info`
                        method_response_parameters[f"method.response.header.{header}"] = False

                method_resource.create_response(
                    method_status_code,
                    method_response_model,
                    method_response_parameters or None,
                )

            # Create the `Integration` for the previously created `Method`
            method_integration = field_schema.get(OpenAPIExt.INTEGRATION, {})

            integration_type = (
                i_type.upper() if (i_type := method_integration.get("type")) else None
            )

            match integration_type:
                case "AWS_PROXY":
                    # if the integration is AWS_PROXY with lambda, the only accepted integration method is POST
                    integration_method = "POST"
                case _:
                    integration_method = (
                        method_integration.get("httpMethod") or method_name
                    ).upper()

            connection_type = (
                ConnectionType.INTERNET
                if integration_type in (IntegrationType.HTTP, IntegrationType.HTTP_PROXY)
                else None
            )

            # non-string requestParameters values are dropped, and a warning is recorded
            if integration_request_parameters := method_integration.get("requestParameters"):
                validated_parameters = {}
                for k, v in integration_request_parameters.items():
                    if isinstance(v, str):
                        validated_parameters[k] = v
                    else:
                        # TODO This fixes for boolean serialization. We should validate how other types behave
                        value = str(v).lower()
                        warnings.append(
                            "Invalid format for 'requestParameters'. Expected type string for property "
                            f"'{k}' of resource '{resource.get_path()}' and method '{method_name}' but got '{value}'"
                        )

                integration_request_parameters = validated_parameters

            integration = Integration(
                http_method=integration_method,
                uri=method_integration.get("uri"),
                integration_type=integration_type,
                passthrough_behavior=method_integration.get(
                    "passthroughBehavior", "WHEN_NO_MATCH"
                ).upper(),
                request_templates=method_integration.get("requestTemplates"),
                request_parameters=integration_request_parameters,
                cache_namespace=resource.id,
                timeout_in_millis=method_integration.get("timeoutInMillis") or "29000",
                content_handling=method_integration.get("contentHandling"),
                connection_type=connection_type,
            )

            # Create the `IntegrationResponse` for the previously created `Integration`
            if method_integration_responses := method_integration.get("responses"):
                for pattern, integration_responses in method_integration_responses.items():
                    integration_response_templates = integration_responses.get("responseTemplates")
                    integration_response_parameters = integration_responses.get(
                        "responseParameters"
                    )

                    integration_response = integration.create_integration_response(
                        status_code=str(integration_responses.get("statusCode", 200)),
                        selection_pattern=pattern if pattern != "default" else None,
                        response_templates=integration_response_templates,
                        response_parameters=integration_response_parameters,
                        content_handling=None,
                    )
                    # moto set the responseTemplates to an empty dict when it should be None if not defined
                    if integration_response_templates is None:
                        integration_response.response_templates = None

            resource.resource_methods[method_name].method_integration = integration

        rest_api.resources[child_id] = resource
        rest_api_container.resource_children.setdefault(parent_id, []).append(child_id)
        return resource
+
    def create_method_resource(child, method, method_schema):
        """Create a `Method` on the given resource, deriving API-key requirement and
        authorization settings (type, authorizer id, scopes) from the method schema.

        A method-level authorizer takes precedence; otherwise the API's default
        authorizer (if any) is applied.
        """
        authorization_type = "NONE"
        api_key_required = is_api_key_required(method_schema)
        kwargs = {}

        if authorizer := get_authorizer(method_schema) or default_authorizer:
            method_authorizer = authorizer["authorizer"]
            # override the authorizer_type if it's a TOKEN or REQUEST to CUSTOM
            if (authorizer_type := method_authorizer["type"]) in ("TOKEN", "REQUEST"):
                authorization_type = "CUSTOM"
            else:
                authorization_type = authorizer_type

            kwargs["authorizer_id"] = method_authorizer["id"]

            if authorization_scopes := authorizer.get("authorization_scopes"):
                kwargs["authorization_scopes"] = authorization_scopes

        return child.add_method(
            method,
            api_key_required=api_key_required,
            authorization_type=authorization_type,
            operation_name=method_schema.get("operationId"),
            **kwargs,
        )
+
    # create the Models defined in the spec:
    # $.definitions (Swagger 2.0) or $.components.schemas (OpenAPI 3.0)
    models = resolved_schema.get("definitions") or resolved_schema.get("components", {}).get(
        "schemas", {}
    )
    for name, model_data in models.items():
        model_id = short_uid()[:6]  # length 6 to make TF tests pass
        model = Model(
            id=model_id,
            name=name,
            contentType=APPLICATION_JSON,
            description=model_data.get("description"),
            schema=json.dumps(model_data),
        )
        store.rest_apis[rest_api.id].models[name] = model

    # create the RequestValidators defined at the top-level field `x-amazon-apigateway-request-validators`
    request_validators = resolved_schema.get(OpenAPIExt.REQUEST_VALIDATORS, {})
    request_validator_name_id_map = {}
    for validator_name, validator_schema in request_validators.items():
        validator_id = short_uid()[:6]

        validator = RequestValidator(
            id=validator_id,
            name=validator_name,
            validateRequestBody=validator_schema.get("validateRequestBody") or False,
            validateRequestParameters=validator_schema.get("validateRequestParameters") or False,
        )

        store.rest_apis[rest_api.id].validators[validator_id] = validator
        request_validator_name_id_map[validator_name] = validator_id

    # get default requestValidator if present
    default_req_validator_name = resolved_schema.get(OpenAPIExt.REQUEST_VALIDATOR)

    # $.securityDefinitions is Swagger 2.0
    # $.components.SecuritySchemes is OpenAPI 3.0
    security_data = resolved_schema.get("securityDefinitions") or resolved_schema.get(
        "components", {}
    ).get("securitySchemes", {})
    # create the defined authorizers, even if they're not used by any routes
    if security_data:
        create_authorizers(security_data)

    # create default authorizer if present
    default_authorizer = get_authorizer(resolved_schema)

    # determine base path
    # default basepath mode is "ignore"
    # see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api-basePath.html
    basepath_mode = query_params.get("basepath") or "ignore"
    base_path = ""

    if basepath_mode != "ignore":
        # in Swagger 2.0, the basePath is a top-level property
        if "basePath" in resolved_schema:
            base_path = resolved_schema["basePath"]

        # in OpenAPI 3.0, the basePath is contained in the server object
        elif "servers" in resolved_schema:
            servers_property = resolved_schema.get("servers", [])
            for server in servers_property:
                # first, we check if there are a basePath variable (1st choice)
                if "basePath" in server.get("variables", {}):
                    base_path = server["variables"]["basePath"].get("default", "")
                    break
                # TODO: this allows both absolute and relative part, but AWS might not manage relative
                url_path = urlparse.urlparse(server.get("url", "")).path
                if url_path:
                    base_path = url_path if url_path != "/" else ""
                    break

    # "split" keeps only what follows the first segment of the base path
    if basepath_mode == "split":
        base_path = base_path.strip("/").partition("/")[-1]
        base_path = f"/{base_path}" if base_path else ""

    api_paths = resolved_schema.get("paths", {})
    if api_paths:
        # Remove default root, then add paths from API spec
        # TODO: the default mode is now `merge`, not `overwrite` if using `PutRestApi`
        # TODO: quick hack for now, but do not remove the rootResource if the OpenAPI file is empty
        rest_api.resources = {}

    for path in api_paths:
        get_or_create_path(base_path + path, base_path=base_path)

    # binary types: in "merge" mode the existing types are kept and extended
    if mode == "merge":
        existing_binary_media_types = rest_api.binaryMediaTypes or []
    else:
        existing_binary_media_types = []

    rest_api.binaryMediaTypes = existing_binary_media_types + resolved_schema.get(
        OpenAPIExt.BINARY_MEDIA_TYPES, []
    )

    # resource policy attached to the API (always stored as a string)
    policy = resolved_schema.get(OpenAPIExt.POLICY)
    if policy:
        policy = json.dumps(policy) if isinstance(policy, dict) else str(policy)
        rest_api.policy = policy
    minimum_compression_size = resolved_schema.get(OpenAPIExt.MINIMUM_COMPRESSION_SIZE)
    if minimum_compression_size is not None:
        rest_api.minimum_compression_size = int(minimum_compression_size)
    endpoint_config = resolved_schema.get(OpenAPIExt.ENDPOINT_CONFIGURATION)
    if endpoint_config:
        # vpcEndpointIds imply a PRIVATE endpoint type unless types are set explicitly
        if endpoint_config.get("vpcEndpointIds"):
            endpoint_config.setdefault("types", ["PRIVATE"])
        rest_api.endpoint_configuration = endpoint_config

    api_key_source = resolved_schema.get(OpenAPIExt.API_KEY_SOURCE)
    if api_key_source is not None:
        rest_api.api_key_source = api_key_source.upper()

    documentation = resolved_schema.get(OpenAPIExt.DOCUMENTATION)
    if documentation:
        add_documentation_parts(rest_api_container, documentation)

    return rest_api, warnings
+
+
def is_greedy_path(path_part: str) -> bool:
    """Return True if the given path part is a greedy path parameter, e.g. ``{proxy+}``."""
    return path_part[:1] == "{" and path_part[-2:] == "+}"
+
+
def is_variable_path(path_part: str) -> bool:
    """Return True if the given path part is a path parameter placeholder, e.g. ``{id}``."""
    return path_part[:1] == "{" and path_part[-1:] == "}"
+
+
def get_domain_name_hash(domain_name: str) -> str:
    """
    Return a short (8 hex chars) SHAKE-128 hash of the given domain name, which helps construct
    regional domain names for APIs.
    TODO: use this in the future to dispatch API Gateway API invocations made to the regional domain name
    """
    digest = hashlib.shake_128(to_bytes(domain_name))
    return digest.hexdigest(4)
+
+
def get_regional_domain_name(domain_name: str) -> str:
    """
    Return the regional domain name for the given domain name.
    In real AWS, this would look something like: "d-oplm2qchq0.execute-api.us-east-1.amazonaws.com"
    In LocalStack, we're returning this format: "d-.execute-api.localhost.localstack.cloud"
    """
    localstack_hostname = localstack_host().host
    return f"d-{get_domain_name_hash(domain_name)}.execute-api.{localstack_hostname}"
diff --git a/localstack/utils/aws/__init__.py b/localstack-core/localstack/services/apigateway/legacy/__init__.py
similarity index 100%
rename from localstack/utils/aws/__init__.py
rename to localstack-core/localstack/services/apigateway/legacy/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/legacy/context.py b/localstack-core/localstack/services/apigateway/legacy/context.py
new file mode 100644
index 0000000000000..37b9725f3feb8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/context.py
@@ -0,0 +1,201 @@
+import base64
+import json
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from responses import Response
+
+from localstack.constants import HEADER_LOCALSTACK_EDGE_URL
+from localstack.utils.aws.aws_responses import parse_query_string
+from localstack.utils.strings import short_uid, to_str
+
+# type definition for data parameters (i.e., invocation payloads)
+InvocationPayload = Union[Dict, str, bytes]
+
+
class ApiGatewayVersion(Enum):
    # NOTE(review): presumably v1 = REST APIs and v2 = HTTP/WebSocket APIs — confirm with callers
    V1 = "v1"
    V2 = "v2"
+
+
class ApiInvocationContext:
    """Represents the context for an incoming API Gateway invocation."""

    # basic (raw) HTTP invocation details (method, path, data, headers)
    method: str
    path: str
    data: InvocationPayload
    headers: Dict[str, str]

    # raw URI (including query string) retrieved from werkzeug "RAW_URI" environment variable
    raw_uri: str

    # invocation context
    context: Dict[str, Any]
    # authentication info for this invocation
    auth_context: Dict[str, Any]

    # target API/resource details extracted from the invocation
    apigw_version: ApiGatewayVersion
    api_id: str
    stage: str
    account_id: str
    region_name: str
    # resource path, including any path parameter placeholders (e.g., "/my/path/{id}")
    resource_path: str
    integration: Dict
    resource: Dict
    # Invocation path with query string, e.g., "/my/path?test". Defaults to "path", can be used
    # to overwrite the actual API path, in case the path format "../_user_request_/.." is used.
    _path_with_query_string: str

    # response templates to be applied to the invocation result
    response_templates: Dict

    route: Dict
    connection_id: str
    path_params: Dict

    # response object
    response: Response

    # dict of stage variables (mapping names to values)
    stage_variables: Dict[str, str]

    # websockets route selection
    ws_route: str

    def __init__(
        self,
        method: str,
        path: str,
        data: Union[str, bytes],
        headers: Dict[str, str],
        api_id: str = None,
        stage: str = None,
        context: Dict[str, Any] = None,
        auth_context: Dict[str, Any] = None,
    ):
        self.method = method
        self._path = path
        self.data = data
        self.headers = headers
        # generate a fresh requestId unless an existing context is passed in
        self.context = {"requestId": short_uid()} if context is None else context
        self.auth_context = {} if auth_context is None else auth_context
        self.apigw_version = None
        self.api_id = api_id
        self.stage = stage
        self.region_name = None
        self.account_id = None
        self.integration = None
        self.resource = None
        self.resource_path = None
        self.path_with_query_string = None
        self.response_templates = {}
        self.stage_variables = {}
        self.path_params = {}
        self.route = None
        self.ws_route = None
        self.response = None

    @property
    def path(self) -> str:
        """The request path; the setter normalizes it to a single leading slash."""
        return self._path

    @path.setter
    def path(self, new_path: str):
        if isinstance(new_path, str):
            # normalize to exactly one leading slash
            new_path = "/" + new_path.lstrip("/")
        self._path = new_path

    @property
    def resource_id(self) -> Optional[str]:
        """Return the id of the matched API resource, or None if none was resolved yet."""
        return (self.resource or {}).get("id")

    @property
    def invocation_path(self) -> str:
        """Return the plain invocation path, without query parameters."""
        path = self.path_with_query_string or self.path
        return path.split("?")[0]

    @property
    def path_with_query_string(self) -> str:
        """Return invocation path with query string - defaults to the value of 'path', unless customized."""
        return self._path_with_query_string or self.path

    @path_with_query_string.setter
    def path_with_query_string(self, new_path: str):
        """Set a custom invocation path with query string (used to handle "../_user_request_/.." paths)."""
        if isinstance(new_path, str):
            new_path = "/" + new_path.lstrip("/")
        self._path_with_query_string = new_path

    def query_params(self) -> Dict[str, str]:
        """Extract the query parameters from the target URL or path in this request context."""
        query_string = self.path_with_query_string.partition("?")[2]
        return parse_query_string(query_string)

    @property
    def integration_uri(self) -> Optional[str]:
        """Return the integration URI, accepting both 'uri' and 'integrationUri' keys."""
        integration = self.integration or {}
        return integration.get("uri") or integration.get("integrationUri")

    @property
    def auth_identity(self) -> Optional[Dict]:
        """Return the 'identity' dict of the auth context, creating an empty one on first access."""
        if isinstance(self.auth_context, dict):
            if self.auth_context.get("identity") is None:
                self.auth_context["identity"] = {}
            return self.auth_context["identity"]

    @property
    def authorizer_type(self) -> str:
        """Return the type of the authorizer used for this invocation, if any."""
        if isinstance(self.auth_context, dict):
            return self.auth_context.get("authorizer_type") if self.auth_context else None

    @property
    def authorizer_result(self) -> Dict[str, Any]:
        """Return the result produced by the authorizer, if any."""
        if isinstance(self.auth_context, dict):
            return self.auth_context.get("authorizer") if self.auth_context else {}

    def is_websocket_request(self) -> bool:
        """Whether this request asks for a websocket upgrade (via the 'upgrade' header)."""
        upgrade_header = str(self.headers.get("upgrade") or "")
        return upgrade_header.lower() == "websocket"

    def is_v1(self) -> bool:
        """Whether this is an API Gateway v1 request"""
        return self.apigw_version == ApiGatewayVersion.V1

    def cookies(self) -> Optional[List[str]]:
        """Return the raw entries of the 'cookie' header, or None when it is absent/empty.

        NOTE(review): entries are split on ';' without stripping, so any whitespace
        around the separators is preserved in each entry.
        """
        if cookies := self.headers.get("cookie") or "":
            return list(cookies.split(";"))
        return None

    @property
    def is_data_base64_encoded(self) -> bool:
        """Whether the payload is binary, i.e. not JSON-serializable/UTF-8-decodable."""
        try:
            json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data)
            return False
        except UnicodeDecodeError:
            return True

    def data_as_string(self) -> str:
        """Return the request payload as a string, base64-encoding binary payloads."""
        try:
            return (
                json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data)
            )
        except UnicodeDecodeError:
            # we string encode our base64 as string as well
            return to_str(base64.b64encode(self.data))

    def _extract_host_from_header(self) -> str:
        """Return the bare host name (no scheme, path, or port) from the edge URL or Host header."""
        host = self.headers.get(HEADER_LOCALSTACK_EDGE_URL) or self.headers.get("host", "")
        return host.split("://")[-1].split("/")[0].split(":")[0]

    @property
    def domain_name(self) -> str:
        """The host name of this request, derived from the headers."""
        return self._extract_host_from_header()

    @property
    def domain_prefix(self) -> str:
        """The first dot-separated label of this request's host name."""
        host = self._extract_host_from_header()
        return host.split(".")[0]
diff --git a/localstack-core/localstack/services/apigateway/legacy/helpers.py b/localstack-core/localstack/services/apigateway/legacy/helpers.py
new file mode 100644
index 0000000000000..62a91a32e78b0
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/helpers.py
@@ -0,0 +1,711 @@
+import json
+import logging
+import re
+import time
+from collections import defaultdict
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
+from urllib import parse as urlparse
+
+from botocore.utils import InvalidArnException
+from moto.apigateway.models import apigateway_backends
+from requests.models import Response
+
+from localstack.aws.connect import connect_to
+from localstack.constants import (
+ APPLICATION_JSON,
+ DEFAULT_AWS_ACCOUNT_ID,
+ HEADER_LOCALSTACK_EDGE_URL,
+ PATH_USER_REQUEST,
+)
+from localstack.services.apigateway.helpers import REQUEST_TIME_DATE_FORMAT
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.utils import common
+from localstack.utils.aws import resources as resource_utils
+from localstack.utils.aws.arns import get_partition, parse_arn
+from localstack.utils.aws.aws_responses import requests_error_response_json, requests_response
+from localstack.utils.json import try_json
+from localstack.utils.numbers import is_number
+from localstack.utils.strings import canonicalize_bool_to_str, long_uid, to_str
+
LOG = logging.getLogger(__name__)

# regex path patterns
PATH_REGEX_MAIN = r"^/restapis/([A-Za-z0-9_\-]+)/[a-z]+(\?.*)?"
PATH_REGEX_SUB = r"^/restapis/([A-Za-z0-9_\-]+)/[a-z]+/([A-Za-z0-9_\-]+)/.*"
# matches paths of the form /restapis/<api>/resources/<resource>/methods/<method>
PATH_REGEX_TEST_INVOKE_API = r"^\/restapis\/([A-Za-z0-9_\-]+)\/resources\/([A-Za-z0-9_\-]+)\/methods\/([A-Za-z0-9_\-]+)/?(\?.*)?"

# regex path pattern for user requests, handles stages like $default
PATH_REGEX_USER_REQUEST = (
    r"^/restapis/([A-Za-z0-9_\\-]+)(?:/([A-Za-z0-9\_($|%%24)\\-]+))?/%s/(.*)$" % PATH_USER_REQUEST
)
# URL pattern for invocations
HOST_REGEX_EXECUTE_API = r"(?:.*://)?([a-zA-Z0-9]+)(?:(-vpce-[^.]+))?\.execute-api\.(.*)"

# template for SQS inbound data
APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE = (
    "Action=SendMessage&MessageBody=$util.base64Encode($input.json('$'))"
)
+
+
class ApiGatewayIntegrationError(Exception):
    """
    Base class for all ApiGateway Integration errors.
    Can be used as is or extended for common error types.
    These exceptions should be handled in one place, and bubble up from all others.
    """

    # human-readable error message, also used as the response body
    message: str
    # HTTP status code returned to the client
    status_code: int

    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.message = message
        self.status_code = status_code

    def to_response(self):
        """Convert this error into an HTTP response with a JSON `{"message": ...}` body."""
        return requests_response({"message": self.message}, status_code=self.status_code)
+
+
class IntegrationParameters(TypedDict):
    """Resolved integration request parameters, grouped by where they are applied."""

    # path variables to substitute into the integration URI
    path: dict[str, str]
    # query string parameters to append to the integration request
    querystring: dict[str, str]
    # headers to set on the integration request
    headers: dict[str, str]
+
+
class RequestParametersResolver:
    """
    Integration request data mapping expressions
    https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html

    Note: Use on REST APIs only
    """

    def resolve(self, context: ApiInvocationContext) -> IntegrationParameters:
        """
        Resolve method request parameters into integration request parameters.
        Integration request parameters, in the form of path variables, query strings
        or headers, can be mapped from any defined method request parameters
        and the payload.

        :return: IntegrationParameters
        """
        method_request_params: Dict[str, Any] = self.method_request_dict(context)

        # requestParameters: {
        #     "integration.request.path.pathParam": "method.request.header.Content-Type"
        #     "integration.request.querystring.who": "method.request.querystring.who",
        #     "integration.request.header.Content-Type": "'application/json'",
        # }
        request_params = context.integration.get("requestParameters", {})

        # resolve all integration request parameters with the already resolved method request parameters
        integrations_parameters = {}
        for k, v in request_params.items():
            if v.lower() in method_request_params:
                integrations_parameters[k] = method_request_params[v.lower()]
            else:
                # static values (single-quoted literals in the mapping expression)
                integrations_parameters[k] = v.replace("'", "")

        # build the integration parameters, bucketed by target (header/querystring/path)
        result: IntegrationParameters = IntegrationParameters(path={}, querystring={}, headers={})
        for k, v in integrations_parameters.items():
            # headers
            if k.startswith("integration.request.header."):
                header_name = k.split(".")[-1]
                result["headers"].update({header_name: v})

            # querystring
            if k.startswith("integration.request.querystring."):
                param_name = k.split(".")[-1]
                result["querystring"].update({param_name: v})

            # path
            if k.startswith("integration.request.path."):
                path_name = k.split(".")[-1]
                result["path"].update({path_name: v})

        return result

    def method_request_dict(self, context: ApiInvocationContext) -> Dict[str, Any]:
        """
        Build a dict with all method request parameters and their values.
        :return: dict with all method request parameters and their values,
        and all keys in lowercase
        """
        params: Dict[str, str] = {}

        # TODO: add support for multi-values headers and multi-values querystring

        for k, v in context.query_params().items():
            params[f"method.request.querystring.{k}"] = v

        for k, v in context.headers.items():
            params[f"method.request.header.{k}"] = v

        for k, v in context.path_params.items():
            params[f"method.request.path.{k}"] = v

        for k, v in context.stage_variables.items():
            params[f"stagevariables.{k}"] = v

        # TODO: add support for missing context variables, use `context.context` which contains most of the variables
        # see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference
        # - all `context.identity` fields
        # - protocol
        # - requestId, extendedRequestId
        # - all requestOverride, responseOverride
        # - requestTime, requestTimeEpoch
        # - resourcePath
        # - wafResponseCode, webaclArn
        params["context.accountId"] = context.account_id
        params["context.apiId"] = context.api_id
        params["context.domainName"] = context.domain_name
        params["context.httpMethod"] = context.method
        params["context.path"] = context.path
        params["context.resourceId"] = context.resource_id
        params["context.stage"] = context.stage

        # authorizer context values: booleans and numbers are canonicalized to strings
        auth_context_authorizer = context.auth_context.get("authorizer") or {}
        for k, v in auth_context_authorizer.items():
            if isinstance(v, bool):
                v = canonicalize_bool_to_str(v)
            elif is_number(v):
                v = str(v)

            params[f"context.authorizer.{k.lower()}"] = v

        if context.data:
            params["method.request.body"] = context.data

        # mapping expressions are matched case-insensitively, hence the lowercased keys
        return {key.lower(): val for key, val in params.items()}
+
+
class ResponseParametersResolver:
    """Resolves integration response data mapping expressions into method response headers."""

    def resolve(self, context: ApiInvocationContext) -> Dict[str, str]:
        """
        Resolve integration response parameters into method response parameters.
        Integration response parameters can map header, body,
        or static values to the header type of the method response.

        :return: dict with all method response parameters and their values
        """
        integration_request_params: Dict[str, Any] = self.integration_request_dict(context)

        # "responseParameters" : {
        #     "method.response.header.Location" : "integration.response.body.redirect.url",
        #     "method.response.header.x-user-id" : "integration.response.header.x-userid"
        # }
        integration_responses = context.integration.get("integrationResponses", {})
        # XXX Fix for other status codes context.response contains a response status code, but response
        # can be a LambdaResponse or Response object and the field is not the same, normalize it or use introspection
        response_params = integration_responses.get("200", {}).get("responseParameters", {})

        # resolve all integration request parameters with the already resolved method
        # request parameters
        method_parameters = {}
        for k, v in response_params.items():
            if v.lower() in integration_request_params:
                method_parameters[k] = integration_request_params[v.lower()]
            else:
                # static values (single-quoted literals in the mapping expression)
                method_parameters[k] = v.replace("'", "")

        # build the integration parameters: only header mappings are supported here
        result: Dict[str, str] = {}
        for k, v in method_parameters.items():
            # headers
            if k.startswith("method.response.header."):
                header_name = k.split(".")[-1]
                result[header_name] = v

        return result

    def integration_request_dict(self, context: ApiInvocationContext) -> Dict[str, Any]:
        """Build a lowercase-keyed dict of the integration request's headers and body."""
        params: Dict[str, str] = {}

        for k, v in context.headers.items():
            params[f"integration.request.header.{k}"] = v

        if context.data:
            params["integration.request.body"] = try_json(context.data)

        return {key.lower(): val for key, val in params.items()}
+
+
def make_json_response(message):
    """Serialize *message* as JSON and wrap it in an HTTP response with a JSON content type."""
    body = json.dumps(message)
    return requests_response(body, headers={"Content-Type": APPLICATION_JSON})
+
+
def make_error_response(message, code=400, error_type=None):
    """Build a JSON error response, deriving a default error type from the status code."""
    if not error_type:
        # 404s get a specific error type; anything else falls back to a generic one
        error_type = "NotFoundException" if code == 404 else "InvalidRequest"
    return requests_error_response_json(message, code=code, error_type=error_type)
+
+
def select_integration_response(matched_part: str, invocation_context: ApiInvocationContext):
    """
    Pick the integration response whose selectionPattern matches *matched_part*,
    falling back to the default response (the one without a selectionPattern).

    :raises ApiGatewayIntegrationError: when no pattern matches and no default exists
    """
    int_responses = invocation_context.integration.get("integrationResponses") or {}

    # partition the configured responses into pattern matches and defaults in one pass
    pattern_matches = []
    defaults = []
    for response in int_responses.values():
        pattern = response.get("selectionPattern")
        if not pattern:
            defaults.append(response)
        elif re.match(pattern, matched_part):
            pattern_matches.append(response)

    if pattern_matches:
        selected_response = pattern_matches[0]
        if len(pattern_matches) > 1:
            LOG.warning(
                "Multiple integration responses matching '%s' statuscode. Choosing '%s' (first).",
                matched_part,
                selected_response["statusCode"],
            )
        return selected_response

    # choose default return code
    if not defaults:
        raise ApiGatewayIntegrationError("Internal server error", 500)
    selected_response = defaults[0]
    if len(defaults) > 1:
        LOG.warning(
            "Multiple default integration responses. Choosing %s (first).",
            selected_response["statusCode"],
        )
    return selected_response
+
+
def make_accepted_response():
    """Return an empty HTTP 202 (Accepted) response."""
    accepted = Response()
    accepted.status_code = 202
    return accepted
+
+
def get_api_id_from_path(path):
    """Extract the API ID from an invocation path, trying the sub-path pattern first."""
    sub_match = re.match(PATH_REGEX_SUB, path)
    if sub_match:
        return sub_match.group(1)
    return re.match(PATH_REGEX_MAIN, path).group(1)
+
+
def is_test_invoke_method(method, path):
    """Return True if the request is a console test-invoke call (POST on the test-invoke path)."""
    if method != "POST":
        return False
    return re.match(PATH_REGEX_TEST_INVOKE_API, path) is not None
+
+
def get_stage_variables(context: ApiInvocationContext) -> Optional[Dict[str, str]]:
    """
    Fetch the stage variables for the context's API stage.

    Returns None for test-invoke requests (which carry no deployed stage), and
    an empty dict when no stage is set or the stage cannot be looked up.
    """
    if is_test_invoke_method(context.method, context.path):
        return None
    if not context.stage:
        return {}

    account_id, region_name = get_api_account_id_and_region(context.api_id)
    client = connect_to(aws_access_key_id=account_id, region_name=region_name).apigateway
    try:
        stage = client.get_stage(restApiId=context.api_id, stageName=context.stage)
    except Exception:
        # best-effort: a missing or broken stage simply yields no variables
        LOG.info("Failed to get stage %s for API id %s", context.stage, context.api_id)
        return {}
    return stage.get("variables", {})
+
+
def tokenize_path(path):
    """Split *path* into its segments, dropping any leading slashes."""
    trimmed = path.lstrip("/")
    return trimmed.split("/")
+
+
def extract_path_params(path: str, extracted_path: str) -> Dict[str, str]:
    """
    Extract path parameter values from *path* using the parameterized template
    *extracted_path* (e.g. "/foo/{bar}"). Greedy parameters ("{proxy+}")
    capture all remaining segments joined by "/".
    """
    template_parts = tokenize_path(extracted_path)
    request_parts = tokenize_path(path)

    path_params = {}
    for position, template_part in enumerate(template_parts):
        if "{" not in template_part:
            continue
        name = template_part[1:-1]  # strip the surrounding braces
        if name.endswith("+"):
            # greedy parameter: swallow the rest of the path
            path_params[name.rstrip("+")] = "/".join(request_parts[position:])
        else:
            path_params[name] = request_parts[position]
    return common.json_safe(path_params)
+
+
def extract_query_string_params(path: str) -> Tuple[str, Dict[str, str]]:
    """
    Split *path* into its path component and a dict of query string parameters.

    Single-valued parameters map to their value, multi-valued ones to the list
    of all values. Paths starting with "//" are kept verbatim (including any
    query string), preserving the double-slash edge case.
    """
    parsed = urlparse.urlparse(path)
    if not path.startswith("//"):
        path = parsed.path

    query_string_params = {
        name: values[0] if len(values) == 1 else values
        for name, values in urlparse.parse_qs(parsed.query).items()
    }

    return path or "/", query_string_params
+
+
def get_cors_response(headers):
    """
    Return a 200 response carrying permissive ("allow-all") CORS headers.

    TODO: in the future we should implement custom headers for CORS rules, as
    supported by API Gateway:
    http://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html
    """
    response = Response()
    response.status_code = 200
    response._content = ""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, PATCH",
        "Access-Control-Allow-Headers": "*",
    }
    for name, value in cors_headers.items():
        response.headers[name] = value
    return response
+
+
def get_apigateway_path_for_resource(
    api_id, resource_id, path_suffix="", resources=None, region_name=None
):
    """
    Recursively build the full resource path (e.g. "/a/b/c") for *resource_id*
    by walking up its parent chain. *resources* is fetched from the API Gateway
    backend when not provided by the caller.
    """
    if resources is None:
        apigateway = connect_to(region_name=region_name).apigateway
        resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]

    target_resource = [res for res in resources if res["id"] == resource_id][0]
    path_part = target_resource.get("pathPart", "")

    # prepend this resource's path part to the suffix accumulated so far
    if path_suffix and path_part:
        path_suffix = f"{path_part}/{path_suffix}"
    elif not path_suffix:
        path_suffix = path_part

    parent_id = target_resource.get("parentId")
    if not parent_id:
        # reached the root resource
        return f"/{path_suffix}"
    return get_apigateway_path_for_resource(
        api_id,
        parent_id,
        path_suffix=path_suffix,
        resources=resources,
        region_name=region_name,
    )
+
+
def get_rest_api_paths(account_id: str, region_name: str, rest_api_id: str):
    """Return a mapping of resource path -> resource dict for all resources of a REST API."""
    apigateway = connect_to(aws_access_key_id=account_id, region_name=region_name).apigateway
    resources = apigateway.get_resources(restApiId=rest_api_id, limit=100)
    resource_map = {}
    for resource in resources["items"]:
        # TODO: check if this is still required in the general case (can we rely on "path"
        #  being present?)
        path = resource.get("path") or get_apigateway_path_for_resource(
            rest_api_id, resource["id"], region_name=region_name
        )
        resource_map[path] = resource
    return resource_map
+
+
# TODO: Extract this to a set of rules that have precedence and easy to test individually.
#
# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-settings
# -method-request.html
# https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-routes.html
def get_resource_for_path(
    path: str, method: str, path_map: Dict[str, Dict]
) -> tuple[Optional[str], Optional[dict]]:
    """
    Find the API Gateway resource matching the request *path* and *method*.

    Precedence among multiple candidates (mirroring API Gateway routing): exact
    path match first, then parameterized matches (fewest parameters wins), then
    greedy proxy matches (longest template wins). Returns a tuple of
    (api_path template, resource dict), or (None, None) if nothing matches.
    """
    matches = []
    # creates a regex from the input path if there are parameters, e.g /foo/{bar}/baz -> /foo/[
    # ^\]+/baz, otherwise is a direct match.
    for api_path, details in path_map.items():
        # greedy "{proxy+}" params may span "/" segments; plain "{param}" may not
        api_path_regex = re.sub(r"{[^+]+\+}", r"[^\?#]+", api_path)
        api_path_regex = re.sub(r"{[^}]+}", r"[^/]+", api_path_regex)
        if re.match(r"^%s$" % api_path_regex, path):
            matches.append((api_path, details))

    # if there are no matches, it's not worth to proceed, bail here!
    if not matches:
        LOG.debug("No match found for path: '%s' and method: '%s'", path, method)
        return None, None

    if len(matches) == 1:
        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
        return matches[0]

    # so we have more than one match
    # /{proxy+} and /api/{proxy+} for inputs like /api/foo/bar
    # /foo/{param1}/baz and /foo/{param1}/{param2} for inputs like /for/bar/baz
    proxy_matches = []
    param_matches = []
    for match in matches:
        match_methods = list(match[1].get("resourceMethods", {}).keys())
        # only look for path matches if the request method is in the resource
        if method.upper() in match_methods or "ANY" in match_methods:
            # check if we have an exact match (exact matches take precedence) if the method is the same
            if match[0] == path:
                return match

            elif path_matches_pattern(path, match[0]):
                # parameters can fit in
                param_matches.append(match)
                continue

            proxy_matches.append(match)

    if param_matches:
        # count the amount of parameters, return the one with the least which is the most precise
        sorted_matches = sorted(param_matches, key=lambda x: x[0].count("{"))
        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
        return sorted_matches[0]

    if proxy_matches:
        # at this stage, we still have more than one match, but we have an eager example like
        # /{proxy+} or /api/{proxy+}, so we pick the best match by sorting by length, only if they have a method
        # that could match
        sorted_matches = sorted(proxy_matches, key=lambda x: len(x[0]), reverse=True)
        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
        return sorted_matches[0]

    # if there are no matches with a method that would match, return
    LOG.debug("No match found for method: '%s' for matched path: %s", method, path)
    return None, None
+
+
def path_matches_pattern(path, api_path):
    """
    Check whether *path* fits the parameterized template *api_path* by comparing
    segment counts and all non-parameter segments literally. Templates made up
    solely of parameters yield False (no literal segment to confirm the match).
    """
    template_parts = api_path.split("/")
    request_parts = path.split("/")
    if len(template_parts) != len(request_parts):
        return False

    param_regex = re.compile(r"{(.*)}")
    literal_checks = []
    for index, template_part in enumerate(template_parts):
        # skip parameter placeholders and empty segments (from the leading "/")
        if template_part and param_regex.match(template_part) is None:
            literal_checks.append(template_part == request_parts[index])

    return bool(literal_checks) and all(literal_checks)
+
+
def connect_api_gateway_to_sqs(gateway_name, stage_name, queue_arn, path, account_id, region_name):
    """
    Create an API Gateway with a single POST resource that forwards request
    bodies to the given SQS queue via an AWS integration.
    """
    try:
        arn = parse_arn(queue_arn)
        queue_name = arn["resource"]
        sqs_account = arn["account"]
        sqs_region = arn["region"]
    except InvalidArnException:
        # a plain queue name was given instead of an ARN: use the caller's account/region
        queue_name = queue_arn
        sqs_account = account_id
        sqs_region = region_name

    partition = get_partition(region_name)
    integration_uri = f"arn:{partition}:apigateway:{sqs_region}:sqs:path/{sqs_account}/{queue_name}"
    resources = {
        path.replace("/", ""): [
            {
                "httpMethod": "POST",
                "authorizationType": "NONE",
                "integrations": [
                    {
                        "type": "AWS",
                        "uri": integration_uri,
                        "requestTemplates": {
                            "application/json": APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE
                        },
                        "requestParameters": {
                            "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'"
                        },
                    }
                ],
            }
        ]
    }
    return resource_utils.create_api_gateway(
        name=gateway_name,
        resources=resources,
        stage_name=stage_name,
        client=connect_to(aws_access_key_id=sqs_account, region_name=sqs_region).apigateway,
    )
+
+
def get_target_resource_details(
    invocation_context: ApiInvocationContext,
) -> Tuple[Optional[str], Optional[dict]]:
    """Look up and return the API GW resource (path pattern + resource dict) for the given invocation context."""
    path_map = get_rest_api_paths(
        account_id=invocation_context.account_id,
        region_name=invocation_context.region_name,
        rest_api_id=invocation_context.api_id,
    )
    relative_path = invocation_context.invocation_path.rstrip("/") or "/"
    try:
        extracted_path, resource = get_resource_for_path(
            path=relative_path, method=invocation_context.method, path_map=path_map
        )
    except Exception:
        return None, None
    if not extracted_path:
        return None, None

    # cache the lookup results on the invocation context for downstream consumers
    invocation_context.resource = resource
    invocation_context.resource_path = extracted_path
    try:
        invocation_context.path_params = extract_path_params(
            path=relative_path, extracted_path=extracted_path
        )
    except Exception:
        # best-effort: an unparsable path simply yields no path parameters
        invocation_context.path_params = {}
    return extracted_path, resource
+
+
def get_target_resource_method(invocation_context: ApiInvocationContext) -> Optional[Dict]:
    """Look up and return the API GW resource method for the given invocation context."""
    _, resource = get_target_resource_details(invocation_context)
    if not resource:
        return None
    resource_methods = resource.get("resourceMethods") or {}
    # fall back to the catch-all "ANY" method when no exact method is defined
    method = resource_methods.get(invocation_context.method.upper())
    return method or resource_methods.get("ANY")
+
+
def event_type_from_route_key(invocation_context):
    """Map the websocket route key ($connect/$disconnect/anything else) to its event type."""
    route_key = invocation_context.route["RouteKey"]
    return {"$connect": "CONNECT", "$disconnect": "DISCONNECT"}.get(route_key, "MESSAGE")
+
+
def get_event_request_context(invocation_context: ApiInvocationContext):
    """
    Build the "requestContext" dict for a Lambda event from the invocation context.

    Resolves API id/stage first (side effect on the context), derives the
    relative path and query string from the raw URI, and extracts the owning
    account id from the integration URI when it points at a Lambda function.
    """
    method = invocation_context.method
    path = invocation_context.path
    headers = invocation_context.headers
    integration_uri = invocation_context.integration_uri
    resource_path = invocation_context.resource_path
    resource_id = invocation_context.resource_id

    # ensure api_id / stage / path_with_query_string are populated on the context
    set_api_id_stage_invocation_path(invocation_context)
    api_id = invocation_context.api_id
    stage = invocation_context.stage

    # strip the routing prefix (legacy "_user_request_" style, or "/<stage>")
    if "_user_request_" in invocation_context.raw_uri:
        full_path = invocation_context.raw_uri.partition("_user_request_")[2]
    else:
        full_path = invocation_context.raw_uri.removeprefix(f"/{stage}")
    relative_path, query_string_params = extract_query_string_params(path=full_path)

    source_ip = invocation_context.auth_identity.get("sourceIp")
    integration_uri = integration_uri or ""
    # parse the account id out of a Lambda integration URI; empty for other integrations
    account_id = integration_uri.split(":lambda:path")[-1].split(":function:")[0].split(":")[-1]
    account_id = account_id or DEFAULT_AWS_ACCOUNT_ID
    request_context = {
        "accountId": account_id,
        "apiId": api_id,
        "resourcePath": resource_path or relative_path,
        "domainPrefix": invocation_context.domain_prefix,
        "domainName": invocation_context.domain_name,
        "resourceId": resource_id,
        "requestId": long_uid(),
        "identity": {
            "accountId": account_id,
            "sourceIp": source_ip,
            "userAgent": headers.get("User-Agent"),
        },
        "httpMethod": method,
        "protocol": "HTTP/1.1",
        "requestTime": datetime.now(timezone.utc).strftime(REQUEST_TIME_DATE_FORMAT),
        "requestTimeEpoch": int(time.time() * 1000),
        "authorizer": {},
    }

    if invocation_context.is_websocket_request():
        request_context["connectionId"] = invocation_context.connection_id

    # set "authorizer" and "identity" event attributes from request context
    authorizer_result = invocation_context.authorizer_result
    if authorizer_result:
        request_context["authorizer"] = authorizer_result
    request_context["identity"].update(invocation_context.auth_identity or {})

    # test-invoke requests carry no deployed stage/path information
    if not is_test_invoke_method(method, path):
        request_context["path"] = (f"/{stage}" if stage else "") + relative_path
        request_context["stage"] = stage
    return request_context
+
+
def set_api_id_stage_invocation_path(
    invocation_context: ApiInvocationContext,
) -> ApiInvocationContext:
    """
    Populate api_id, stage and path_with_query_string on the invocation context,
    extracting them from the request path or the Host/edge-URL header.

    :raises Exception: if none of the known URL formats matches the request
    """
    # skip if all details are already available
    values = (
        invocation_context.api_id,
        invocation_context.stage,
        invocation_context.path_with_query_string,
    )
    if all(values):
        return invocation_context

    # skip if this is a websocket request
    if invocation_context.is_websocket_request():
        return invocation_context

    path = invocation_context.path
    headers = invocation_context.headers

    # try the three supported formats: legacy "_user_request_" path,
    # "execute-api" hostname, and console test-invoke path
    path_match = re.search(PATH_REGEX_USER_REQUEST, path)
    host_header = headers.get(HEADER_LOCALSTACK_EDGE_URL, "") or headers.get("Host") or ""
    host_match = re.search(HOST_REGEX_EXECUTE_API, host_header)
    test_invoke_match = re.search(PATH_REGEX_TEST_INVOKE_API, path)
    if path_match:
        api_id = path_match.group(1)
        stage = path_match.group(2)
        relative_path_w_query_params = "/%s" % path_match.group(3)
    elif host_match:
        api_id = extract_api_id_from_hostname_in_url(host_header)
        # for host-based addressing the stage is the first path segment
        stage = path.strip("/").split("/")[0]
        relative_path_w_query_params = "/%s" % path.lstrip("/").partition("/")[2]
    elif test_invoke_match:
        # test-invoke requests already carry their details on the context
        stage = invocation_context.stage
        api_id = invocation_context.api_id
        relative_path_w_query_params = invocation_context.path_with_query_string
    else:
        raise Exception(
            f"Unable to extract API Gateway details from request: {path} {dict(headers)}"
        )

    # set details in invocation context
    invocation_context.api_id = api_id
    invocation_context.stage = stage
    invocation_context.path_with_query_string = relative_path_w_query_params
    return invocation_context
+
+
def get_api_account_id_and_region(api_id: str) -> Tuple[Optional[str], Optional[str]]:
    """Return the (account_id, region_name) owning the given REST API ID, or (None, None)."""
    target = api_id.lower()
    for account_id, account in apigateway_backends.items():
        for region_name, region in account.items():
            # compare lower-cased keys to avoid case sensitivity issues
            if any(key.lower() == target for key in region.apis.keys()):
                return account_id, region_name
    return None, None
+
+
def extract_api_id_from_hostname_in_url(hostname: str) -> str:
    """Extract API ID 'id123' from URLs like https://id123.execute-api.localhost.localstack.cloud:4566"""
    return re.match(HOST_REGEX_EXECUTE_API, hostname).group(1)
+
+
def multi_value_dict_for_list(elements: Union[List, Dict]) -> Dict:
    """
    Convert a dict or a list of (key, value) pairs into a multi-value dict,
    mapping each (stringified) key to the tuple of all its values.
    """
    collected = defaultdict(list)
    for entry in elements:
        if isinstance(entry, (list, tuple)):
            key, value = entry
        else:
            # dict input: the iterated entry is the key itself
            key, value = entry, elements[entry]
        collected[to_str(key)].append(value)
    return {key: tuple(values) for key, values in collected.items()}
diff --git a/localstack-core/localstack/services/apigateway/legacy/integration.py b/localstack-core/localstack/services/apigateway/legacy/integration.py
new file mode 100644
index 0000000000000..12852fff266af
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/integration.py
@@ -0,0 +1,1119 @@
+import base64
+import json
+import logging
+import re
+from abc import ABC, abstractmethod
+from functools import lru_cache
+from http import HTTPMethod, HTTPStatus
+from typing import Any, Dict
+from urllib.parse import urljoin
+
+import requests
+from botocore.exceptions import ClientError
+from moto.apigatewayv2.exceptions import BadRequestException
+from requests import Response
+
+from localstack import config
+from localstack.aws.connect import (
+ INTERNAL_REQUEST_PARAMS_HEADER,
+ InternalRequestParameters,
+ connect_to,
+ dump_dto,
+)
+from localstack.constants import APPLICATION_JSON, HEADER_CONTENT_TYPE
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.legacy.helpers import (
+ ApiGatewayIntegrationError,
+ IntegrationParameters,
+ RequestParametersResolver,
+ ResponseParametersResolver,
+ extract_path_params,
+ extract_query_string_params,
+ get_event_request_context,
+ get_stage_variables,
+ make_error_response,
+ multi_value_dict_for_list,
+)
+from localstack.services.apigateway.legacy.templates import (
+ MappingTemplates,
+ RequestTemplates,
+ ResponseTemplates,
+)
+from localstack.services.stepfunctions.stepfunctions_utils import await_sfn_execution_result
+from localstack.utils import common
+from localstack.utils.aws.arns import ARN_PARTITION_REGEX, extract_region_from_arn, get_partition
+from localstack.utils.aws.aws_responses import (
+ LambdaResponse,
+ request_response_stream,
+ requests_response,
+)
+from localstack.utils.aws.client_types import ServicePrincipal
+from localstack.utils.aws.request_context import mock_aws_request_headers
+from localstack.utils.aws.templating import VtlTemplate
+from localstack.utils.collections import dict_multi_values, remove_attributes
+from localstack.utils.common import make_http_request, to_str
+from localstack.utils.http import add_query_params_to_url, canonicalize_headers, parse_request_data
+from localstack.utils.json import json_safe, try_json
+from localstack.utils.strings import camel_to_snake_case, to_bytes
+
+LOG = logging.getLogger(__name__)
+
+
class IntegrationAccessError(ApiGatewayIntegrationError):
    """Raised when the backend integration cannot be accessed (surfaced as HTTP 500)."""

    def __init__(self):
        # API Gateway reports access failures as a generic internal server error
        super().__init__("Internal server error", 500)
+
+
class BackendIntegration(ABC):
    """Abstract base class representing a backend integration"""

    def __init__(self):
        # request/response VTL template renderers and parameter-mapping resolvers
        # shared by all concrete integrations
        self.request_templates = RequestTemplates()
        self.response_templates = ResponseTemplates()
        self.request_params_resolver = RequestParametersResolver()
        self.response_params_resolver = ResponseParametersResolver()

    @abstractmethod
    def invoke(self, invocation_context: ApiInvocationContext):
        """Execute the integration for the given invocation and return the response."""
        pass

    @classmethod
    def _create_response(cls, status_code, headers, data=""):
        """Build a plain Response object from status code, headers and body."""
        response = Response()
        response.status_code = status_code
        response.headers = headers
        response._content = data
        return response

    @classmethod
    def apply_request_parameters(
        cls, integration_params: IntegrationParameters, headers: Dict[str, Any]
    ):
        """Merge the resolved integration request header parameters into *headers* in place."""
        for k, v in integration_params.get("headers").items():
            headers.update({k: v})

    @classmethod
    def apply_response_parameters(
        cls, invocation_context: ApiInvocationContext, response: Response
    ):
        """
        Apply the matching integrationResponse's header parameter mappings to *response*.

        Selects the integration response by the response's status code, falling
        back to the single configured response when the code is not found.
        """
        integration = invocation_context.integration
        integration_responses = integration.get("integrationResponses") or {}
        if not integration_responses:
            return response
        entries = list(integration_responses.keys())
        return_code = str(response.status_code)
        if return_code not in entries:
            if len(entries) > 1:
                # ambiguous: multiple configured codes but none matches — leave as-is
                LOG.info("Found multiple integration response status codes: %s", entries)
                return response
            return_code = entries[0]
        response_params = integration_responses[return_code].get("responseParameters", {})
        for key, value in response_params.items():
            # TODO: add support for method.response.body, etc ...
            if str(key).lower().startswith("method.response.header."):
                header_name = key[len("method.response.header.") :]
                response.headers[header_name] = value.strip("'")
        return response

    @classmethod
    def render_template_selection_expression(cls, invocation_context: ApiInvocationContext):
        """
        Determine the request template key for the invocation.

        Returns the rendered templateSelectionExpression, or — when none is
        configured — the request's content type if a template exists for it,
        else "$default".
        """
        integration = invocation_context.integration
        template_selection_expression = integration.get("templateSelectionExpression")

        # AWS template selection relies on the content type
        # to select an input template or output mapping AND template selection expressions.
        # All of them will fall back to the $default template if a matching template is not found.
        if not template_selection_expression:
            content_type = invocation_context.headers.get(HEADER_CONTENT_TYPE, APPLICATION_JSON)
            # NOTE(review): "RequestTemplates" (capital R) differs from the lowercase
            # "requestTemplates" key used elsewhere — verify this is intentional
            if integration.get("RequestTemplates", {}).get(content_type):
                return content_type
            return "$default"

        data = try_json(invocation_context.data)
        variables = {
            "request": {
                "header": invocation_context.headers,
                "querystring": invocation_context.query_params(),
                "body": data,
                "context": invocation_context.context or {},
                "stage_variables": invocation_context.stage_variables or {},
            }
        }
        return VtlTemplate().render_vtl(template_selection_expression, variables) or "$default"
+
+
@lru_cache(maxsize=64)
def get_service_factory(region_name: str, role_arn: str):
    """
    Return a (cached) client factory for the given region, assuming *role_arn*
    on behalf of API Gateway when one is provided.
    """
    if not role_arn:
        return connect_to(region_name=region_name)
    return connect_to.with_assumed_role(
        role_arn=role_arn,
        region_name=region_name,
        service_principal=ServicePrincipal.apigateway,
        session_name="BackplaneAssumeRoleSession",
    )
+
+
@lru_cache(maxsize=64)
def get_internal_mocked_headers(
    service_name: str,
    region_name: str,
    source_arn: str,
    role_arn: str | None,
) -> dict[str, str]:
    """
    Build (cached) mock AWS request headers for an internal cross-service call,
    optionally assuming *role_arn* first, and tag the request with the
    API Gateway service principal and *source_arn*.
    """
    access_key_id = None
    if role_arn:
        sts = connect_to(region_name=region_name).sts.request_metadata(
            service_principal=ServicePrincipal.apigateway
        )
        credentials = sts.assume_role(
            RoleArn=role_arn, RoleSessionName="BackplaneAssumeRoleSession"
        )["Credentials"]
        access_key_id = credentials["AccessKeyId"]

    headers = mock_aws_request_headers(
        service=service_name, aws_access_key_id=access_key_id, region_name=region_name
    )
    # attach internal call metadata so the receiving service knows the caller
    dto = InternalRequestParameters(
        service_principal=ServicePrincipal.apigateway, source_arn=source_arn
    )
    headers[INTERNAL_REQUEST_PARAMS_HEADER] = dump_dto(dto)
    return headers
+
+
def get_source_arn(invocation_context: ApiInvocationContext):
    """Build the execute-api source ARN for the current invocation."""
    ctx = invocation_context
    partition = get_partition(ctx.region_name)
    return (
        f"arn:{partition}:execute-api:{ctx.region_name}:{ctx.account_id}:"
        f"{ctx.api_id}/{ctx.stage}/{ctx.method}{ctx.path}"
    )
+
+
def call_lambda(
    function_arn: str, event: bytes, asynchronous: bool, invocation_context: ApiInvocationContext
) -> str:
    """
    Invoke the Lambda function behind the integration and return its payload as
    a string ("" for asynchronous/Event invocations, which carry no payload).
    """
    clients = get_service_factory(
        region_name=extract_region_from_arn(function_arn),
        role_arn=invocation_context.integration.get("credentials"),
    )
    lambda_client = clients.lambda_.request_metadata(
        service_principal=ServicePrincipal.apigateway,
        source_arn=get_source_arn(invocation_context),
    )
    invocation_type = "Event" if asynchronous else "RequestResponse"
    inv_result = lambda_client.invoke(
        FunctionName=function_arn, Payload=event, InvocationType=invocation_type
    )
    payload = inv_result.get("Payload")
    if not payload:
        return ""
    return to_str(payload.read())
+
+
class LambdaProxyIntegration(BackendIntegration):
    """AWS_PROXY integration: forwards the raw request as a Lambda proxy event."""

    @classmethod
    def update_content_length(cls, response: Response):
        """Set the Content-Length header from the response body length."""
        if response and response.content is not None:
            response.headers["Content-Length"] = str(len(response.content))

    @classmethod
    def lambda_result_to_response(cls, result) -> LambdaResponse:
        """
        Convert a Lambda proxy result (dict or JSON string) into a LambdaResponse.

        Results with unexpected top-level keys are rejected with a 502, matching
        API Gateway's behavior for malformed proxy outputs.
        """
        response = LambdaResponse()
        response.headers.update({"content-type": "application/json"})
        parsed_result = result if isinstance(result, dict) else json.loads(str(result or "{}"))
        parsed_result = common.json_safe(parsed_result)
        parsed_result = {} if parsed_result is None else parsed_result

        # only the documented proxy-output keys are allowed
        if set(parsed_result) - {
            "body",
            "statusCode",
            "headers",
            "isBase64Encoded",
            "multiValueHeaders",
        }:
            LOG.warning(
                'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."}\n Lambda output: %s',
                parsed_result,
            )
            response.status_code = 502
            response._content = json.dumps({"message": "Internal server error"})
            return response

        response.status_code = int(parsed_result.get("statusCode", 200))
        parsed_headers = parsed_result.get("headers", {})
        if parsed_headers is not None:
            response.headers.update(parsed_headers)
        try:
            result_body = parsed_result.get("body")
            if isinstance(result_body, dict):
                response._content = json.dumps(result_body)
            else:
                body_bytes = to_bytes(to_str(result_body or ""))
                if parsed_result.get("isBase64Encoded", False):
                    body_bytes = base64.b64decode(body_bytes)
                response._content = body_bytes
        except Exception as e:
            LOG.warning("Couldn't set Lambda response content: %s", e)
            response._content = "{}"
        response.multi_value_headers = parsed_result.get("multiValueHeaders") or {}
        return response

    @staticmethod
    def fix_proxy_path_params(path_params):
        """Rename the greedy "proxy+" path parameter to "proxy" in place."""
        proxy_path_param_value = path_params.get("proxy+")
        if not proxy_path_param_value:
            return
        del path_params["proxy+"]
        path_params["proxy"] = proxy_path_param_value

    @staticmethod
    def validate_integration_method(invocation_context: ApiInvocationContext):
        """Reject non-POST integration methods — Lambda proxy integrations must use POST."""
        if invocation_context.integration["httpMethod"] != HTTPMethod.POST:
            raise ApiGatewayIntegrationError("Internal server error", status_code=500)

    @classmethod
    def construct_invocation_event(
        cls, method, path, headers, data, query_string_params=None, is_base64_encoded=False
    ):
        """Build the Lambda proxy event dict from the raw request parts."""
        query_string_params = query_string_params or parse_request_data(method, path, "")

        # single-value view: the last value wins for multi-valued parameters
        single_value_query_string_params = {
            k: v[-1] if isinstance(v, list) else v for k, v in query_string_params.items()
        }
        # Some headers get capitalized like in CloudFront, see
        # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html#add-origin-custom-headers-forward-authorization
        # It seems AWS_PROXY lambda integrations are behind cloudfront, as seen by the returned headers in AWS
        to_capitalize: list[str] = ["authorization"]  # some headers get capitalized
        headers = {
            k.capitalize() if k.lower() in to_capitalize else k: v for k, v in headers.items()
        }

        # AWS canonical header names, converting them to lower-case
        headers = canonicalize_headers(headers)

        return {
            "path": "/" + path.lstrip("/"),
            "headers": headers,
            "multiValueHeaders": multi_value_dict_for_list(headers),
            "body": data,
            "isBase64Encoded": is_base64_encoded,
            "httpMethod": method,
            "queryStringParameters": single_value_query_string_params or None,
            "multiValueQueryStringParameters": dict_multi_values(query_string_params) or None,
        }

    @classmethod
    def process_apigateway_invocation(
        cls,
        func_arn,
        path,
        payload,
        invocation_context: ApiInvocationContext,
        query_string_params=None,
    ) -> str:
        """
        Build the proxy event and invoke the Lambda function; returns the raw
        payload string ("" for async invocations, None on unexpected errors).

        :raises IntegrationAccessError: when the Lambda call fails with a ClientError
        """
        if (path_params := invocation_context.path_params) is None:
            path_params = {}
        if (request_context := invocation_context.context) is None:
            request_context = {}
        try:
            resource_path = invocation_context.resource_path or path
            event = cls.construct_invocation_event(
                invocation_context.method,
                path,
                invocation_context.headers,
                payload,
                query_string_params,
                invocation_context.is_data_base64_encoded,
            )
            # copy before mutating: fix_proxy_path_params renames keys in place
            path_params = dict(path_params)
            cls.fix_proxy_path_params(path_params)
            event["pathParameters"] = path_params
            event["resource"] = resource_path
            event["requestContext"] = request_context
            event["stageVariables"] = invocation_context.stage_variables
            LOG.debug(
                "Running Lambda function %s from API Gateway invocation: %s %s",
                func_arn,
                invocation_context.method or "GET",
                path,
            )
            asynchronous = invocation_context.headers.get("X-Amz-Invocation-Type") == "'Event'"
            return call_lambda(
                function_arn=func_arn,
                event=to_bytes(json.dumps(event)),
                asynchronous=asynchronous,
                invocation_context=invocation_context,
            )
        except ClientError as e:
            raise IntegrationAccessError() from e
        except Exception as e:
            LOG.warning(
                "Unable to run Lambda function on API Gateway message: %s",
                e,
            )

    def invoke(self, invocation_context: ApiInvocationContext):
        """
        Execute the Lambda proxy integration and map its result onto the response.

        NOTE(review): the result-mapping below largely duplicates
        lambda_result_to_response() — consider deduplicating; verify the minor
        differences (to_str on the body) first.
        """
        self.validate_integration_method(invocation_context)
        uri = (
            invocation_context.integration.get("uri")
            or invocation_context.integration.get("integrationUri")
            or ""
        )
        invocation_context.context = get_event_request_context(invocation_context)
        relative_path, query_string_params = extract_query_string_params(
            path=invocation_context.path_with_query_string
        )
        try:
            path_params = extract_path_params(
                path=relative_path, extracted_path=invocation_context.resource_path
            )
            invocation_context.path_params = path_params
        except Exception:
            # best-effort: proceed without path parameters
            pass

        # derive the function ARN from a Lambda integration URI if present
        func_arn = uri
        if ":lambda:path" in uri:
            func_arn = uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0]

        if invocation_context.authorizer_type:
            invocation_context.context["authorizer"] = invocation_context.authorizer_result

        payload = self.request_templates.render(invocation_context)

        result = self.process_apigateway_invocation(
            func_arn=func_arn,
            path=relative_path,
            payload=payload,
            invocation_context=invocation_context,
            query_string_params=query_string_params,
        )

        response = LambdaResponse()
        response.headers.update({"content-type": "application/json"})
        parsed_result = json.loads(str(result or "{}"))
        parsed_result = common.json_safe(parsed_result)
        parsed_result = {} if parsed_result is None else parsed_result

        # only the documented proxy-output keys are allowed
        if set(parsed_result) - {
            "body",
            "statusCode",
            "headers",
            "isBase64Encoded",
            "multiValueHeaders",
        }:
            LOG.warning(
                'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."}\n Lambda output: %s',
                parsed_result,
            )
            response.status_code = 502
            response._content = json.dumps({"message": "Internal server error"})
            return response

        response.status_code = int(parsed_result.get("statusCode", 200))
        parsed_headers = parsed_result.get("headers", {})
        if parsed_headers is not None:
            response.headers.update(parsed_headers)
        try:
            result_body = parsed_result.get("body")
            if isinstance(result_body, dict):
                response._content = json.dumps(result_body)
            else:
                body_bytes = to_bytes(result_body or "")
                if parsed_result.get("isBase64Encoded", False):
                    body_bytes = base64.b64decode(body_bytes)
                response._content = body_bytes
        except Exception as e:
            LOG.warning("Couldn't set Lambda response content: %s", e)
            response._content = "{}"
        response.multi_value_headers = parsed_result.get("multiValueHeaders") or {}

        # apply custom response template
        self.update_content_length(response)
        invocation_context.response = response

        return invocation_context.response
+
+
class LambdaIntegration(BackendIntegration):
    """Integration type "AWS" backed by a Lambda function (non-proxy).

    The request payload is rendered through the configured request templates
    before invocation, and the raw invocation result is passed through the
    response templates.
    """

    def invoke(self, invocation_context: ApiInvocationContext):
        """Render the request template, invoke the Lambda function, and apply response templates."""
        invocation_context.stage_variables = get_stage_variables(invocation_context)
        headers = invocation_context.headers

        # resolve integration parameters
        integration_parameters = self.request_params_resolver.resolve(context=invocation_context)
        headers.update(integration_parameters.get("headers", {}))

        # expose the authorizer result to the request templates via $context.authorizer
        if invocation_context.authorizer_type:
            invocation_context.context["authorizer"] = invocation_context.authorizer_result

        func_arn = self._lambda_integration_uri(invocation_context)
        # integration type "AWS" is only supported for WebSocket APIs and REST
        # API (v1), but the template selection expression is only supported for
        # Websockets
        if invocation_context.is_websocket_request():
            template_key = self.render_template_selection_expression(invocation_context)
            payload = self.request_templates.render(invocation_context, template_key)
        else:
            payload = self.request_templates.render(invocation_context)

        # the invocation-type header value may arrive quoted (e.g. 'Event'), hence the strip
        asynchronous = headers.get("X-Amz-Invocation-Type", "").strip("'") == "Event"
        try:
            result = call_lambda(
                function_arn=func_arn,
                event=to_bytes(payload or ""),
                asynchronous=asynchronous,
                invocation_context=invocation_context,
            )
        except ClientError as e:
            raise IntegrationAccessError() from e

        # default lambda status code is 200
        response = LambdaResponse()
        response.status_code = 200
        response._content = result

        # asynchronous ("Event") invocations return no body
        if asynchronous:
            response._content = ""

        # response template
        invocation_context.response = response
        self.response_templates.render(invocation_context)
        invocation_context.response.headers["Content-Length"] = str(len(response.content or ""))

        headers = self.response_params_resolver.resolve(invocation_context)
        invocation_context.response.headers.update(headers)

        return invocation_context.response

    def _lambda_integration_uri(self, invocation_context: ApiInvocationContext):
        """Resolve the target Lambda function ARN from the integration URI.

        Stage variables in the URI are rendered first; if the URI is a
        ":lambda:path"-style invocation URI, the bare function ARN is
        extracted from it.
        https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html
        """
        uri = (
            invocation_context.integration.get("uri")
            or invocation_context.integration.get("integrationUri")
            or ""
        )
        variables = {"stageVariables": invocation_context.stage_variables}
        uri = VtlTemplate().render_vtl(uri, variables)
        if ":lambda:path" in uri:
            uri = uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0]
        return uri
+
+
class KinesisIntegration(BackendIntegration):
    """Integration with Kinesis (PutRecord/PutRecords/ListStreams).

    Renders the request payload via templates (or, for HTTP APIs with the
    "Kinesis-PutRecord" subtype, via requestParameters), then forwards it to
    the Kinesis backend with the matching X-Amz-Target header.
    """

    def invoke(self, invocation_context: ApiInvocationContext):
        integration = invocation_context.integration
        integration_type_orig = integration.get("type") or integration.get("integrationType") or ""
        integration_type = integration_type_orig.upper()
        uri = integration.get("uri") or integration.get("integrationUri") or ""
        integration_subtype = integration.get("integrationSubtype")

        # map the integration URI / subtype onto the Kinesis API operation
        if uri.endswith("kinesis:action/PutRecord") or integration_subtype == "Kinesis-PutRecord":
            target = "Kinesis_20131202.PutRecord"
        elif uri.endswith("kinesis:action/PutRecords"):
            target = "Kinesis_20131202.PutRecords"
        elif uri.endswith("kinesis:action/ListStreams"):
            target = "Kinesis_20131202.ListStreams"
        else:
            LOG.info(
                "Unexpected API Gateway integration URI '%s' for integration type %s",
                uri,
                integration_type,
            )
            target = ""

        try:
            # TODO: this "event" request context is used in multiple places, we probably
            # want to refactor this into a model class.
            # I'd argue we should not make a decision on the event_request_context inside the integration because,
            # it's different between API types (REST, HTTP, WebSocket) and per event version
            invocation_context.context = get_event_request_context(invocation_context)
            invocation_context.stage_variables = get_stage_variables(invocation_context)

            # integration type "AWS" is only supported for WebSocket APIs and REST
            # API (v1), but the template selection expression is only supported for
            # Websockets
            if invocation_context.is_websocket_request():
                template_key = self.render_template_selection_expression(invocation_context)
                payload = self.request_templates.render(invocation_context, template_key)
            else:
                # For HTTP APIs with a specified integration_subtype,
                # a key-value map specifying parameters that are passed to AWS_PROXY integrations
                if integration_type == "AWS_PROXY" and integration_subtype == "Kinesis-PutRecord":
                    payload = self._create_request_parameters(invocation_context)
                else:
                    payload = self.request_templates.render(invocation_context)

        except Exception as e:
            # fix: include a %s placeholder so the exception is actually interpolated
            # (previously the extra argument triggered a logging formatting error)
            LOG.warning("Unable to convert API Gateway payload to str: %s", e)
            raise

        # forward records to target kinesis stream
        headers = get_internal_mocked_headers(
            service_name="kinesis",
            region_name=invocation_context.region_name,
            role_arn=invocation_context.integration.get("credentials"),
            source_arn=get_source_arn(invocation_context),
        )
        headers["X-Amz-Target"] = target

        result = common.make_http_request(
            url=config.internal_service_url(), data=payload, headers=headers, method="POST"
        )

        # apply response template
        invocation_context.response = result
        self.response_templates.render(invocation_context)
        return invocation_context.response

    @classmethod
    def _validate_required_params(cls, request_parameters: Dict[str, Any]) -> None:
        """Validate the required Kinesis-PutRecord parameters.

        Raises BadRequestException naming the first missing parameter.
        https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-aws-services-reference.html#Kinesis-PutRecord
        """
        if not request_parameters:
            raise BadRequestException("Missing required parameters")
        stream_name = request_parameters.get("StreamName")
        partition_key = request_parameters.get("PartitionKey")
        data = request_parameters.get("Data")

        if not stream_name:
            raise BadRequestException("StreamName")

        if not partition_key:
            raise BadRequestException("PartitionKey")

        if not data:
            raise BadRequestException("Data")

    def _create_request_parameters(self, invocation_context: ApiInvocationContext) -> str:
        """Build the JSON-serialized PutRecord payload from the integration's
        requestParameters, rendering each value as a VTL template.

        Note: returns a JSON string (annotation fixed; it previously claimed
        ``Dict[str, Any]`` although the function ends with ``json.dumps``).
        """
        request_parameters = invocation_context.integration.get("requestParameters", {})
        self._validate_required_params(request_parameters)

        variables = {
            "request": {
                "header": invocation_context.headers,
                "querystring": invocation_context.query_params(),
                "body": invocation_context.data_as_string(),
                "context": invocation_context.context or {},
                "stage_variables": invocation_context.stage_variables or {},
            }
        }

        if invocation_context.headers.get("Content-Type") == "application/json":
            variables["request"]["body"] = json.loads(invocation_context.data_as_string())
        else:
            # AWS parity: a missing content type still yields a valid response from Kinesis
            variables["request"]["body"] = try_json(invocation_context.data_as_string())

        # Required parameters
        payload = {
            "StreamName": VtlTemplate().render_vtl(request_parameters.get("StreamName"), variables),
            "Data": VtlTemplate().render_vtl(request_parameters.get("Data"), variables),
            "PartitionKey": VtlTemplate().render_vtl(
                request_parameters.get("PartitionKey"), variables
            ),
        }
        # Optional Parameters
        if "ExplicitHashKey" in request_parameters:
            payload["ExplicitHashKey"] = VtlTemplate().render_vtl(
                request_parameters.get("ExplicitHashKey"), variables
            )
        if "SequenceNumberForOrdering" in request_parameters:
            payload["SequenceNumberForOrdering"] = VtlTemplate().render_vtl(
                request_parameters.get("SequenceNumberForOrdering"), variables
            )
        # TODO: the Region parameter is not supported yet
        # if "Region" in request_parameters:
        #     payload["Region"] = VtlTemplate().render_vtl(
        #         request_parameters.get("Region"), variables
        #     )
        return json.dumps(payload)
+
+
class DynamoDBIntegration(BackendIntegration):
    def invoke(self, invocation_context: ApiInvocationContext):
        """Render the request template and execute the DynamoDB action encoded in the integration URI."""
        # TODO we might want to do it plain http instead of using boto here, like kinesis
        integration = invocation_context.integration
        uri = integration.get("uri") or integration.get("integrationUri") or ""

        # example: arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
        action = uri.split(":dynamodb:action/")[1].split("&")[0]

        # render request template; the rendered result must be valid JSON,
        # and its keys become the boto3 call's keyword arguments
        payload = self.request_templates.render(invocation_context)
        payload = json.loads(payload)

        # determine target method via reflection
        clients = get_service_factory(
            region_name=invocation_context.region_name,
            role_arn=invocation_context.integration.get("credentials"),
        )
        dynamo_client = clients.dynamodb.request_metadata(
            service_principal=ServicePrincipal.apigateway,
            source_arn=get_source_arn(invocation_context),
        )
        # e.g. "PutItem" -> boto3 client method "put_item"
        method_name = camel_to_snake_case(action)
        client_method = getattr(dynamo_client, method_name, None)
        if not client_method:
            raise Exception(f"Unsupported action {action} in API Gateway integration URI {uri}")

        # run request against DynamoDB backend
        try:
            response = client_method(**payload)
        except ClientError as e:
            response = e.response
            # The request body is packed into the "Error" field. To make the response match AWS, we will remove that
            # field and merge with the response dict
            error = response.pop("Error", {})
            error.pop("Code", None)  # the Code is also something not relayed
            response |= error

        status_code = response.get("ResponseMetadata", {}).get("HTTPStatusCode", 200)
        # apply response templates
        response_content = json.dumps(remove_attributes(response, ["ResponseMetadata"]))
        response_obj = requests_response(content=response_content)
        response = self.response_templates.render(invocation_context, response=response_obj)

        # construct final response
        # TODO: set response header based on response templates
        headers = {HEADER_CONTENT_TYPE: APPLICATION_JSON}
        response = requests_response(response, headers=headers, status_code=status_code)

        return response
+
+
+class S3Integration(BackendIntegration):
+ # target ARN patterns
+ TARGET_REGEX_PATH_S3_URI = rf"{ARN_PARTITION_REGEX}:apigateway:[a-zA-Z0-9\-]+:s3:path/(?P[^/]+)/(?P.+)$"
+ TARGET_REGEX_ACTION_S3_URI = rf"{ARN_PARTITION_REGEX}:apigateway:[a-zA-Z0-9\-]+:s3:action/(?:GetObject&Bucket\=(?P[^&]+)&Key\=(?P.+))$"
+
+ def invoke(self, invocation_context: ApiInvocationContext):
+ invocation_path = invocation_context.path_with_query_string
+ integration = invocation_context.integration
+ path_params = invocation_context.path_params
+ relative_path, query_string_params = extract_query_string_params(path=invocation_path)
+ uri = integration.get("uri") or integration.get("integrationUri") or ""
+
+ s3 = connect_to().s3
+ uri = apply_request_parameters(
+ uri,
+ integration=integration,
+ path_params=path_params,
+ query_params=query_string_params,
+ )
+ uri_match = re.match(self.TARGET_REGEX_PATH_S3_URI, uri) or re.match(
+ self.TARGET_REGEX_ACTION_S3_URI, uri
+ )
+ if not uri_match:
+ msg = "Request URI does not match s3 specifications"
+ LOG.warning(msg)
+ return make_error_response(msg, 400)
+
+ bucket, object_key = uri_match.group("bucket", "object")
+ LOG.debug("Getting request for bucket %s object %s", bucket, object_key)
+
+ action = None
+ invoke_args = {"Bucket": bucket, "Key": object_key}
+ match invocation_context.method:
+ case HTTPMethod.GET:
+ action = s3.get_object
+ case HTTPMethod.PUT:
+ invoke_args["Body"] = invocation_context.data
+ action = s3.put_object
+ case HTTPMethod.DELETE:
+ action = s3.delete_object
+ case _:
+ make_error_response(
+ "The specified method is not allowed against this resource.", 405
+ )
+
+ try:
+ object = action(**invoke_args)
+ except s3.exceptions.NoSuchKey:
+ msg = f"Object {object_key} not found"
+ LOG.debug(msg)
+ return make_error_response(msg, 404)
+
+ headers = mock_aws_request_headers(
+ service="s3",
+ aws_access_key_id=invocation_context.account_id,
+ region_name=invocation_context.region_name,
+ )
+
+ if object.get("ContentType"):
+ headers["Content-Type"] = object["ContentType"]
+
+ # stream used so large files do not fill memory
+ if body := object.get("Body"):
+ response = request_response_stream(stream=body, headers=headers)
+ else:
+ response = requests_response(content="", headers=headers)
+ return response
+
+
class HTTPIntegration(BackendIntegration):
    @staticmethod
    def _set_http_apigw_headers(headers: Dict[str, Any], invocation_context: ApiInvocationContext):
        """Drop the incoming Host header and tag the outgoing request with the API id."""
        # NOTE(review): raises KeyError if no "host" header is present — assumes
        # every incoming request carries one; confirm
        del headers["host"]
        headers["x-amzn-apigateway-api-id"] = invocation_context.api_id
        return headers

    def invoke(self, invocation_context: ApiInvocationContext):
        """Proxy the request to the HTTP(S) endpoint configured in the integration URI."""
        invocation_path = invocation_context.path_with_query_string
        integration = invocation_context.integration
        path_params = invocation_context.path_params
        method = invocation_context.method
        headers = invocation_context.headers

        relative_path, query_string_params = extract_query_string_params(path=invocation_path)
        uri = integration.get("uri") or integration.get("integrationUri") or ""

        # resolve integration parameters
        integration_parameters = self.request_params_resolver.resolve(context=invocation_context)
        headers.update(integration_parameters.get("headers", {}))
        self._set_http_apigw_headers(headers, invocation_context)

        if ":servicediscovery:" in uri:
            # check if this is a servicediscovery integration URI; if so, resolve
            # the first registered instance and target it directly
            client = connect_to().servicediscovery
            service_id = uri.split("/")[-1]
            instances = client.list_instances(ServiceId=service_id)["Instances"]
            instance = (instances or [None])[0]
            if instance and instance.get("Id"):
                uri = "http://%s/%s" % (instance["Id"], invocation_path.lstrip("/"))

        # apply custom request template
        invocation_context.context = get_event_request_context(invocation_context)
        invocation_context.stage_variables = get_stage_variables(invocation_context)
        payload = self.request_templates.render(invocation_context)

        if isinstance(payload, dict):
            payload = json.dumps(payload)

        # https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html
        # HTTP integration URIs
        #
        # A stage variable can be used as part of an HTTP integration URL, as shown in the following examples:
        #
        # A full URI without protocol - http://${stageVariables.<variable_name>}
        # A full domain - http://${stageVariables.<variable_name>}/resource/operation
        # A subdomain - http://${stageVariables.<variable_name>}.example.com/resource/operation
        # A path - http://example.com/${stageVariables.<variable_name>}/bar
        # A query string - http://example.com/foo?q=${stageVariables.<variable_name>}
        render_vars = {"stageVariables": invocation_context.stage_variables}
        rendered_uri = VtlTemplate().render_vtl(uri, render_vars)

        uri = apply_request_parameters(
            rendered_uri,
            integration=integration,
            path_params=path_params,
            query_params=query_string_params,
        )
        result = requests.request(method=method, url=uri, data=payload, headers=headers)
        if not result.ok:
            LOG.debug(
                "Upstream response from <%s> %s returned with status code: %s",
                method,
                uri,
                result.status_code,
            )
        # apply custom response template for non-proxy integration
        invocation_context.response = result
        # NOTE(review): direct key access — assumes "type" is always set here
        # (v1 REST APIs); would raise KeyError for "integrationType"-only configs
        if integration["type"] != "HTTP_PROXY":
            self.response_templates.render(invocation_context)
        return invocation_context.response
+
+
class SQSIntegration(BackendIntegration):
    def invoke(self, invocation_context: ApiInvocationContext):
        """Forward the rendered (query-protocol) payload to the target SQS queue."""
        integration = invocation_context.integration
        uri = integration.get("uri") or integration.get("integrationUri") or ""
        # the integration URI is expected to end with ".../<account_id>/<queue_name>"
        account_id, queue = uri.split("/")[-2:]
        # region is the 4th component of the ARN-style URI
        region_name = uri.split(":")[3]

        headers = get_internal_mocked_headers(
            service_name="sqs",
            region_name=region_name,
            role_arn=invocation_context.integration.get("credentials"),
            source_arn=get_source_arn(invocation_context),
        )

        # integration parameters can override headers
        integration_parameters = self.request_params_resolver.resolve(context=invocation_context)
        headers.update(integration_parameters.get("headers", {}))
        if "Accept" not in headers:
            headers["Accept"] = "application/json"

        if invocation_context.is_websocket_request():
            template_key = self.render_template_selection_expression(invocation_context)
            payload = self.request_templates.render(invocation_context, template_key)
        else:
            payload = self.request_templates.render(invocation_context)

        # not sure what the purpose of this is, but it's in the original code
        # TODO: check if this is still needed
        # NOTE(review): assumes the rendered payload is an SQS query-protocol
        # string, since extra parameters are appended with "&" — confirm for
        # JSON-protocol payloads
        if "GetQueueUrl" in payload or "CreateQueue" in payload:
            new_request = f"{payload}&QueueName={queue}"
        else:
            queue_url = f"{config.internal_service_url()}/queue/{region_name}/{account_id}/{queue}"
            new_request = f"{payload}&QueueUrl={queue_url}"

        url = urljoin(config.internal_service_url(), f"/queue/{region_name}/{account_id}/{queue}")
        response = common.make_http_request(url, method="POST", headers=headers, data=new_request)

        # apply response template
        invocation_context.response = response
        response._content = self.response_templates.render(invocation_context)
        return response
+
+
class SNSIntegration(BackendIntegration):
    def invoke(self, invocation_context: ApiInvocationContext) -> Response:
        """Render the request template and publish the payload to the SNS backend."""
        # TODO: check if the logic below is accurate - cover with snapshot tests!
        invocation_context.context = get_event_request_context(invocation_context)
        invocation_context.stage_variables = get_stage_variables(invocation_context)
        integration = invocation_context.integration
        uri = integration.get("uri") or integration.get("integrationUri") or ""

        try:
            if invocation_context.is_websocket_request():
                template_key = self.render_template_selection_expression(invocation_context)
                payload = self.request_templates.render(invocation_context, template_key)
            else:
                payload = self.request_templates.render(invocation_context)
        except Exception as e:
            # fix: include a %s placeholder so the exception is actually interpolated
            # (previously the extra argument triggered a logging formatting error)
            LOG.warning("Failed to apply template for SNS integration: %s", e)
            raise
        # region is the 4th component of the ARN-style integration URI
        region_name = uri.split(":")[3]
        headers = mock_aws_request_headers(
            service="sns", aws_access_key_id=invocation_context.account_id, region_name=region_name
        )
        response = make_http_request(
            config.internal_service_url(), method="POST", headers=headers, data=payload
        )

        invocation_context.response = response
        response._content = self.response_templates.render(invocation_context)
        return self.apply_response_parameters(invocation_context, response)
+
+
class StepFunctionIntegration(BackendIntegration):
    @classmethod
    def _validate_required_params(cls, request_parameters: Dict[str, Any]) -> None:
        """Validate AWS_PROXY request parameters; raise BadRequestException naming the missing one."""
        if not request_parameters:
            raise BadRequestException("Missing required parameters")
        # stateMachineArn and input are required
        state_machine_arn_param = request_parameters.get("StateMachineArn")
        input_param = request_parameters.get("Input")

        if not state_machine_arn_param:
            raise BadRequestException("StateMachineArn")

        if not input_param:
            raise BadRequestException("Input")

    def invoke(self, invocation_context: ApiInvocationContext):
        """Invoke the Step Functions action named by the integration URI and render the result."""
        uri = (
            invocation_context.integration.get("uri")
            or invocation_context.integration.get("integrationUri")
            or ""
        )
        # e.g. ".../states:action/StartExecution" -> "StartExecution"
        action = uri.split("/")[-1]

        # NOTE(review): "IntegrationType" (capitalized) differs from the
        # "type"/"integrationType" keys used by the other integrations — confirm
        if invocation_context.integration.get("IntegrationType") == "AWS_PROXY":
            payload = self._create_request_parameters(invocation_context)
        elif APPLICATION_JSON in invocation_context.integration.get("requestTemplates", {}):
            payload = self.request_templates.render(invocation_context)
            payload = json.loads(payload)
        else:
            payload = json.loads(invocation_context.data)

        client = get_service_factory(
            region_name=invocation_context.region_name,
            role_arn=invocation_context.integration.get("credentials"),
        ).stepfunctions

        # the "input" field must be passed as a JSON string, not a dict
        if isinstance(payload.get("input"), dict):
            payload["input"] = json.dumps(payload["input"])

        # Hot fix since step functions local package responses: Unsupported Operation: 'StartSyncExecution'
        method_name = (
            camel_to_snake_case(action) if action != "StartSyncExecution" else "start_execution"
        )

        try:
            # call method on step function client
            method = getattr(client, method_name)
        except AttributeError:
            msg = f"Invalid step function action: {method_name}"
            LOG.error(msg)
            return StepFunctionIntegration._create_response(
                HTTPStatus.BAD_REQUEST.value,
                headers={"Content-Type": APPLICATION_JSON},
                data=json.dumps({"message": msg}),
            )

        result = method(**payload)
        result = json_safe(remove_attributes(result, ["ResponseMetadata"]))
        response = StepFunctionIntegration._create_response(
            HTTPStatus.OK.value,
            mock_aws_request_headers(
                "stepfunctions",
                aws_access_key_id=invocation_context.account_id,
                region_name=invocation_context.region_name,
            ),
            data=json.dumps(result),
        )
        if action == "StartSyncExecution":
            # poll for the execution result and return it
            result = await_sfn_execution_result(result["executionArn"])
            result_status = result.get("status")
            if result_status != "SUCCEEDED":
                return StepFunctionIntegration._create_response(
                    HTTPStatus.INTERNAL_SERVER_ERROR.value,
                    headers={"Content-Type": APPLICATION_JSON},
                    data=json.dumps(
                        {
                            "message": "StepFunctions execution %s failed with status '%s'"
                            % (result["executionArn"], result_status)
                        }
                    ),
                )

            result = json_safe(result)
            response = requests_response(content=result)

        # apply response templates
        invocation_context.response = response
        response._content = self.response_templates.render(invocation_context)
        return response

    def _create_request_parameters(self, invocation_context):
        """Build the StartExecution payload from AWS_PROXY requestParameters (Input rendered as VTL)."""
        request_parameters = invocation_context.integration.get("requestParameters", {})
        self._validate_required_params(request_parameters)

        variables = {
            "request": {
                "header": invocation_context.headers,
                "querystring": invocation_context.query_params(),
                "body": invocation_context.data_as_string(),
                "context": invocation_context.context or {},
                "stage_variables": invocation_context.stage_variables or {},
            }
        }
        rendered_input = VtlTemplate().render_vtl(request_parameters.get("Input"), variables)
        return {
            "stateMachineArn": request_parameters.get("StateMachineArn"),
            "input": rendered_input,
        }
+
+
class MockIntegration(BackendIntegration):
    @classmethod
    def check_passthrough_behavior(cls, passthrough_behavior: str, request_template: str):
        """Raise MappingTemplates.UnsupportedMediaType when the configured passthrough
        behavior does not allow a request without a matching template."""
        return MappingTemplates(passthrough_behavior).check_passthrough_behavior(request_template)

    def invoke(self, invocation_context: ApiInvocationContext) -> Response:
        """Return a mocked response built purely from the request/response templates (no backend call)."""
        passthrough_behavior = invocation_context.integration.get("passthroughBehavior") or ""
        request_template = invocation_context.integration.get("requestTemplates", {}).get(
            invocation_context.headers.get(HEADER_CONTENT_TYPE, APPLICATION_JSON)
        )

        # based on the configured passthrough behavior and the existence of template or not,
        # we proceed calling the integration or raise an exception.
        try:
            self.check_passthrough_behavior(passthrough_behavior, request_template)
        except MappingTemplates.UnsupportedMediaType:
            return MockIntegration._create_response(
                HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value,
                headers={"Content-Type": APPLICATION_JSON},
                data=json.dumps({"message": f"{HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase}"}),
            )

        # request template rendering
        request_payload = self.request_templates.render(invocation_context)

        # mapping is done based on "statusCode" field, we default to 200
        status_code = 200
        if invocation_context.headers.get(HEADER_CONTENT_TYPE) == APPLICATION_JSON:
            try:
                mock_response = json.loads(request_payload)
                status_code = mock_response.get("statusCode", status_code)
            except Exception as e:
                # the rendered template was not valid JSON -> 500
                LOG.warning("failed to deserialize request payload after transformation: %s", e)
                http_status = HTTPStatus(500)
                return MockIntegration._create_response(
                    http_status.value,
                    headers={"Content-Type": APPLICATION_JSON},
                    data=json.dumps({"message": f"{http_status.phrase}"}),
                )

        # response template
        response = MockIntegration._create_response(
            status_code, invocation_context.headers, data=request_payload
        )
        response._content = self.response_templates.render(invocation_context, response=response)
        # apply response parameters
        response = self.apply_response_parameters(invocation_context, response)
        if not invocation_context.headers.get(HEADER_CONTENT_TYPE):
            invocation_context.headers.update({HEADER_CONTENT_TYPE: APPLICATION_JSON})
        return response
+
+
# TODO: remove once we migrate all usages to `apply_request_parameters` on BackendIntegration
def apply_request_parameters(
    uri: str, integration: Dict[str, Any], path_params: Dict[str, str], query_params: Dict[str, str]
):
    """Substitute mapped path parameters into the URI and filter query parameters.

    Path placeholders like ``{id}`` are replaced only when the integration's
    requestParameters map ``integration.request.path.<name>`` to
    ``method.request.path.<name>``. For non-HTTP_PROXY integrations, query
    parameters without a matching mapping are removed (``query_params`` is
    mutated in place). Returns the URI with the surviving query parameters
    appended.
    """
    params_mapping = integration.get("requestParameters")
    uri = uri or integration.get("uri") or integration.get("integrationUri") or ""
    if params_mapping:
        # replace {name} placeholders for every mapped path parameter
        for name, value in path_params.items():
            if params_mapping.get(f"integration.request.path.{name}") == f"method.request.path.{name}":
                uri = uri.replace(f"{{{name}}}", value)

        # drop unmapped query params (proxy integrations forward everything)
        if integration.get("type") != "HTTP_PROXY":
            for name in list(query_params):
                expected = f"method.request.querystring.{name}"
                if params_mapping.get(f"integration.request.querystring.{name}") != expected:
                    del query_params[name]

    return add_query_params_to_url(uri, query_params)
+
+
class EventBridgeIntegration(BackendIntegration):
    def invoke(self, invocation_context: ApiInvocationContext):
        """Render the request template and forward the event to the EventBridge backend."""
        invocation_context.context = get_event_request_context(invocation_context)
        try:
            payload = self.request_templates.render(invocation_context)
        except Exception as e:
            LOG.warning("Failed to apply template for EventBridge integration: %s", e)
            raise
        uri = (
            invocation_context.integration.get("uri")
            or invocation_context.integration.get("integrationUri")
            or ""
        )
        # region is the 4th component of the ARN-style integration URI
        region_name = uri.split(":")[3]
        headers = get_internal_mocked_headers(
            service_name="events",
            region_name=region_name,
            role_arn=invocation_context.integration.get("credentials"),
            source_arn=get_source_arn(invocation_context),
        )
        # relay the operation target from the incoming request
        # NOTE(review): this sets a None value when the incoming request has no
        # X-Amz-Target header — confirm that is intended
        headers.update({"X-Amz-Target": invocation_context.headers.get("X-Amz-Target")})
        response = make_http_request(
            config.internal_service_url(), method="POST", headers=headers, data=payload
        )

        invocation_context.response = response

        self.response_templates.render(invocation_context)
        invocation_context.response.headers["Content-Length"] = str(len(response.content or ""))
        return invocation_context.response
diff --git a/localstack-core/localstack/services/apigateway/legacy/invocations.py b/localstack-core/localstack/services/apigateway/legacy/invocations.py
new file mode 100644
index 0000000000000..18085fc52e22e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/invocations.py
@@ -0,0 +1,400 @@
+import json
+import logging
+import re
+
+from jsonschema import ValidationError, validate
+from requests.models import Response
+from werkzeug.exceptions import NotFound
+
+from localstack.aws.connect import connect_to
+from localstack.constants import APPLICATION_JSON
+from localstack.services.apigateway.helpers import (
+ EMPTY_MODEL,
+ ModelResolver,
+ get_apigateway_store_for_invocation,
+)
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.legacy.helpers import (
+ get_cors_response,
+ get_event_request_context,
+ get_target_resource_details,
+ make_error_response,
+ set_api_id_stage_invocation_path,
+)
+from localstack.services.apigateway.legacy.integration import (
+ ApiGatewayIntegrationError,
+ DynamoDBIntegration,
+ EventBridgeIntegration,
+ HTTPIntegration,
+ KinesisIntegration,
+ LambdaIntegration,
+ LambdaProxyIntegration,
+ MockIntegration,
+ S3Integration,
+ SNSIntegration,
+ SQSIntegration,
+ StepFunctionIntegration,
+)
+from localstack.services.apigateway.models import ApiGatewayStore
+from localstack.utils.aws.arns import ARN_PARTITION_REGEX
+from localstack.utils.aws.aws_responses import requests_response
+
+LOG = logging.getLogger(__name__)
+
+
+class AuthorizationError(Exception):
+ message: str
+ status_code: int
+
+ def __init__(self, message: str, status_code: int):
+ super().__init__(message)
+ self.message = message
+ self.status_code = status_code
+
+ def to_response(self):
+ return requests_response({"message": self.message}, status_code=self.status_code)
+
+
+# we separate those 2 exceptions to allow better GatewayResponse support later on
+class BadRequestParameters(Exception):
+ message: str
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def to_response(self):
+ return requests_response({"message": self.message}, status_code=400)
+
+
+class BadRequestBody(Exception):
+ message: str
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def to_response(self):
+ return requests_response({"message": self.message}, status_code=400)
+
+
+class RequestValidator:
+ __slots__ = ["context", "rest_api_container"]
+
+ def __init__(self, context: ApiInvocationContext, store: ApiGatewayStore = None):
+ self.context = context
+ store = store or get_apigateway_store_for_invocation(context=context)
+ if not (container := store.rest_apis.get(context.api_id)):
+ # TODO: find the right exception
+ raise NotFound()
+ self.rest_api_container = container
+
+ def validate_request(self) -> None:
+ """
+ :raises BadRequestParameters if the request has required parameters which are not present
+ :raises BadRequestBody if the request has required body validation with a model and it does not respect it
+ :return: None
+ """
+ # make all the positive checks first
+ if self.context.resource is None or "resourceMethods" not in self.context.resource:
+ return
+
+ resource_methods = self.context.resource["resourceMethods"]
+ if self.context.method not in resource_methods and "ANY" not in resource_methods:
+ return
+
+ # check if there is validator for the resource
+ resource = resource_methods.get(self.context.method, resource_methods.get("ANY", {}))
+ if not (resource.get("requestValidatorId") or "").strip():
+ return
+
+ # check if there is a validator for this request
+ validator = self.rest_api_container.validators.get(resource["requestValidatorId"])
+ if not validator:
+ return
+
+ if self.should_validate_request(validator) and (
+ missing_parameters := self._get_missing_required_parameters(resource)
+ ):
+ message = f"Missing required request parameters: [{', '.join(missing_parameters)}]"
+ raise BadRequestParameters(message=message)
+
+ if self.should_validate_body(validator) and not self._is_body_valid(resource):
+ raise BadRequestBody(message="Invalid request body")
+
+ return
+
+ def _is_body_valid(self, resource) -> bool:
+ # if there's no model to validate the body, use the Empty model
+ # https://docs.aws.amazon.com/cdk/api/v1/docs/@aws-cdk_aws-apigateway.EmptyModel.html
+ if not (request_models := resource.get("requestModels")):
+ model_name = EMPTY_MODEL
+ else:
+ model_name = request_models.get(
+ APPLICATION_JSON, request_models.get("$default", EMPTY_MODEL)
+ )
+
+ model_resolver = ModelResolver(
+ rest_api_container=self.rest_api_container,
+ model_name=model_name,
+ )
+
+ # try to get the resolved model first
+ resolved_schema = model_resolver.get_resolved_model()
+ if not resolved_schema:
+ LOG.exception(
+ "An exception occurred while trying to validate the request: could not find the model"
+ )
+ return False
+
+ try:
+ # if the body is empty, replace it with an empty JSON body
+ validate(
+ instance=json.loads(self.context.data or "{}"),
+ schema=resolved_schema,
+ )
+ return True
+ except ValidationError as e:
+ LOG.warning("failed to validate request body %s", e)
+ return False
+ except json.JSONDecodeError as e:
+ LOG.warning("failed to validate request body, request data is not valid JSON %s", e)
+ return False
+
+ def _get_missing_required_parameters(self, resource) -> list[str]:
+ missing_params = []
+ if not (request_parameters := resource.get("requestParameters")):
+ return missing_params
+
+ for request_parameter, required in sorted(request_parameters.items()):
+ if not required:
+ continue
+
+ param_type, param_value = request_parameter.removeprefix("method.request.").split(".")
+ match param_type:
+ case "header":
+ is_missing = param_value not in self.context.headers
+ case "path":
+ is_missing = param_value not in self.context.resource_path
+ case "querystring":
+ is_missing = param_value not in self.context.query_params()
+ case _:
+ # TODO: method.request.body is not specified in the documentation, and requestModels should do it
+ # verify this
+ is_missing = False
+
+ if is_missing:
+ missing_params.append(param_value)
+
+ return missing_params
+
+ @staticmethod
+ def should_validate_body(validator):
+ return validator["validateRequestBody"]
+
+ @staticmethod
+ def should_validate_request(validator):
+ return validator.get("validateRequestParameters")
+
+
+# ------------
+# API METHODS
+# ------------
+
+
+def validate_api_key(api_key: str, invocation_context: ApiInvocationContext):
+ usage_plan_ids = []
+ client = connect_to(
+ aws_access_key_id=invocation_context.account_id, region_name=invocation_context.region_name
+ ).apigateway
+
+ usage_plans = client.get_usage_plans()
+ for item in usage_plans.get("items", []):
+ api_stages = item.get("apiStages", [])
+ usage_plan_ids.extend(
+ item.get("id")
+ for api_stage in api_stages
+ if (
+ api_stage.get("stage") == invocation_context.stage
+ and api_stage.get("apiId") == invocation_context.api_id
+ )
+ )
+ for usage_plan_id in usage_plan_ids:
+ usage_plan_keys = client.get_usage_plan_keys(usagePlanId=usage_plan_id)
+ for key in usage_plan_keys.get("items", []):
+ if key.get("value") == api_key:
+ # check if the key is enabled
+ api_key = client.get_api_key(apiKey=key.get("id"))
+ return api_key.get("enabled") in ("true", True)
+
+ return False
+
+
+def is_api_key_valid(invocation_context: ApiInvocationContext) -> bool:
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-key-source.html
+ client = connect_to(
+ aws_access_key_id=invocation_context.account_id, region_name=invocation_context.region_name
+ ).apigateway
+ rest_api = client.get_rest_api(restApiId=invocation_context.api_id)
+
+ # The source of the API key for metering requests according to a usage plan.
+ # Valid values are:
+ # - HEADER to read the API key from the X-API-Key header of a request.
+ # - AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.
+
+ api_key_source = rest_api.get("apiKeySource")
+ match api_key_source:
+ case "HEADER":
+ api_key = invocation_context.headers.get("X-API-Key")
+ return validate_api_key(api_key, invocation_context) if api_key else False
+ case "AUTHORIZER":
+ api_key = invocation_context.auth_identity.get("apiKey")
+ return validate_api_key(api_key, invocation_context) if api_key else False
+
+
+def update_content_length(response: Response):
+ if response and response.content is not None:
+ response.headers["Content-Length"] = str(len(response.content))
+
+
+def invoke_rest_api_from_request(invocation_context: ApiInvocationContext):
+    """Resolve API id/stage/invocation path from the request, then invoke the API.
+
+    Authorization failures are converted into their HTTP error response rather
+    than propagating to the caller.
+    """
+    set_api_id_stage_invocation_path(invocation_context)
+    try:
+        return invoke_rest_api(invocation_context)
+    except AuthorizationError as e:
+        LOG.warning(
+            "Authorization error while invoking API Gateway ID %s: %s",
+            invocation_context.api_id,
+            e,
+            # include the traceback only when debug logging is enabled
+            exc_info=LOG.isEnabledFor(logging.DEBUG),
+        )
+        return e.to_response()
+
+
+def invoke_rest_api(invocation_context: ApiInvocationContext):
+    """Validate the request, resolve its method integration, and dispatch it.
+
+    Returns a 404 error response when the resource or integration cannot be
+    found, a 400 response when request validation fails, and raises
+    AuthorizationError (403) when a required API key is missing or invalid.
+    """
+    invocation_path = invocation_context.path_with_query_string
+    raw_path = invocation_context.path or invocation_path
+    method = invocation_context.method
+    headers = invocation_context.headers
+
+    extracted_path, resource = get_target_resource_details(invocation_context)
+    if not resource:
+        return make_error_response("Unable to find path %s" % invocation_context.path, 404)
+
+    # validate request
+    validator = RequestValidator(invocation_context)
+    try:
+        validator.validate_request()
+    except (BadRequestParameters, BadRequestBody) as e:
+        return e.to_response()
+
+    # enforce API key requirement configured on the method, if any
+    api_key_required = resource.get("resourceMethods", {}).get(method, {}).get("apiKeyRequired")
+    if api_key_required and not is_api_key_valid(invocation_context):
+        raise AuthorizationError("Forbidden", 403)
+
+    resource_methods = resource.get("resourceMethods", {})
+    resource_method = resource_methods.get(method, {})
+    if not resource_method:
+        # HttpMethod: '*'
+        # ResourcePath: '/*' - produces 'X-AMAZON-APIGATEWAY-ANY-METHOD'
+        resource_method = resource_methods.get("ANY", {}) or resource_methods.get(
+            "X-AMAZON-APIGATEWAY-ANY-METHOD", {}
+        )
+    method_integration = resource_method.get("methodIntegration")
+    if not method_integration:
+        if method == "OPTIONS" and "Origin" in headers:
+            # default to returning CORS headers if this is an OPTIONS request
+            return get_cors_response(headers)
+        return make_error_response(
+            "Unable to find integration for: %s %s (%s)" % (method, invocation_path, raw_path),
+            404,
+        )
+
+    # update fields in invocation context, then forward request to next handler
+    invocation_context.resource_path = extracted_path
+    invocation_context.integration = method_integration
+
+    return invoke_rest_api_integration(invocation_context)
+
+
+def invoke_rest_api_integration(invocation_context: ApiInvocationContext):
+    """Invoke the backend integration, translating failures into error responses.
+
+    ApiGatewayIntegrationError carries its own HTTP representation; any other
+    exception is logged and rendered as a generic 400 response.
+    """
+    try:
+        response = invoke_rest_api_integration_backend(invocation_context)
+        # TODO remove this setter once all the integrations are migrated to the new response
+        #  handling
+        invocation_context.response = response
+        return response
+    except ApiGatewayIntegrationError as e:
+        LOG.warning(
+            "Error while invoking integration for ApiGateway ID %s: %s",
+            invocation_context.api_id,
+            e,
+            exc_info=LOG.isEnabledFor(logging.DEBUG),
+        )
+        return e.to_response()
+    except Exception as e:
+        msg = f"Error invoking integration for API Gateway ID '{invocation_context.api_id}': {e}"
+        LOG.exception(msg)
+        return make_error_response(msg, 400)
+
+
+# This function is patched downstream for backend integrations that are only available
+# in Pro (potentially to be replaced with a runtime hook in the future).
+def invoke_rest_api_integration_backend(invocation_context: ApiInvocationContext):
+ # define local aliases from invocation context
+ method = invocation_context.method
+ headers = invocation_context.headers
+ integration = invocation_context.integration
+ integration_type_orig = integration.get("type") or integration.get("integrationType") or ""
+ integration_type = integration_type_orig.upper()
+ integration_method = integration.get("httpMethod")
+ uri = integration.get("uri") or integration.get("integrationUri") or ""
+
+ if (re.match(f"{ARN_PARTITION_REGEX}:apigateway:", uri) and ":lambda:path" in uri) or re.match(
+ f"{ARN_PARTITION_REGEX}:lambda", uri
+ ):
+ invocation_context.context = get_event_request_context(invocation_context)
+ if integration_type == "AWS_PROXY":
+ return LambdaProxyIntegration().invoke(invocation_context)
+ elif integration_type == "AWS":
+ return LambdaIntegration().invoke(invocation_context)
+
+ elif integration_type == "AWS":
+ if "kinesis:action/" in uri:
+ return KinesisIntegration().invoke(invocation_context)
+
+ if "states:action/" in uri:
+ return StepFunctionIntegration().invoke(invocation_context)
+
+ if ":dynamodb:action" in uri:
+ return DynamoDBIntegration().invoke(invocation_context)
+
+ if "s3:path/" in uri or "s3:action/" in uri:
+ return S3Integration().invoke(invocation_context)
+
+ if integration_method == "POST" and ":sqs:path" in uri:
+ return SQSIntegration().invoke(invocation_context)
+
+ if method == "POST" and ":sns:path" in uri:
+ return SNSIntegration().invoke(invocation_context)
+
+ if (
+ method == "POST"
+ and re.match(f"{ARN_PARTITION_REGEX}:apigateway:", uri)
+ and "events:action/PutEvents" in uri
+ ):
+ return EventBridgeIntegration().invoke(invocation_context)
+
+ elif integration_type in ["HTTP_PROXY", "HTTP"]:
+ return HTTPIntegration().invoke(invocation_context)
+
+ elif integration_type == "MOCK":
+ return MockIntegration().invoke(invocation_context)
+
+ if method == "OPTIONS":
+ # fall back to returning CORS headers if this is an OPTIONS request
+ return get_cors_response(headers)
+
+ raise Exception(
+ f'API Gateway integration type "{integration_type}", method "{method}", URI "{uri}" not yet implemented'
+ )
diff --git a/localstack-core/localstack/services/apigateway/legacy/provider.py b/localstack-core/localstack/services/apigateway/legacy/provider.py
new file mode 100644
index 0000000000000..846a965628402
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/provider.py
@@ -0,0 +1,3266 @@
+import copy
+import io
+import json
+import logging
+import re
+from copy import deepcopy
+from datetime import datetime
+from typing import IO, Any
+
+from moto.apigateway import models as apigw_models
+from moto.apigateway.models import Resource as MotoResource
+from moto.apigateway.models import RestAPI as MotoRestAPI
+from moto.core.utils import camelcase_to_underscores
+
+from localstack.aws.api import CommonServiceException, RequestContext, ServiceRequest, handler
+from localstack.aws.api.apigateway import (
+ Account,
+ ApigatewayApi,
+ ApiKey,
+ ApiKeys,
+ Authorizer,
+ Authorizers,
+ BadRequestException,
+ BasePathMapping,
+ BasePathMappings,
+ Blob,
+ Boolean,
+ ClientCertificate,
+ ClientCertificates,
+ ConflictException,
+ ConnectionType,
+ CreateAuthorizerRequest,
+ CreateRestApiRequest,
+ CreateStageRequest,
+ Deployment,
+ DocumentationPart,
+ DocumentationPartIds,
+ DocumentationPartLocation,
+ DocumentationParts,
+ DocumentationVersion,
+ DocumentationVersions,
+ DomainName,
+ DomainNames,
+ DomainNameStatus,
+ EndpointConfiguration,
+ EndpointType,
+ ExportResponse,
+ GatewayResponse,
+ GatewayResponses,
+ GatewayResponseType,
+ GetDocumentationPartsRequest,
+ Integration,
+ IntegrationResponse,
+ IntegrationType,
+ IpAddressType,
+ ListOfApiStage,
+ ListOfPatchOperation,
+ ListOfStageKeys,
+ ListOfString,
+ MapOfStringToBoolean,
+ MapOfStringToString,
+ Method,
+ MethodResponse,
+ Model,
+ Models,
+ MutualTlsAuthenticationInput,
+ NotFoundException,
+ NullableBoolean,
+ NullableInteger,
+ PutIntegrationRequest,
+ PutIntegrationResponseRequest,
+ PutMode,
+ PutRestApiRequest,
+ QuotaSettings,
+ RequestValidator,
+ RequestValidators,
+ Resource,
+ ResourceOwner,
+ RestApi,
+ RestApis,
+ RoutingMode,
+ SecurityPolicy,
+ Stage,
+ Stages,
+ StatusCode,
+ String,
+ Tags,
+ TestInvokeMethodRequest,
+ TestInvokeMethodResponse,
+ ThrottleSettings,
+ UsagePlan,
+ UsagePlanKeys,
+ UsagePlans,
+ VpcLink,
+ VpcLinks,
+)
+from localstack.aws.connect import connect_to
+from localstack.aws.forwarder import create_aws_request_context
+from localstack.constants import APPLICATION_JSON
+from localstack.services.apigateway.exporter import OpenApiExporter
+from localstack.services.apigateway.helpers import (
+ EMPTY_MODEL,
+ ERROR_MODEL,
+ INVOKE_TEST_LOG_TEMPLATE,
+ OpenAPIExt,
+ apply_json_patch_safe,
+ get_apigateway_store,
+ get_moto_backend,
+ get_moto_rest_api,
+ get_regional_domain_name,
+ get_rest_api_container,
+ import_api_from_openapi_spec,
+ is_greedy_path,
+ is_variable_path,
+ resolve_references,
+)
+from localstack.services.apigateway.legacy.helpers import multi_value_dict_for_list
+from localstack.services.apigateway.legacy.invocations import invoke_rest_api_from_request
+from localstack.services.apigateway.legacy.router_asf import ApigatewayRouter, to_invocation_context
+from localstack.services.apigateway.models import ApiGatewayStore, RestApiContainer
+from localstack.services.apigateway.next_gen.execute_api.router import (
+ ApiGatewayRouter as ApiGatewayRouterNextGen,
+)
+from localstack.services.apigateway.patches import apply_patches
+from localstack.services.edge import ROUTER
+from localstack.services.moto import call_moto, call_moto_with_request
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.utils.aws.arns import InvalidArnException, get_partition, parse_arn
+from localstack.utils.collections import (
+ DelSafeDict,
+ PaginatedList,
+ ensure_list,
+ select_from_typed_dict,
+)
+from localstack.utils.json import parse_json_or_yaml
+from localstack.utils.strings import md5, short_uid, str_to_bool, to_bytes, to_str
+from localstack.utils.time import TIMESTAMP_FORMAT_TZ, now_utc, timestamp
+
+LOG = logging.getLogger(__name__)
+
+# list of valid paths for Stage update patch operations (extracted from AWS responses via snapshot tests)
+STAGE_UPDATE_PATHS = [
+    "/deploymentId",
+    "/description",
+    "/cacheClusterEnabled",
+    "/cacheClusterSize",
+    "/clientCertificateId",
+    "/accessLogSettings",
+    "/accessLogSettings/destinationArn",
+    "/accessLogSettings/format",
+    "/{resourcePath}/{httpMethod}/metrics/enabled",
+    "/{resourcePath}/{httpMethod}/logging/dataTrace",
+    "/{resourcePath}/{httpMethod}/logging/loglevel",
+    "/{resourcePath}/{httpMethod}/throttling/burstLimit",
+    "/{resourcePath}/{httpMethod}/throttling/rateLimit",
+    "/{resourcePath}/{httpMethod}/caching/ttlInSeconds",
+    "/{resourcePath}/{httpMethod}/caching/enabled",
+    "/{resourcePath}/{httpMethod}/caching/dataEncrypted",
+    "/{resourcePath}/{httpMethod}/caching/requireAuthorizationForCacheControl",
+    "/{resourcePath}/{httpMethod}/caching/unauthorizedCacheControlHeaderStrategy",
+    "/*/*/metrics/enabled",
+    "/*/*/logging/dataTrace",
+    "/*/*/logging/loglevel",
+    "/*/*/throttling/burstLimit",
+    "/*/*/throttling/rateLimit",
+    "/*/*/caching/ttlInSeconds",
+    "/*/*/caching/enabled",
+    "/*/*/caching/dataEncrypted",
+    "/*/*/caching/requireAuthorizationForCacheControl",
+    "/*/*/caching/unauthorizedCacheControlHeaderStrategy",
+    "/variables/{variable_name}",
+    "/tracingEnabled",
+]
+
+# closed set of integration types accepted by this provider
+VALID_INTEGRATION_TYPES = {
+    IntegrationType.AWS,
+    IntegrationType.AWS_PROXY,
+    IntegrationType.HTTP,
+    IntegrationType.HTTP_PROXY,
+    IntegrationType.MOCK,
+}
+
+
+class ApigatewayProvider(ApigatewayApi, ServiceLifecycleHook):
+ router: ApigatewayRouter | ApiGatewayRouterNextGen
+
+    def __init__(self, router: ApigatewayRouter | ApiGatewayRouterNextGen = None):
+        # default to the legacy ASF router when no router is injected
+        self.router = router or ApigatewayRouter(ROUTER)
+
+    def on_after_init(self):
+        # install the moto patches and expose the invocation routes once the service is up
+        apply_patches()
+        self.router.register_routes()
+
+    @handler("TestInvokeMethod", expand=False)
+    def test_invoke_method(
+        self, context: RequestContext, request: TestInvokeMethodRequest
+    ) -> TestInvokeMethodResponse:
+        """Simulate an invocation of a REST API method without an actual client call.
+
+        Builds an invocation context from the request parameters, invokes the
+        method, and returns status, headers, body, latency, and an execution log.
+        """
+        invocation_context = to_invocation_context(context.request)
+        invocation_context.method = request.get("httpMethod")
+        invocation_context.api_id = request.get("restApiId")
+        invocation_context.path_with_query_string = request.get("pathWithQueryString")
+        invocation_context.region_name = context.region
+        invocation_context.account_id = context.account_id
+
+        moto_rest_api = get_moto_rest_api(context=context, rest_api_id=invocation_context.api_id)
+        resource = moto_rest_api.resources.get(request["resourceId"])
+        if not resource:
+            raise NotFoundException("Invalid Resource identifier specified")
+
+        invocation_context.resource = {"id": resource.id}
+        invocation_context.resource_path = resource.path_part
+
+        # the request data may itself carry a body and headers to use for the test call
+        if data := parse_json_or_yaml(to_str(invocation_context.data or b"")):
+            invocation_context.data = data.get("body")
+            invocation_context.headers = data.get("headers", {})
+
+        req_start_time = datetime.now()
+        result = invoke_rest_api_from_request(invocation_context)
+        req_end_time = datetime.now()
+
+        # TODO: add the missing fields to the log. Next iteration will add helpers to extract the missing fields
+        #  from the apicontext
+        formatted_date = req_start_time.strftime("%a %b %d %H:%M:%S %Z %Y")
+        log = INVOKE_TEST_LOG_TEMPLATE.format(
+            request_id=invocation_context.context["requestId"],
+            formatted_date=formatted_date,
+            http_method=invocation_context.method,
+            resource_path=invocation_context.invocation_path,
+            request_path="",
+            query_string="",
+            request_headers="",
+            request_body="",
+            response_body="",
+            response_headers=result.headers,
+            status_code=result.status_code,
+        )
+
+        return TestInvokeMethodResponse(
+            status=result.status_code,
+            headers=dict(result.headers),
+            body=to_str(result.content),
+            log=log,
+            # NOTE(review): AWS documents TestInvokeMethod latency in milliseconds;
+            # int(total_seconds()) truncates sub-second calls to 0 — confirm intended unit
+            latency=int((req_end_time - req_start_time).total_seconds()),
+            multiValueHeaders=multi_value_dict_for_list(result.headers),
+        )
+
+    @handler("CreateRestApi", expand=False)
+    def create_rest_api(self, context: RequestContext, request: CreateRestApiRequest) -> RestApi:
+        """Create a REST API, validating its endpoint configuration first.
+
+        Validation covers the endpoint type, the ipAddressType (defaulted from
+        the endpoint type when absent), the description, and the minimum
+        compression size, mirroring AWS error messages. The API itself is
+        created via moto; the LocalStack store keeps a parallel container with
+        the two default models (Empty, Error).
+        """
+        endpoint_configuration = request.get("endpointConfiguration", {})
+        types = endpoint_configuration.get("types", [EndpointType.EDGE])
+        ip_address_type = endpoint_configuration.get("ipAddressType")
+
+        if not types:
+            raise BadRequestException(
+                "REGIONAL Configuration and EDGE Configuration cannot be both DISABLED."
+            )
+        elif len(types) > 1:
+            raise BadRequestException("Cannot create an api with multiple Endpoint Types.")
+        endpoint_type = types[0]
+
+        error_messages = []
+        if endpoint_type not in (EndpointType.PRIVATE, EndpointType.EDGE, EndpointType.REGIONAL):
+            error_messages.append(
+                f"Value '[{endpoint_type}]' at 'createRestApiInput.endpointConfiguration.types' failed to satisfy constraint: Member must satisfy constraint: [Member must satisfy enum value set: [PRIVATE, EDGE, REGIONAL]]",
+            )
+        elif not ip_address_type:
+            # default the ipAddressType based on the endpoint type (PRIVATE -> dualstack)
+            if endpoint_type in (EndpointType.EDGE, EndpointType.REGIONAL):
+                ip_address_type = IpAddressType.ipv4
+            else:
+                ip_address_type = IpAddressType.dualstack
+
+        if ip_address_type not in (IpAddressType.ipv4, IpAddressType.dualstack, None):
+            error_messages.append(
+                f"Value '{ip_address_type}' at 'createRestApiInput.endpointConfiguration.ipAddressType' failed to satisfy constraint: Member must satisfy enum value set: [ipv4, dualstack]",
+            )
+        if error_messages:
+            prefix = f"{len(error_messages)} validation error{'s' if len(error_messages) > 1 else ''} detected: "
+            raise CommonServiceException(
+                code="ValidationException",
+                message=prefix + "; ".join(error_messages),
+            )
+        if request.get("description") == "":
+            raise BadRequestException("Description cannot be an empty string")
+        if types == [EndpointType.PRIVATE] and ip_address_type == IpAddressType.ipv4:
+            raise BadRequestException("Only dualstack ipAddressType is supported for Private APIs.")
+
+        minimum_compression_size = request.get("minimumCompressionSize")
+        if minimum_compression_size is not None and (
+            minimum_compression_size < 0 or minimum_compression_size > 10485760
+        ):
+            raise BadRequestException(
+                "Invalid minimum compression size, must be between 0 and 10485760"
+            )
+
+        result = call_moto(context)
+        rest_api = get_moto_rest_api(context, rest_api_id=result["id"])
+        # propagate fields that moto does not take from the request itself
+        rest_api.version = request.get("version")
+        if binary_media_types := request.get("binaryMediaTypes"):
+            rest_api.binaryMediaTypes = binary_media_types
+
+        response: RestApi = rest_api.to_dict()
+        response["endpointConfiguration"]["ipAddressType"] = ip_address_type
+        remove_empty_attributes_from_rest_api(response)
+        store = get_apigateway_store(context=context)
+        rest_api_container = RestApiContainer(rest_api=response)
+        store.rest_apis[result["id"]] = rest_api_container
+        # add the 2 default models
+        rest_api_container.models[EMPTY_MODEL] = DEFAULT_EMPTY_MODEL
+        rest_api_container.models[ERROR_MODEL] = DEFAULT_ERROR_MODEL
+
+        return response
+
+ def create_api_key(
+ self,
+ context: RequestContext,
+ name: String = None,
+ description: String = None,
+ enabled: Boolean = None,
+ generate_distinct_id: Boolean = None,
+ value: String = None,
+ stage_keys: ListOfStageKeys = None,
+ customer_id: String = None,
+ tags: MapOfStringToString = None,
+ **kwargs,
+ ) -> ApiKey:
+ api_key = call_moto(context)
+
+ # transform array of stage keys [{'restApiId': '0iscapk09u', 'stageName': 'dev'}] into
+ # array of strings ['0iscapk09u/dev']
+ stage_keys = api_key.get("stageKeys", [])
+ api_key["stageKeys"] = [f"{sk['restApiId']}/{sk['stageName']}" for sk in stage_keys]
+
+ return api_key
+
+    def get_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> RestApi:
+        """Fetch a REST API from moto, dropping empty optional attributes from the response."""
+        rest_api: RestApi = call_moto(context)
+        remove_empty_attributes_from_rest_api(rest_api)
+        return rest_api
+
+    def update_rest_api(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        patch_operations: ListOfPatchOperation = None,
+        **kwargs,
+    ) -> RestApi:
+        """Apply JSON-patch operations to a REST API.
+
+        Several paths need AWS-specific pre-processing before the generic patch
+        is applied: /binaryMediaTypes encodes its value in the path,
+        /minimumCompressionSize must be a bounded nullable integer, and the
+        endpointConfiguration paths carry extra constraints (PRIVATE forces
+        dualstack). The patched API is mirrored into the LocalStack store.
+        """
+        rest_api = get_moto_rest_api(context, rest_api_id)
+
+        fixed_patch_ops = []
+        binary_media_types_path = "/binaryMediaTypes"
+        # TODO: validate a bit more patch operations
+        for patch_op in patch_operations:
+            if patch_op["op"] not in ("add", "remove", "move", "test", "replace", "copy"):
+                raise CommonServiceException(
+                    code="ValidationException",
+                    message=f"1 validation error detected: Value '{patch_op['op']}' at 'updateRestApiInput.patchOperations.1.member.op' failed to satisfy constraint: Member must satisfy enum value set: [add, remove, move, test, replace, copy]",
+                )
+            patch_op_path = patch_op.get("path", "")
+            # binaryMediaTypes has a specific way of being set
+            # see https://docs.aws.amazon.com/apigateway/latest/api/API_PatchOperation.html
+            # TODO: maybe implement a more generalized way if this happens anywhere else
+            if patch_op_path.startswith(binary_media_types_path):
+                if patch_op_path == binary_media_types_path:
+                    raise BadRequestException(f"Invalid patch path {patch_op_path}")
+                # the media type is the last path segment, with '/' escaped as '~1'
+                value = patch_op_path.rsplit("/", maxsplit=1)[-1]
+                path_value = value.replace("~1", "/")
+                patch_op["path"] = binary_media_types_path
+
+                if patch_op["op"] == "add":
+                    patch_op["value"] = path_value
+
+                elif patch_op["op"] == "remove":
+                    remove_index = rest_api.binaryMediaTypes.index(path_value)
+                    patch_op["path"] = f"{binary_media_types_path}/{remove_index}"
+
+                elif patch_op["op"] == "replace":
+                    # AWS is behaving weirdly, and will actually remove/add instead of replacing in place
+                    # it will put the replaced value last in the array
+                    replace_index = rest_api.binaryMediaTypes.index(path_value)
+                    fixed_patch_ops.append(
+                        {"op": "remove", "path": f"{binary_media_types_path}/{replace_index}"}
+                    )
+                    patch_op["op"] = "add"
+
+            elif patch_op_path == "/minimumCompressionSize":
+                if patch_op["op"] != "replace":
+                    raise BadRequestException(
+                        "Invalid patch operation specified. Must be one of: [replace]"
+                    )
+
+                try:
+                    # try to cast the value to integer if truthy, else reject
+                    value = int(val) if (val := patch_op.get("value")) else None
+                except ValueError:
+                    raise BadRequestException(
+                        "Invalid minimum compression size, must be between 0 and 10485760"
+                    )
+
+                if value is not None and (value < 0 or value > 10485760):
+                    raise BadRequestException(
+                        "Invalid minimum compression size, must be between 0 and 10485760"
+                    )
+                patch_op["value"] = value
+
+            elif patch_op_path.startswith("/endpointConfiguration/types"):
+                if patch_op["op"] != "replace":
+                    raise BadRequestException(
+                        "Invalid patch operation specified. Must be 'add'|'remove'|'replace'"
+                    )
+                if patch_op.get("value") not in (
+                    EndpointType.REGIONAL,
+                    EndpointType.EDGE,
+                    EndpointType.PRIVATE,
+                ):
+                    raise BadRequestException(
+                        "Invalid EndpointTypes specified. Valid options are REGIONAL,EDGE,PRIVATE"
+                    )
+                if patch_op.get("value") == EndpointType.PRIVATE:
+                    # switching to PRIVATE implies dualstack: emit an extra patch op for it
+                    fixed_patch_ops.append(patch_op)
+                    patch_op = {
+                        "op": "replace",
+                        "path": "/endpointConfiguration/ipAddressType",
+                        "value": IpAddressType.dualstack,
+                    }
+                    fixed_patch_ops.append(patch_op)
+                    continue
+
+            elif patch_op_path.startswith("/endpointConfiguration/ipAddressType"):
+                if patch_op["op"] != "replace":
+                    raise BadRequestException(
+                        "Invalid patch operation specified. Must be one of: [replace]"
+                    )
+                if (ipAddressType := patch_op.get("value")) not in (
+                    IpAddressType.ipv4,
+                    IpAddressType.dualstack,
+                ):
+                    raise BadRequestException("ipAddressType must be either ipv4 or dualstack.")
+                if (
+                    rest_api.endpoint_configuration["types"] == [EndpointType.PRIVATE]
+                    and ipAddressType == IpAddressType.ipv4
+                ):
+                    raise BadRequestException(
+                        "Only dualstack ipAddressType is supported for Private APIs."
+                    )
+
+            fixed_patch_ops.append(patch_op)
+
+        patch_api_gateway_entity(rest_api, fixed_patch_ops)
+
+        # fix data types after patches have been applied
+        endpoint_configs = rest_api.endpoint_configuration or {}
+        if isinstance(endpoint_configs.get("vpcEndpointIds"), str):
+            endpoint_configs["vpcEndpointIds"] = [endpoint_configs["vpcEndpointIds"]]
+
+        # minimum_compression_size is a unique path as it's a nullable integer,
+        # it would throw an error if it stays an empty string
+        if rest_api.minimum_compression_size == "":
+            rest_api.minimum_compression_size = None
+
+        response = rest_api.to_dict()
+
+        remove_empty_attributes_from_rest_api(response, remove_tags=False)
+        store = get_apigateway_store(context=context)
+        store.rest_apis[rest_api_id].rest_api = response
+        return response
+
+    @handler("PutRestApi", expand=False)
+    def put_rest_api(self, context: RequestContext, request: PutRestApiRequest) -> RestApi:
+        """Re-import an OpenAPI specification into an existing REST API."""
+        # TODO: take into account the mode: overwrite or merge
+        #  the default is now `merge`, but we are removing everything
+        rest_api = get_moto_rest_api(context, request["restApiId"])
+        rest_api, warnings = import_api_from_openapi_spec(
+            rest_api, context=context, request=request
+        )
+
+        rest_api.root_resource_id = get_moto_rest_api_root_resource(rest_api)
+        response = rest_api.to_dict()
+        remove_empty_attributes_from_rest_api(response)
+        store = get_apigateway_store(context=context)
+        store.rest_apis[request["restApiId"]].rest_api = response
+        # TODO: verify this
+        response = to_rest_api_response_json(response)
+        response.setdefault("tags", {})
+
+        # TODO Failing still keeps all applied mutations. We need to revert to the previous state instead
+        if warnings:
+            response["warnings"] = warnings
+
+        return response
+
+    @handler("CreateDomainName")
+    def create_domain_name(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        certificate_name: String = None,
+        certificate_body: String = None,
+        certificate_private_key: String = None,
+        certificate_chain: String = None,
+        certificate_arn: String = None,
+        regional_certificate_name: String = None,
+        regional_certificate_arn: String = None,
+        endpoint_configuration: EndpointConfiguration = None,
+        tags: MapOfStringToString = None,
+        security_policy: SecurityPolicy = None,
+        mutual_tls_authentication: MutualTlsAuthenticationInput = None,
+        ownership_verification_certificate_arn: String = None,
+        policy: String = None,
+        routing_mode: RoutingMode = None,
+        **kwargs,
+    ) -> DomainName:
+        """Register a custom domain name in the store.
+
+        Raises BadRequestException when no name is given and ConflictException
+        when the name already exists. The regional hosted-zone id is resolved
+        from Route53 when a matching hosted zone is found.
+        """
+        if not domain_name:
+            raise BadRequestException("No Domain Name specified")
+
+        store: ApiGatewayStore = get_apigateway_store(context=context)
+        if store.domain_names.get(domain_name):
+            raise ConflictException(f"Domain name with ID {domain_name} already exists")
+
+        # find matching hosted zone
+        zone_id = None
+        # TODO check if this call is IAM enforced
+        route53 = connect_to(
+            region_name=context.region, aws_access_key_id=context.account_id
+        ).route53
+        hosted_zones = route53.list_hosted_zones().get("HostedZones", [])
+        # keep only zones whose name is a suffix of the requested domain
+        hosted_zones = [hz for hz in hosted_zones if domain_name.endswith(hz["Name"].strip("."))]
+        zone_id = hosted_zones[0]["Id"].replace("/hostedzone/", "") if hosted_zones else zone_id
+
+        domain: DomainName = DomainName(
+            domainName=domain_name,
+            certificateName=certificate_name,
+            certificateArn=certificate_arn,
+            regionalDomainName=get_regional_domain_name(domain_name),
+            domainNameStatus=DomainNameStatus.AVAILABLE,
+            regionalHostedZoneId=zone_id,
+            regionalCertificateName=regional_certificate_name,
+            regionalCertificateArn=regional_certificate_arn,
+            securityPolicy=SecurityPolicy.TLS_1_2,
+            endpointConfiguration=endpoint_configuration,
+            routingMode=routing_mode,
+        )
+        store.domain_names[domain_name] = domain
+        return domain
+
@handler("GetDomainName")
def get_domain_name(
    self, context: RequestContext, domain_name: String, domain_name_id: String = None, **kwargs
) -> DomainName:
    """Return the stored DomainName record, or raise NotFoundException."""
    store: ApiGatewayStore = get_apigateway_store(context=context)
    domain = store.domain_names.get(domain_name)
    if not domain:
        raise NotFoundException("Invalid domain name identifier specified")
    return domain
+
@handler("GetDomainNames")
def get_domain_names(
    self,
    context: RequestContext,
    position: String = None,
    limit: NullableInteger = None,
    resource_owner: ResourceOwner = None,
    **kwargs,
) -> DomainNames:
    """List all domain names in the store (no paging applied; `position` is echoed back)."""
    store = get_apigateway_store(context=context)
    return DomainNames(items=list(store.domain_names.values()), position=position)
+
@handler("DeleteDomainName")
def delete_domain_name(
    self, context: RequestContext, domain_name: String, domain_name_id: String = None, **kwargs
) -> None:
    """Remove a domain name from the store; raise NotFoundException if absent."""
    store: ApiGatewayStore = get_apigateway_store(context=context)
    removed = store.domain_names.pop(domain_name, None)
    if not removed:
        raise NotFoundException("Invalid domain name identifier specified")
+
def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None:
    """Delete a REST API in moto and mirror the deletion in the LocalStack store.

    :raises NotFoundException: if the API id does not exist in moto.

    Fix: previously the store entry was popped *before* calling moto, so a
    failing moto call (unknown API id) still removed the LocalStack store entry,
    leaving the two backends out of sync. Delete in moto first, and only mutate
    the store once moto has succeeded.
    """
    try:
        # moto raises a KeyError if we're trying to delete an API that doesn't exist
        call_moto(context)
    except KeyError as e:
        raise NotFoundException(
            f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
        ) from e
    store = get_apigateway_store(context=context)
    store.rest_apis.pop(rest_api_id, None)
+
def get_rest_apis(
    self,
    context: RequestContext,
    position: String = None,
    limit: NullableInteger = None,
    **kwargs,
) -> RestApis:
    """List REST APIs via moto, stripping empty attributes for AWS parity."""
    moto_response: RestApis = call_moto(context)
    for api_item in moto_response["items"]:
        remove_empty_attributes_from_rest_api(api_item)
    return moto_response
+
+ # resources
+
def create_resource(
    self,
    context: RequestContext,
    rest_api_id: String,
    parent_id: String,
    path_part: String,
    **kwargs,
) -> Resource:
    """Create an API resource under a parent, with validations moto skips.

    Rejects creation under greedy paths ({proxy+}) and duplicate variable
    siblings before delegating to moto, and records the new resource in the
    parent's children list for later recursive deletion.

    :raises NotFoundException: if the parent resource does not exist.
    :raises BadRequestException: for greedy parents or a second variable sibling.
    """
    moto_rest_api = get_moto_rest_api(context, rest_api_id)
    parent_moto_resource: MotoResource = moto_rest_api.resources.get(parent_id, None)
    # validate here if the parent exists. Moto would first create then validate, which would lead to the resource
    # being created anyway
    if not parent_moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    parent_path = parent_moto_resource.path_part
    if is_greedy_path(parent_path):
        raise BadRequestException(
            f"Cannot create a child of a resource with a greedy path variable: {parent_path}"
        )

    store = get_apigateway_store(context=context)
    rest_api = store.rest_apis.get(rest_api_id)
    children = rest_api.resource_children.setdefault(parent_id, [])

    # only one variable path part ({id}) is allowed per set of siblings
    if is_variable_path(path_part):
        for sibling in children:
            sibling_resource: MotoResource = moto_rest_api.resources.get(sibling, None)
            if is_variable_path(sibling_resource.path_part):
                raise BadRequestException(
                    f"A sibling ({sibling_resource.path_part}) of this resource already has a variable path part -- only one is allowed"
                )

    response: Resource = call_moto(context)

    # save children to allow easy deletion of all children if we delete a parent route
    children.append(response["id"])

    return response
+
def delete_resource(
    self, context: RequestContext, rest_api_id: String, resource_id: String, **kwargs
) -> None:
    """Delete a resource and, recursively, all of its child resources.

    Removes the resources from the moto backend and prunes the LocalStack
    store's parent->children bookkeeping.

    :raises NotFoundException: if the resource id does not exist.
    """
    moto_rest_api = get_moto_rest_api(context, rest_api_id)

    moto_resource: MotoResource = moto_rest_api.resources.pop(resource_id, None)
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    store = get_apigateway_store(context=context)
    rest_api = store.rest_apis.get(rest_api_id)
    api_resources = rest_api.resource_children
    # we need to recursively delete all children resources of the resource we're deleting

    def _delete_children(resource_to_delete: str):
        # depth-first removal: pop each child from moto, then recurse into it
        children = api_resources.get(resource_to_delete, [])
        for child in children:
            moto_rest_api.resources.pop(child)
            _delete_children(child)

        api_resources.pop(resource_to_delete, None)

    _delete_children(resource_id)

    # remove the resource as a child from its parent
    parent_id = moto_resource.parent_id
    # NOTE(review): assumes the parent always has a children entry; resources not
    # created through create_resource might not — confirm against import paths
    api_resources[parent_id].remove(resource_id)
+
def update_integration_response(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    status_code: StatusCode,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> IntegrationResponse:
    """Apply JSON-patch operations to an existing integration response.

    Supports /responseTemplates/* (add, replace, remove), and replace on
    /contentHandling and /selectionPattern; other paths are silently ignored.

    :raises NotFoundException: unknown resource, method, or status code.
    :raises BadRequestException: non-string value for a responseTemplates patch.
    """
    # XXX: THIS IS NOT A COMPLETE IMPLEMENTATION, just the minimum required to get tests going
    # TODO: validate patch operations

    moto_rest_api = get_moto_rest_api(context, rest_api_id)
    moto_resource = moto_rest_api.resources.get(resource_id)
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    moto_method = moto_resource.resource_methods.get(http_method)
    if not moto_method:
        raise NotFoundException("Invalid Method identifier specified")

    integration_response = moto_method.method_integration.integration_responses.get(status_code)
    if not integration_response:
        raise NotFoundException("Invalid Integration Response identifier specified")

    for patch_operation in patch_operations:
        op = patch_operation.get("op")
        path = patch_operation.get("path")

        # for path "/responseTemplates/application~1json"
        if "/responseTemplates" in path:
            # lazily initialize the dict so add/replace work on a fresh response
            integration_response.response_templates = (
                integration_response.response_templates or {}
            )
            value = patch_operation.get("value")
            if not isinstance(value, str):
                raise BadRequestException(
                    f"Invalid patch value '{value}' specified for op '{op}'. Must be a string"
                )
            # "~1" is the JSON-pointer escape for "/" (e.g. application~1json)
            param = path.removeprefix("/responseTemplates/")
            param = param.replace("~1", "/")
            if op == "remove":
                integration_response.response_templates.pop(param)
            elif op in ("add", "replace"):
                integration_response.response_templates[param] = value

        elif "/contentHandling" in path and op == "replace":
            integration_response.content_handling = patch_operation.get("value")

        elif "/selectionPattern" in path and op == "replace":
            integration_response.selection_pattern = patch_operation.get("value")

    response: IntegrationResponse = integration_response.to_json()
    # in case it's empty, we still want to pass it on as ""
    # TODO: add a test case for this
    response["selectionPattern"] = integration_response.selection_pattern

    return response
+
def update_resource(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> Resource:
    """Apply patch operations to a resource (only /pathPart and /parentId, op=replace).

    Validates the target parent (existence, no cycles) and sibling path-part
    uniqueness before applying the patches, then updates the store's
    parent->children bookkeeping if the resource was re-parented.

    :raises NotFoundException: unknown resource or new parent id.
    :raises BadRequestException: unsupported path/op, or a cyclical re-parenting.
    :raises ConflictException: a sibling under the target parent has the same path part.

    Fix: when moving a resource under a parent that had no children entry yet,
    `api_resources[parent_id]` raised KeyError; use setdefault (consistent with
    create_resource) so the children list is created on demand.
    """
    moto_rest_api = get_moto_rest_api(context, rest_api_id)
    moto_resource = moto_rest_api.resources.get(resource_id)
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    store = get_apigateway_store(context=context)

    rest_api = store.rest_apis.get(rest_api_id)
    api_resources = rest_api.resource_children

    future_path_part = moto_resource.path_part
    current_parent_id = moto_resource.parent_id

    for patch_operation in patch_operations:
        op = patch_operation.get("op")
        if (path := patch_operation.get("path")) not in ("/pathPart", "/parentId"):
            raise BadRequestException(
                f"Invalid patch path '{path}' specified for op '{op}'. Must be one of: [/parentId, /pathPart]"
            )
        if op != "replace":
            raise BadRequestException(
                f"Invalid patch path '{path}' specified for op '{op}'. Please choose supported operations"
            )

        if path == "/parentId":
            value = patch_operation.get("value")
            future_parent_resource = moto_rest_api.resources.get(value)
            if not future_parent_resource:
                raise NotFoundException("Invalid Resource identifier specified")

            # moving a resource under one of its own children would create a cycle
            children_resources = api_resources.get(resource_id, [])
            if value in children_resources:
                raise BadRequestException("Resources cannot be cyclical.")

            new_sibling_resources = api_resources.get(value, [])

        else:  # path == "/pathPart"
            future_path_part = patch_operation.get("value")
            new_sibling_resources = api_resources.get(moto_resource.parent_id, [])

        # the (possibly new) path part must be unique among the target siblings
        for sibling in new_sibling_resources:
            sibling_resource = moto_rest_api.resources[sibling]
            if sibling_resource.path_part == future_path_part:
                raise ConflictException(
                    f"Another resource with the same parent already has this name: {future_path_part}"
                )

    # TODO: test with multiple patch operations which would not be compatible between each other
    patch_api_gateway_entity(moto_resource, patch_operations)

    # after setting it, mutate the store
    if moto_resource.parent_id != current_parent_id:
        current_sibling_resources = api_resources.get(current_parent_id)
        if current_sibling_resources:
            current_sibling_resources.remove(resource_id)
            # if the parent does not have children anymore, remove from the list
            if not current_sibling_resources:
                api_resources.pop(current_parent_id)

        # add it to the new parent children; the new parent may not have a
        # children entry yet, so create it on demand
        future_sibling_resources = api_resources.setdefault(moto_resource.parent_id, [])
        future_sibling_resources.append(resource_id)

    response = moto_resource.to_dict()
    return response
+
+ # resource method
+
def get_method(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    **kwargs,
) -> Method:
    """Fetch a method via moto, patching the response for AWS parity.

    Strips empty attributes and restores `responseParameters: {}` on
    integration responses where it was explicitly set to an empty dict
    (moto omits falsy fields, AWS returns the empty dict).
    """
    response: Method = call_moto(context)
    remove_empty_attributes_from_method(response)
    if method_integration := response.get("methodIntegration"):
        remove_empty_attributes_from_integration(method_integration)
        # moto will not return `responseParameters` field if it's not truthy, but AWS will return an empty dict
        # if it was set to an empty dict
        if "responseParameters" not in method_integration:
            moto_rest_api = get_moto_rest_api(context, rest_api_id)
            moto_resource = moto_rest_api.resources[resource_id]
            moto_method_integration = moto_resource.resource_methods[
                http_method
            ].method_integration
            if moto_method_integration.integration_responses:
                for (
                    status_code,
                    integration_response,
                ) in moto_method_integration.integration_responses.items():
                    # only an *explicit* empty dict is restored; None stays omitted
                    if integration_response.response_parameters == {}:
                        method_integration["integrationResponses"][str(status_code)][
                            "responseParameters"
                        ] = {}

    return response
+
def put_method(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    authorization_type: String,
    authorizer_id: String = None,
    api_key_required: Boolean = None,
    operation_name: String = None,
    request_parameters: MapOfStringToBoolean = None,
    request_models: MapOfStringToString = None,
    request_validator_id: String = None,
    authorization_scopes: ListOfString = None,
    **kwargs,
) -> Method:
    """Create a method on a resource, adding validation that moto lacks.

    Validates HTTP method, parameter-name uniqueness, authorizer / request
    validator / model references against the LocalStack store, then delegates
    to moto and normalizes the response (authorization type upper-cased,
    authorizerId backfilled).

    :raises NotFoundException: unknown API or resource id.
    :raises BadRequestException: any of the validations above failing.
    """
    # TODO: add missing validation? check order of validation as well
    moto_backend = get_moto_backend(context.account_id, context.region)
    moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id)
    if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)):
        raise NotFoundException("Invalid Resource identifier specified")

    if http_method not in ("GET", "PUT", "POST", "DELETE", "PATCH", "OPTIONS", "HEAD", "ANY"):
        raise BadRequestException(
            "Invalid HttpMethod specified. "
            "Valid options are GET,PUT,POST,DELETE,PATCH,OPTIONS,HEAD,ANY"
        )

    if request_parameters:
        # keys look like "method.request.querystring.name"; the final segment
        # must be unique across querystring, header and path parameters
        request_parameters_names = {
            name.rsplit(".", maxsplit=1)[-1] for name in request_parameters.keys()
        }
        if len(request_parameters_names) != len(request_parameters):
            raise BadRequestException(
                "Parameter names must be unique across querystring, header and path"
            )
    need_authorizer_id = authorization_type in ("CUSTOM", "COGNITO_USER_POOLS")
    store = get_apigateway_store(context=context)
    rest_api_container = store.rest_apis[rest_api_id]
    if need_authorizer_id and (
        not authorizer_id or authorizer_id not in rest_api_container.authorizers
    ):
        # TODO: will be cleaner with https://github.com/localstack/localstack/pull/7750
        raise BadRequestException(
            "Invalid authorizer ID specified. "
            "Setting the authorization type to CUSTOM or COGNITO_USER_POOLS requires a valid authorizer."
        )

    if request_validator_id and request_validator_id not in rest_api_container.validators:
        raise BadRequestException("Invalid Request Validator identifier specified")

    if request_models:
        for content_type, model_name in request_models.items():
            # FIXME: add Empty model to rest api at creation
            if model_name == EMPTY_MODEL:
                continue
            if model_name not in rest_api_container.models:
                raise BadRequestException(f"Invalid model identifier specified: {model_name}")

    response: Method = call_moto(context)
    remove_empty_attributes_from_method(response)
    moto_http_method = moto_resource.resource_methods[http_method]
    moto_http_method.authorization_type = moto_http_method.authorization_type.upper()

    # this is straight from the moto patch, did not test it yet but has the same functionality
    # FIXME: check if still necessary after testing Authorizers
    if need_authorizer_id and "authorizerId" not in response:
        response["authorizerId"] = authorizer_id

    response["authorizationType"] = response["authorizationType"].upper()

    return response
+
def update_method(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> Method:
    """Apply patch operations to a method, replicating AWS validation quirks.

    Filters the incoming patches into an "applicable" subset (validating paths
    and ops against UPDATE_METHOD_PATCH_PATHS, coercing booleans, handling
    authorizer/validator/model references), applies them, then normalizes the
    response.

    :raises NotFoundException: unknown resource or method id.
    :raises BadRequestException: invalid patch path/op, authorizer, validator,
        or model reference.
    """
    # see https://www.linkedin.com/pulse/updating-aws-cli-patch-operations-rest-api-yitzchak-meirovich/
    # for path construction
    moto_backend = get_moto_backend(context.account_id, context.region)
    moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id)
    if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)):
        raise NotFoundException("Invalid Resource identifier specified")

    if not (moto_method := moto_resource.resource_methods.get(http_method)):
        raise NotFoundException("Invalid Method identifier specified")
    store = get_apigateway_store(context=context)
    rest_api = store.rest_apis[rest_api_id]
    applicable_patch_operations = []
    modifying_auth_type = False
    modified_authorizer_id = False
    # remember whether these dicts were populated, to null them out if the
    # patches emptied them (AWS omits the fields in that case)
    had_req_params = bool(moto_method.request_parameters)
    had_req_models = bool(moto_method.request_models)

    for patch_operation in patch_operations:
        op = patch_operation.get("op")
        path = patch_operation.get("path")
        # if the path is not supported at all, raise an Exception
        if len(path.split("/")) > 3 or not any(
            path.startswith(s_path) for s_path in UPDATE_METHOD_PATCH_PATHS["supported_paths"]
        ):
            raise BadRequestException(f"Invalid patch path {path}")

        # if the path is not supported by the operation, ignore it and skip
        op_supported_path = UPDATE_METHOD_PATCH_PATHS.get(op, [])
        if not any(path.startswith(s_path) for s_path in op_supported_path):
            # build the AWS-style error listing which *other* ops would accept this path
            available_ops = [
                available_op
                for available_op in ("add", "replace", "delete")
                if available_op != op
            ]
            supported_ops = ", ".join(
                [
                    supported_op
                    for supported_op in available_ops
                    if any(
                        path.startswith(s_path)
                        for s_path in UPDATE_METHOD_PATCH_PATHS.get(supported_op, [])
                    )
                ]
            )
            raise BadRequestException(
                f"Invalid patch operation specified. Must be one of: [{supported_ops}]"
            )

        value = patch_operation.get("value")
        if op not in ("add", "replace"):
            # skip
            applicable_patch_operations.append(patch_operation)
            continue

        if path == "/authorizationType" and value in ("CUSTOM", "COGNITO_USER_POOLS"):
            modifying_auth_type = True

        elif path == "/authorizerId":
            modified_authorizer_id = value

        if any(
            path.startswith(s_path) for s_path in ("/apiKeyRequired", "/requestParameters/")
        ):
            # these fields are booleans; patch values arrive as strings
            patch_op = {"op": op, "path": path, "value": str_to_bool(value)}
            applicable_patch_operations.append(patch_op)
            continue

        elif path == "/requestValidatorId" and value not in rest_api.validators:
            if not value:
                # you can remove a requestValidator by passing an empty string as a value
                patch_op = {"op": "remove", "path": path, "value": value}
                applicable_patch_operations.append(patch_op)
                continue
            raise BadRequestException("Invalid Request Validator identifier specified")

        elif path.startswith("/requestModels/"):
            if value != EMPTY_MODEL and value not in rest_api.models:
                raise BadRequestException(f"Invalid model identifier specified: {value}")

        applicable_patch_operations.append(patch_operation)

    if modifying_auth_type:
        if not modified_authorizer_id or modified_authorizer_id not in rest_api.authorizers:
            raise BadRequestException(
                "Invalid authorizer ID specified. "
                "Setting the authorization type to CUSTOM or COGNITO_USER_POOLS requires a valid authorizer."
            )
    elif modified_authorizer_id:
        if moto_method.authorization_type not in ("CUSTOM", "COGNITO_USER_POOLS"):
            # AWS will ignore this patch if the method does not have a proper authorization type
            # filter the patches to remove the modified authorizerId
            applicable_patch_operations = [
                op for op in applicable_patch_operations if op.get("path") != "/authorizerId"
            ]

    # TODO: test with multiple patch operations which would not be compatible between each other
    patch_api_gateway_entity(moto_method, applicable_patch_operations)

    # if we removed all values of those fields, set them to None so that they're not returned anymore
    if had_req_params and len(moto_method.request_parameters) == 0:
        moto_method.request_parameters = None
    if had_req_models and len(moto_method.request_models) == 0:
        moto_method.request_models = None

    response = moto_method.to_json()
    remove_empty_attributes_from_method(response)
    remove_empty_attributes_from_integration(response.get("methodIntegration"))
    return response
+
def delete_method(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    **kwargs,
) -> None:
    """Delete a method after validating that API, resource and method exist."""
    moto_backend = get_moto_backend(context.account_id, context.region)
    moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id)
    moto_resource = moto_rest_api.resources.get(resource_id) if moto_rest_api else None
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    if not moto_resource.resource_methods.get(http_method):
        raise NotFoundException("Invalid Method identifier specified")

    call_moto(context)
+
+ # method responses
+
def get_method_response(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    status_code: StatusCode,
    **kwargs,
) -> MethodResponse:
    """Return a method response, validating each level of the lookup chain."""
    # this could probably be easier in a patch?
    moto_backend = get_moto_backend(context.account_id, context.region)
    moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id)
    # TODO: snapshot test different possibilities
    moto_resource = moto_rest_api.resources.get(resource_id) if moto_rest_api else None
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    moto_method = moto_resource.resource_methods.get(http_method)
    if not moto_method:
        raise NotFoundException("Invalid Method identifier specified")

    moto_method_response = moto_method.get_response(status_code)
    if not moto_method_response:
        raise NotFoundException("Invalid Response status code specified")

    return moto_method_response.to_json()
+
@handler("UpdateMethodResponse")
def update_method_response(
    self,
    context: RequestContext,
    rest_api_id: String,
    resource_id: String,
    http_method: String,
    status_code: StatusCode,
    patch_operations: ListOfPatchOperation | None = None,
    **kwargs,
) -> MethodResponse:
    """Apply patch operations to a method response.

    First runs AWS-style input validation (op enum, status-code pattern) and
    aggregates all messages into one ValidationException; then mutates
    responseParameters / responseModels according to the patches.

    :raises CommonServiceException: (ValidationException) for bad op/status code.
    :raises NotFoundException: unknown resource/method/status code, or a
        replace/remove on a missing parameter or model key.
    :raises BadRequestException: unsupported patch path.
    """
    error_messages = []
    for index, operation in enumerate(patch_operations):
        op = operation.get("op")
        if op not in VALID_PATCH_OPERATIONS:
            error_messages.append(
                f"Value '{op}' at 'updateMethodResponseInput.patchOperations.{index + 1}.member.op' "
                f"failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(VALID_PATCH_OPERATIONS)}]"
            )

    # AWS validates the status code shape (3 digits, 1xx-5xx) on update
    if not re.fullmatch(r"[1-5]\d\d", status_code):
        error_messages.append(
            f"Value '{status_code}' at 'statusCode' failed to satisfy constraint: "
            "Member must satisfy regular expression pattern: [1-5]\\d\\d"
        )

    if error_messages:
        prefix = f"{len(error_messages)} validation error{'s' if len(error_messages) > 1 else ''} detected: "
        raise CommonServiceException(
            code="ValidationException",
            message=prefix + "; ".join(error_messages),
        )

    moto_rest_api = get_moto_rest_api(context, rest_api_id)
    moto_resource = moto_rest_api.resources.get(resource_id)
    if not moto_resource:
        raise NotFoundException("Invalid Resource identifier specified")

    moto_method = moto_resource.resource_methods.get(http_method)
    if not moto_method:
        raise NotFoundException("Invalid Method identifier specified")

    method_response = moto_method.method_responses.get(status_code)
    if not method_response:
        raise NotFoundException("Invalid Response status code specified")

    # normalize None to {} so add/replace below can assign unconditionally
    if method_response.response_models is None:
        method_response.response_models = {}
    if method_response.response_parameters is None:
        method_response.response_parameters = {}

    for patch_operation in patch_operations:
        op = patch_operation["op"]
        path = patch_operation["path"]
        value = patch_operation.get("value")

        if path.startswith("/responseParameters/"):
            param_name = path.removeprefix("/responseParameters/")
            if param_name not in method_response.response_parameters and op in (
                "replace",
                "remove",
            ):
                raise NotFoundException("Invalid parameter name specified")
            if op in ("add", "replace"):
                # response parameters are booleans, transmitted as strings
                method_response.response_parameters[param_name] = value == "true"
            elif op == "remove":
                method_response.response_parameters.pop(param_name)

        elif path.startswith("/responseModels/"):
            param_name = path.removeprefix("/responseModels/")
            # "~1" is the JSON-pointer escape for "/" (e.g. application~1json)
            param_name = param_name.replace("~1", "/")
            if param_name not in method_response.response_models and op in (
                "replace",
                "remove",
            ):
                raise NotFoundException("Content-Type specified was not found")
            if op in ("add", "replace"):
                method_response.response_models[param_name] = value
            elif op == "remove":
                method_response.response_models.pop(param_name)
        else:
            raise BadRequestException(f"Invalid patch path {path}")

    response: MethodResponse = method_response.to_json()

    # AWS doesn't send back empty responseParameters or responseModels
    if not method_response.response_parameters:
        response.pop("responseParameters")
    if not method_response.response_models:
        response.pop("responseModels")

    return response
+
+ # stages
+
+ # TODO: add createdDate / lastUpdatedDate in Stage operations below!
@handler("CreateStage", expand=False)
def create_stage(self, context: RequestContext, request: CreateStageRequest) -> Stage:
    """Create a stage via moto, then patch the result for AWS parity.

    :raises NotFoundException: if moto did not create/find the stage.
    """
    call_moto(context)
    moto_api = get_moto_rest_api(context, rest_api_id=request["restApiId"])
    stage = moto_api.stages.get(request["stageName"])
    if not stage:
        raise NotFoundException("Invalid Stage identifier specified")

    if not hasattr(stage, "documentation_version"):
        stage.documentation_version = request.get("documentationVersion")

    # make sure we update the stage_name on the deployment entity in moto
    # NOTE(review): assumes the deploymentId always resolves to a deployment;
    # a missing/invalid id would raise AttributeError here — confirm validation upstream
    deployment = moto_api.deployments.get(request["deploymentId"])
    deployment.stage_name = stage.name

    response = stage.to_json()
    self._patch_stage_response(response)
    return response
+
def get_stage(
    self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
) -> Stage:
    """Fetch a stage from moto and normalize it for AWS parity."""
    stage_response = call_moto(context)
    self._patch_stage_response(stage_response)
    return stage_response
+
def get_stages(
    self, context: RequestContext, rest_api_id: String, deployment_id: String = None, **kwargs
) -> Stages:
    """List stages from moto, normalizing each entry for AWS parity."""
    moto_response = call_moto(context)
    for stage_entry in moto_response["item"]:
        self._patch_stage_response(stage_entry)
        # AWS omits empty descriptions entirely
        if not stage_entry.get("description"):
            stage_entry.pop("description", None)
    return Stages(**moto_response)
+
@handler("UpdateStage")
def update_stage(
    self,
    context: RequestContext,
    rest_api_id: String,
    stage_name: String,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> Stage:
    """Apply patch operations to a stage, with path validation moto lacks.

    Handles the special wildcard `*/*` remove for method settings, validates
    every patch path against STAGE_UPDATE_PATHS (with templated segments),
    coerces /tracingEnabled to a bool, then applies the patches in moto.

    :raises NotFoundException: unknown stage.
    :raises BadRequestException: removing an absent `*/*` setting or an invalid path.
    """
    call_moto(context)

    moto_backend = get_moto_backend(context.account_id, context.region)
    moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id)
    if not (moto_stage := moto_rest_api.stages.get(stage_name)):
        raise NotFoundException("Invalid Stage identifier specified")

    # construct list of path regexes for validation
    path_regexes = [re.sub("{[^}]+}", ".+", path) for path in STAGE_UPDATE_PATHS]

    # copy the patch operations to not mutate them, so that we're logging the correct input
    patch_operations = copy.deepcopy(patch_operations) or []
    for patch_operation in patch_operations:
        patch_path = patch_operation["path"]

        # special case: handle updates (op=remove) for wildcard method settings
        patch_path_stripped = patch_path.strip("/")
        if patch_path_stripped == "*/*" and patch_operation["op"] == "remove":
            if not moto_stage.method_settings.pop(patch_path_stripped, None):
                raise BadRequestException(
                    "Cannot remove method setting */* because there is no method setting for this method "
                )
            # the wildcard remove short-circuits: return without applying further patches
            response = moto_stage.to_json()
            self._patch_stage_response(response)
            return response

        path_valid = patch_path in STAGE_UPDATE_PATHS or any(
            re.match(regex, patch_path) for regex in path_regexes
        )
        if not path_valid:
            valid_paths = f"[{', '.join(STAGE_UPDATE_PATHS)}]"
            # note: weird formatting in AWS - required for snapshot testing
            valid_paths = valid_paths.replace(
                "/{resourcePath}/{httpMethod}/throttling/burstLimit, /{resourcePath}/{httpMethod}/throttling/rateLimit, /{resourcePath}/{httpMethod}/caching/ttlInSeconds",
                "/{resourcePath}/{httpMethod}/throttling/burstLimit/{resourcePath}/{httpMethod}/throttling/rateLimit/{resourcePath}/{httpMethod}/caching/ttlInSeconds",
            )
            valid_paths = valid_paths.replace("/burstLimit, /", "/burstLimit /")
            valid_paths = valid_paths.replace("/rateLimit, /", "/rateLimit /")
            raise BadRequestException(
                f"Invalid method setting path: {patch_operation['path']}. Must be one of: {valid_paths}"
            )

        # TODO: check if there are other boolean, maybe add a global step in _patch_api_gateway_entity
        if patch_path == "/tracingEnabled" and (value := patch_operation.get("value")):
            patch_operation["value"] = value and value.lower() == "true" or False

    patch_api_gateway_entity(moto_stage, patch_operations)
    moto_stage.apply_operations(patch_operations)

    response = moto_stage.to_json()
    self._patch_stage_response(response)
    return response
+
+ def _patch_stage_response(self, response: dict):
+ """Apply a few patches required for AWS parity"""
+ response.setdefault("cacheClusterStatus", "NOT_AVAILABLE")
+ response.setdefault("tracingEnabled", False)
+ if not response.get("variables"):
+ response.pop("variables", None)
+
def update_deployment(
    self,
    context: RequestContext,
    rest_api_id: String,
    deployment_id: String,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> Deployment:
    """Apply patch operations to a deployment (only replace on /description is supported).

    :raises NotFoundException: unknown deployment id.

    Fix: `patch_operations` defaults to None but was iterated unconditionally,
    raising TypeError on a request without patches; guard with `or []`.
    """
    moto_rest_api = get_moto_rest_api(context, rest_api_id)
    try:
        deployment = moto_rest_api.get_deployment(deployment_id)
    except KeyError:
        raise NotFoundException("Invalid Deployment identifier specified")

    for patch_operation in patch_operations or []:
        # TODO: add validation for unsupported paths
        # see https://docs.aws.amazon.com/apigateway/latest/api/patch-operations.html#UpdateDeployment-Patch
        if (
            patch_operation.get("path") == "/description"
            and patch_operation.get("op") == "replace"
        ):
            deployment.description = patch_operation["value"]

    deployment_response: Deployment = deployment.to_json() or {}
    return deployment_response
+
+ # authorizers
+
@handler("CreateAuthorizer", expand=False)
def create_authorizer(
    self, context: RequestContext, request: CreateAuthorizerRequest
) -> Authorizer:
    """Create an authorizer in the LocalStack store (not backed by moto).

    :raises ConflictException: if the REST API id is unknown (matches AWS's
        surprising error for this case).
    """
    # TODO: add validation
    api_id = request["restApiId"]
    store = get_apigateway_store(context=context)
    if api_id not in store.rest_apis:
        # this seems like a weird exception to throw, but couldn't get anything different
        # we might need to have a look again
        raise ConflictException(
            "Unable to complete operation due to concurrent modification. Please try again later."
        )

    authorizer_id = short_uid()[:6]  # length 6 to make TF tests pass
    authorizer = deepcopy(select_from_typed_dict(Authorizer, request))
    authorizer["id"] = authorizer_id
    # the TTL arrives as a string from some clients; AWS returns an int (default 300)
    authorizer["authorizerResultTtlInSeconds"] = int(
        authorizer.get("authorizerResultTtlInSeconds", 300)
    )
    store.rest_apis[api_id].authorizers[authorizer_id] = authorizer

    response = to_authorizer_response_json(api_id, authorizer)
    return response
+
def get_authorizers(
    self,
    context: RequestContext,
    rest_api_id: String,
    position: String = None,
    limit: NullableInteger = None,
    **kwargs,
) -> Authorizers:
    """List all authorizers of a REST API (no paging applied)."""
    # TODO add paging, validation
    rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
    items = []
    for authorizer in rest_api_container.authorizers.values():
        items.append(to_authorizer_response_json(rest_api_id, authorizer))
    return Authorizers(items=items)
+
def get_authorizer(
    self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs
) -> Authorizer:
    """Return one authorizer, or raise NotFoundException."""
    store = get_apigateway_store(context=context)
    rest_api_container = store.rest_apis.get(rest_api_id)
    # TODO: validate the restAPI id to remove the conditional
    authorizer = None
    if rest_api_container:
        authorizer = rest_api_container.authorizers.get(authorizer_id)

    if authorizer is None:
        raise NotFoundException(f"Authorizer not found: {authorizer_id}")
    return to_authorizer_response_json(rest_api_id, authorizer)
+
def delete_authorizer(
    self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs
) -> None:
    """Remove an authorizer from the store; silently succeeds if absent."""
    # TODO: add validation if authorizer does not exist
    store = get_apigateway_store(context=context)
    if rest_api_container := store.rest_apis.get(rest_api_id):
        rest_api_container.authorizers.pop(authorizer_id, None)
+
def update_authorizer(
    self,
    context: RequestContext,
    rest_api_id: String,
    authorizer_id: String,
    patch_operations: ListOfPatchOperation = None,
    **kwargs,
) -> Authorizer:
    """Apply JSON patches to a stored authorizer and return the updated record.

    :raises NotFoundException: unknown authorizer id.
    """
    # TODO: add validation
    store = get_apigateway_store(context=context)
    rest_api_container = store.rest_apis.get(rest_api_id)
    # TODO: validate the restAPI id to remove the conditional
    authorizer = (
        rest_api_container.authorizers.get(authorizer_id) if rest_api_container else None
    )

    if authorizer is None:
        raise NotFoundException(f"Authorizer not found: {authorizer_id}")

    patched_authorizer = apply_json_patch_safe(authorizer, patch_operations)
    # terraform sends this as a string in patch, so convert to int
    patched_authorizer["authorizerResultTtlInSeconds"] = int(
        patched_authorizer.get("authorizerResultTtlInSeconds", 300)
    )

    # store the updated Authorizer
    rest_api_container.authorizers[authorizer_id] = patched_authorizer

    result = to_authorizer_response_json(rest_api_id, patched_authorizer)
    return result
+
+ # accounts
+
+ def get_account(self, context: RequestContext, **kwargs) -> Account:
+ region_details = get_apigateway_store(context=context)
+ result = to_account_response_json(region_details.account)
+ return Account(**result)
+
+ def update_account(
+ self, context: RequestContext, patch_operations: ListOfPatchOperation = None, **kwargs
+ ) -> Account:
+ region_details = get_apigateway_store(context=context)
+ apply_json_patch_safe(region_details.account, patch_operations, in_place=True)
+ result = to_account_response_json(region_details.account)
+ return Account(**result)
+
+ # documentation parts
+
+ def get_documentation_parts(
+ self, context: RequestContext, request: GetDocumentationPartsRequest, **kwargs
+ ) -> DocumentationParts:
+ # TODO: add validation
+ api_id = request["restApiId"]
+ rest_api_container = get_rest_api_container(context, rest_api_id=api_id)
+
+ result = [
+ to_documentation_part_response_json(api_id, a)
+ for a in rest_api_container.documentation_parts.values()
+ ]
+ return DocumentationParts(items=result)
+
+ def get_documentation_part(
+ self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs
+ ) -> DocumentationPart:
+ # TODO: add validation
+ store = get_apigateway_store(context=context)
+ rest_api_container = store.rest_apis.get(rest_api_id)
+ # TODO: validate the restAPI id to remove the conditional
+ documentation_part = (
+ rest_api_container.documentation_parts.get(documentation_part_id)
+ if rest_api_container
+ else None
+ )
+
+ if documentation_part is None:
+ raise NotFoundException("Invalid Documentation part identifier specified")
+ return to_documentation_part_response_json(rest_api_id, documentation_part)
+
    def create_documentation_part(
        self,
        context: RequestContext,
        rest_api_id: String,
        location: DocumentationPartLocation,
        properties: String,
        **kwargs,
    ) -> DocumentationPart:
        """Create a documentation part on a REST API after validating its location type.

        :raises CommonServiceException: (ValidationException) for an invalid location type.
        """
        entity_id = short_uid()[:6]  # length 6 for AWS parity / Terraform compatibility
        rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)

        # TODO: add complete validation for
        # location parameter: https://docs.aws.amazon.com/apigateway/latest/api/API_DocumentationPartLocation.html
        # As of now we validate only "type"
        location_type = location.get("type")
        valid_location_types = [
            "API",
            "AUTHORIZER",
            "MODEL",
            "RESOURCE",
            "METHOD",
            "PATH_PARAMETER",
            "QUERY_PARAMETER",
            "REQUEST_HEADER",
            "REQUEST_BODY",
            "RESPONSE",
            "RESPONSE_HEADER",
            "RESPONSE_BODY",
        ]
        if location_type not in valid_location_types:
            # the enum ordering in the message mirrors the AWS error for parity
            raise CommonServiceException(
                "ValidationException",
                f"1 validation error detected: Value '{location_type}' at "
                f"'createDocumentationPartInput.location.type' failed to satisfy constraint: "
                f"Member must satisfy enum value set: "
                f"[RESPONSE_BODY, RESPONSE, METHOD, MODEL, AUTHORIZER, RESPONSE_HEADER, "
                f"RESOURCE, PATH_PARAMETER, REQUEST_BODY, QUERY_PARAMETER, API, REQUEST_HEADER]",
            )

        doc_part = DocumentationPart(
            id=entity_id,
            location=location,
            properties=properties,
        )
        rest_api_container.documentation_parts[entity_id] = doc_part

        result = to_documentation_part_response_json(rest_api_id, doc_part)
        return DocumentationPart(**result)
+
    def update_documentation_part(
        self,
        context: RequestContext,
        rest_api_id: String,
        documentation_part_id: String,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> DocumentationPart:
        """Patch a documentation part; only ``replace`` on ``/properties`` is allowed.

        :raises NotFoundException: unknown API or documentation part.
        :raises BadRequestException: unsupported op/path or empty properties value.
        """
        # TODO: add validation
        store = get_apigateway_store(context=context)
        rest_api_container = store.rest_apis.get(rest_api_id)
        # TODO: validate the restAPI id to remove the conditional
        doc_part = (
            rest_api_container.documentation_parts.get(documentation_part_id)
            if rest_api_container
            else None
        )

        if doc_part is None:
            raise NotFoundException("Invalid Documentation part identifier specified")

        # validate every operation up front, before applying any of them
        for patch_operation in patch_operations:
            path = patch_operation.get("path")
            operation = patch_operation.get("op")
            if operation != "replace":
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op '{operation}'. "
                    f"Please choose supported operations"
                )

            if path != "/properties":
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op 'replace'. "
                    f"Must be one of: [/properties]"
                )

            key = path[1:]
            if key == "properties" and not patch_operation.get("value"):
                raise BadRequestException("Documentation part properties must be non-empty")

        patched_doc_part = apply_json_patch_safe(doc_part, patch_operations)

        rest_api_container.documentation_parts[documentation_part_id] = patched_doc_part

        return to_documentation_part_response_json(rest_api_id, patched_doc_part)
+
+ def delete_documentation_part(
+ self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs
+ ) -> None:
+ # TODO: add validation if document_part does not exist, or rest_api
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+
+ documentation_part = rest_api_container.documentation_parts.get(documentation_part_id)
+
+ if documentation_part is None:
+ raise NotFoundException("Invalid Documentation part identifier specified")
+
+ if rest_api_container:
+ rest_api_container.documentation_parts.pop(documentation_part_id, None)
+
    def import_documentation_parts(
        self,
        context: RequestContext,
        rest_api_id: String,
        body: IO[Blob],
        mode: PutMode = None,
        fail_on_warnings: Boolean = None,
        **kwargs,
    ) -> DocumentationPartIds:
        """Import documentation parts from an OpenAPI document into a REST API.

        Only ``overwrite`` mode is implemented: existing parts are cleared and
        replaced by the parts found in the document. ``fail_on_warnings`` is ignored.
        """
        body_data = body.read()
        openapi_spec = parse_json_or_yaml(to_str(body_data))

        rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)

        # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-documenting-api-quick-start-import-export.html
        resolved_schema = resolve_references(openapi_spec, rest_api_id=rest_api_id)
        documentation = resolved_schema.get(OpenAPIExt.DOCUMENTATION)
        # NOTE(review): if the spec has no documentation extension, `documentation` is
        # None and the subscript below raises TypeError — confirm whether callers
        # always validate the document first.

        ids = []
        # overwrite mode
        if mode == PutMode.overwrite:
            rest_api_container.documentation_parts.clear()
            for doc_part in documentation["documentationParts"]:
                entity_id = short_uid()[:6]
                rest_api_container.documentation_parts[entity_id] = DocumentationPart(
                    id=entity_id, **doc_part
                )
                ids.append(entity_id)
        # TODO: implement the merge mode
        return DocumentationPartIds(ids=ids)
+
+ # documentation versions
+
+ def create_documentation_version(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ documentation_version: String,
+ stage_name: String = None,
+ description: String = None,
+ **kwargs,
+ ) -> DocumentationVersion:
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+
+ result = DocumentationVersion(
+ version=documentation_version, createdDate=datetime.now(), description=description
+ )
+ rest_api_container.documentation_versions[documentation_version] = result
+
+ return result
+
+ def get_documentation_version(
+ self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs
+ ) -> DocumentationVersion:
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+
+ result = rest_api_container.documentation_versions.get(documentation_version)
+ if not result:
+ raise NotFoundException(f"Documentation version not found: {documentation_version}")
+
+ return result
+
+ def get_documentation_versions(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> DocumentationVersions:
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+ result = list(rest_api_container.documentation_versions.values())
+ return DocumentationVersions(items=result)
+
+ def delete_documentation_version(
+ self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs
+ ) -> None:
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+
+ result = rest_api_container.documentation_versions.pop(documentation_version, None)
+ if not result:
+ raise NotFoundException(f"Documentation version not found: {documentation_version}")
+
+ def update_documentation_version(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ documentation_version: String,
+ patch_operations: ListOfPatchOperation = None,
+ **kwargs,
+ ) -> DocumentationVersion:
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+
+ result = rest_api_container.documentation_versions.get(documentation_version)
+ if not result:
+ raise NotFoundException(f"Documentation version not found: {documentation_version}")
+
+ patch_api_gateway_entity(result, patch_operations)
+
+ return result
+
+ # base path mappings
+
+ def get_base_path_mappings(
+ self,
+ context: RequestContext,
+ domain_name: String,
+ domain_name_id: String = None,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> BasePathMappings:
+ region_details = get_apigateway_store(context=context)
+
+ mappings_list = region_details.base_path_mappings.get(domain_name) or []
+
+ result = [
+ to_base_mapping_response_json(domain_name, m["basePath"], m) for m in mappings_list
+ ]
+ return BasePathMappings(items=result)
+
+ def get_base_path_mapping(
+ self,
+ context: RequestContext,
+ domain_name: String,
+ base_path: String,
+ domain_name_id: String = None,
+ **kwargs,
+ ) -> BasePathMapping:
+ region_details = get_apigateway_store(context=context)
+
+ mappings_list = region_details.base_path_mappings.get(domain_name) or []
+ mapping = ([m for m in mappings_list if m["basePath"] == base_path] or [None])[0]
+ if mapping is None:
+ raise NotFoundException(f"Base path mapping not found: {domain_name} - {base_path}")
+
+ result = to_base_mapping_response_json(domain_name, base_path, mapping)
+ return BasePathMapping(**result)
+
+ def create_base_path_mapping(
+ self,
+ context: RequestContext,
+ domain_name: String,
+ rest_api_id: String,
+ domain_name_id: String = None,
+ base_path: String = None,
+ stage: String = None,
+ **kwargs,
+ ) -> BasePathMapping:
+ region_details = get_apigateway_store(context=context)
+
+ # Note: "(none)" is a special value in API GW:
+ # https://docs.aws.amazon.com/apigateway/api-reference/link-relation/basepathmapping-by-base-path
+ base_path = base_path or "(none)"
+
+ entry = {
+ "domainName": domain_name,
+ "restApiId": rest_api_id,
+ "basePath": base_path,
+ "stage": stage,
+ }
+ region_details.base_path_mappings.setdefault(domain_name, []).append(entry)
+
+ result = to_base_mapping_response_json(domain_name, base_path, entry)
+ return BasePathMapping(**result)
+
    def update_base_path_mapping(
        self,
        context: RequestContext,
        domain_name: String,
        base_path: String,
        domain_name_id: String = None,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> BasePathMapping:
        """Patch a base path mapping and replace it in the store.

        :raises NotFoundException: when no mapping exists for (domain_name, base_path).
        """
        region_details = get_apigateway_store(context=context)

        mappings_list = region_details.base_path_mappings.get(domain_name) or []

        mapping = ([m for m in mappings_list if m["basePath"] == base_path] or [None])[0]
        if mapping is None:
            raise NotFoundException(
                f"Not found: mapping for domain name {domain_name}, "
                f"base path {base_path} in list {mappings_list}"
            )

        patch_operations = ensure_list(patch_operations)
        # some clients send the lowercase '/restapiId'; normalize to the canonical path
        for operation in patch_operations:
            if operation["path"] == "/restapiId":
                operation["path"] = "/restApiId"
        result = apply_json_patch_safe(mapping, patch_operations)

        # write the patched mapping back over every entry matching the base path
        for i in range(len(mappings_list)):
            if mappings_list[i]["basePath"] == base_path:
                mappings_list[i] = result

        result = to_base_mapping_response_json(domain_name, base_path, result)
        return BasePathMapping(**result)
+
+ def delete_base_path_mapping(
+ self,
+ context: RequestContext,
+ domain_name: String,
+ base_path: String,
+ domain_name_id: String = None,
+ **kwargs,
+ ) -> None:
+ region_details = get_apigateway_store(context=context)
+
+ mappings_list = region_details.base_path_mappings.get(domain_name) or []
+ for i in range(len(mappings_list)):
+ if mappings_list[i]["basePath"] == base_path:
+ del mappings_list[i]
+ return
+
+ raise NotFoundException(f"Base path mapping {base_path} for domain {domain_name} not found")
+
+ # client certificates
+
+ def get_client_certificate(
+ self, context: RequestContext, client_certificate_id: String, **kwargs
+ ) -> ClientCertificate:
+ region_details = get_apigateway_store(context=context)
+ result = region_details.client_certificates.get(client_certificate_id)
+ if result is None:
+ raise NotFoundException(f"Client certificate ID {client_certificate_id} not found")
+ return ClientCertificate(**result)
+
+ def get_client_certificates(
+ self,
+ context: RequestContext,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> ClientCertificates:
+ region_details = get_apigateway_store(context=context)
+ result = list(region_details.client_certificates.values())
+ return ClientCertificates(items=result)
+
    def generate_client_certificate(
        self,
        context: RequestContext,
        description: String = None,
        tags: MapOfStringToString = None,
        **kwargs,
    ) -> ClientCertificate:
        """Generate and store a mock client certificate entry.

        The PEM body is a static placeholder, not a real certificate.
        """
        region_details = get_apigateway_store(context=context)
        cert_id = short_uid()
        creation_time = now_utc()
        entry = {
            "description": description,
            "tags": tags,
            "clientCertificateId": cert_id,
            "createdDate": creation_time,
            # NOTE(review): assumes now_utc() returns an epoch timestamp so that adding
            # seconds yields a valid expiry — confirm the helper's return type
            "expirationDate": creation_time + 60 * 60 * 24 * 30,  # assume 30 days validity
            "pemEncodedCertificate": "testcert-123",  # TODO return proper certificate!
        }
        region_details.client_certificates[cert_id] = entry
        result = to_client_cert_response_json(entry)
        return ClientCertificate(**result)
+
+ def update_client_certificate(
+ self,
+ context: RequestContext,
+ client_certificate_id: String,
+ patch_operations: ListOfPatchOperation = None,
+ **kwargs,
+ ) -> ClientCertificate:
+ region_details = get_apigateway_store(context=context)
+ entity = region_details.client_certificates.get(client_certificate_id)
+ if entity is None:
+ raise NotFoundException(f'Client certificate ID "{client_certificate_id}" not found')
+ result = apply_json_patch_safe(entity, patch_operations)
+ result = to_client_cert_response_json(result)
+ return ClientCertificate(**result)
+
+ def delete_client_certificate(
+ self, context: RequestContext, client_certificate_id: String, **kwargs
+ ) -> None:
+ region_details = get_apigateway_store(context=context)
+ entity = region_details.client_certificates.pop(client_certificate_id, None)
+ if entity is None:
+ raise NotFoundException(f'VPC link ID "{client_certificate_id}" not found for deletion')
+
+ # VPC links
+
+ def create_vpc_link(
+ self,
+ context: RequestContext,
+ name: String,
+ target_arns: ListOfString,
+ description: String = None,
+ tags: MapOfStringToString = None,
+ **kwargs,
+ ) -> VpcLink:
+ region_details = get_apigateway_store(context=context)
+ link_id = short_uid()
+ entry = {"id": link_id, "status": "AVAILABLE"}
+ region_details.vpc_links[link_id] = entry
+ result = to_vpc_link_response_json(entry)
+ return VpcLink(**result)
+
+ def get_vpc_links(
+ self,
+ context: RequestContext,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> VpcLinks:
+ region_details = get_apigateway_store(context=context)
+ result = region_details.vpc_links.values()
+ result = [to_vpc_link_response_json(r) for r in result]
+ result = {"items": result}
+ return result
+
+ def get_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> VpcLink:
+ region_details = get_apigateway_store(context=context)
+ vpc_link = region_details.vpc_links.get(vpc_link_id)
+ if vpc_link is None:
+ raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found')
+ result = to_vpc_link_response_json(vpc_link)
+ return VpcLink(**result)
+
+ def update_vpc_link(
+ self,
+ context: RequestContext,
+ vpc_link_id: String,
+ patch_operations: ListOfPatchOperation = None,
+ **kwargs,
+ ) -> VpcLink:
+ region_details = get_apigateway_store(context=context)
+ vpc_link = region_details.vpc_links.get(vpc_link_id)
+ if vpc_link is None:
+ raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found')
+ result = apply_json_patch_safe(vpc_link, patch_operations)
+ result = to_vpc_link_response_json(result)
+ return VpcLink(**result)
+
+ def delete_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> None:
+ region_details = get_apigateway_store(context=context)
+ vpc_link = region_details.vpc_links.pop(vpc_link_id, None)
+ if vpc_link is None:
+ raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found for deletion')
+
+ # request validators
+
+ def get_request_validators(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> RequestValidators:
+ # TODO: add validation and pagination?
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ result = [
+ to_validator_response_json(rest_api_id, a)
+ for a in rest_api_container.validators.values()
+ ]
+ return RequestValidators(items=result)
+
+ def get_request_validator(
+ self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs
+ ) -> RequestValidator:
+ store = get_apigateway_store(context=context)
+ rest_api_container = store.rest_apis.get(rest_api_id)
+ # TODO: validate the restAPI id to remove the conditional
+ validator = (
+ rest_api_container.validators.get(request_validator_id) if rest_api_container else None
+ )
+
+ if validator is None:
+ raise NotFoundException("Invalid Request Validator identifier specified")
+
+ result = to_validator_response_json(rest_api_id, validator)
+ return result
+
+ def create_request_validator(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ name: String = None,
+ validate_request_body: Boolean = None,
+ validate_request_parameters: Boolean = None,
+ **kwargs,
+ ) -> RequestValidator:
+ # TODO: add validation (ex: name cannot be blank)
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise BadRequestException("Invalid REST API identifier specified")
+ # length 6 for AWS parity and TF compatibility
+ validator_id = short_uid()[:6]
+
+ validator = RequestValidator(
+ id=validator_id,
+ name=name,
+ validateRequestBody=validate_request_body or False,
+ validateRequestParameters=validate_request_parameters or False,
+ )
+
+ rest_api_container.validators[validator_id] = validator
+
+ # missing to_validator_response_json ?
+ return validator
+
    def update_request_validator(
        self,
        context: RequestContext,
        rest_api_id: String,
        request_validator_id: String,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> RequestValidator:
        """Patch a request validator; only ``replace`` on name/validate* paths is allowed.

        :raises NotFoundException: unknown API or validator.
        :raises BadRequestException: unsupported op/path or blank name.
        """
        # TODO: add validation
        store = get_apigateway_store(context=context)
        rest_api_container = store.rest_apis.get(rest_api_id)
        # TODO: validate the restAPI id to remove the conditional
        validator = (
            rest_api_container.validators.get(request_validator_id) if rest_api_container else None
        )

        if validator is None:
            raise NotFoundException(
                f"Validator {request_validator_id} for API Gateway {rest_api_id} not found"
            )

        for patch_operation in patch_operations:
            path = patch_operation.get("path")
            operation = patch_operation.get("op")
            if operation != "replace":
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op '{operation}'. "
                    f"Please choose supported operations"
                )
            if path not in ("/name", "/validateRequestBody", "/validateRequestParameters"):
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op 'replace'. "
                    f"Must be one of: [/name, /validateRequestParameters, /validateRequestBody]"
                )

            key = path[1:]
            value = patch_operation.get("value")
            if key == "name" and not value:
                raise BadRequestException("Request Validator name cannot be blank")

            elif key in ("validateRequestParameters", "validateRequestBody"):
                # patch values arrive as strings; coerce "true" (case-insensitive) to
                # True and everything else (including None) to False
                value = value and value.lower() == "true" or False

            rest_api_container.validators[request_validator_id][key] = value

        return to_validator_response_json(
            rest_api_id, rest_api_container.validators[request_validator_id]
        )
+
+ def delete_request_validator(
+ self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs
+ ) -> None:
+ # TODO: add validation if rest api does not exist
+ store = get_apigateway_store(context=context)
+ rest_api_container = store.rest_apis.get(rest_api_id)
+ if not rest_api_container:
+ raise NotFoundException("Invalid Request Validator identifier specified")
+
+ validator = rest_api_container.validators.pop(request_validator_id, None)
+ if not validator:
+ raise NotFoundException("Invalid Request Validator identifier specified")
+
+ # tags
+
+ def get_tags(
+ self,
+ context: RequestContext,
+ resource_arn: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> Tags:
+ result = get_apigateway_store(context=context).TAGS.get(resource_arn, {})
+ return Tags(tags=result)
+
+ def tag_resource(
+ self, context: RequestContext, resource_arn: String, tags: MapOfStringToString, **kwargs
+ ) -> None:
+ resource_tags = get_apigateway_store(context=context).TAGS.setdefault(resource_arn, {})
+ resource_tags.update(tags)
+
+ def untag_resource(
+ self, context: RequestContext, resource_arn: String, tag_keys: ListOfString, **kwargs
+ ) -> None:
+ resource_tags = get_apigateway_store(context=context).TAGS.setdefault(resource_arn, {})
+ for key in tag_keys:
+ resource_tags.pop(key, None)
+
    def import_rest_api(
        self,
        context: RequestContext,
        body: IO[Blob],
        fail_on_warnings: Boolean = None,
        parameters: MapOfStringToString = None,
        **kwargs,
    ) -> RestApi:
        """Create a new REST API from an OpenAPI/Swagger document.

        Implemented as CreateRestApi followed by PutRestApi, each dispatched through
        an internal request context so the regular handlers run.
        """
        body_data = body.read()

        # create rest api
        openapi_spec = parse_json_or_yaml(to_str(body_data))
        # NOTE(review): assumes the spec always has info.title — would raise
        # AttributeError on a spec without an "info" object; confirm upstream validation
        create_api_request = CreateRestApiRequest(name=openapi_spec.get("info").get("title"))
        create_api_context = create_custom_context(
            context,
            "CreateRestApi",
            create_api_request,
        )
        response = self.create_rest_api(create_api_context, create_api_request)
        api_id = response.get("id")
        # remove the 2 default models automatically created, but not when importing
        store = get_apigateway_store(context=context)
        store.rest_apis[api_id].models = {}

        # put rest api
        put_api_request = PutRestApiRequest(
            restApiId=api_id,
            failOnWarnings=str_to_bool(fail_on_warnings) or False,
            parameters=parameters or {},
            body=io.BytesIO(body_data),
        )
        put_api_context = create_custom_context(
            context,
            "PutRestApi",
            put_api_request,
        )
        put_api_response = self.put_rest_api(put_api_context, put_api_request)
        # drop an empty tags map from the response for AWS parity
        if not put_api_response.get("tags"):
            put_api_response.pop("tags", None)
        return put_api_response
+
+ # integrations
+
+ def get_integration(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ resource_id: String,
+ http_method: String,
+ **kwargs,
+ ) -> Integration:
+ try:
+ response: Integration = call_moto(context)
+ except CommonServiceException as e:
+ # the Exception raised by moto does not have the right message not status code
+ if e.code == "NotFoundException":
+ raise NotFoundException("Invalid Integration identifier specified")
+ raise
+
+ if integration_responses := response.get("integrationResponses"):
+ for integration_response in integration_responses.values():
+ remove_empty_attributes_from_integration_response(integration_response)
+
+ return response
+
    def put_integration(
        self, context: RequestContext, request: PutIntegrationRequest, **kwargs
    ) -> Integration:
        """Create or update a method's integration with AWS-parity validation.

        :raises CommonServiceException: (ValidationException) for an invalid type.
        :raises BadRequestException: missing HTTP method or invalid integration ARN.
        :raises NotFoundException: unknown resource or method identifiers.
        """
        if (integration_type := request.get("type")) not in VALID_INTEGRATION_TYPES:
            raise CommonServiceException(
                "ValidationException",
                f"1 validation error detected: Value '{integration_type}' at "
                f"'putIntegrationInput.type' failed to satisfy constraint: "
                f"Member must satisfy enum value set: [HTTP, MOCK, AWS_PROXY, HTTP_PROXY, AWS]",
            )

        elif integration_type in (IntegrationType.AWS_PROXY, IntegrationType.AWS):
            # AWS-backed integrations require an HTTP method and a well-formed ARN URI
            if not request.get("integrationHttpMethod"):
                raise BadRequestException("Enumeration value for HttpMethod must be non-empty")
            if not (integration_uri := request.get("uri") or "").startswith("arn:"):
                raise BadRequestException("Invalid ARN specified in the request")

            try:
                parsed_arn = parse_arn(integration_uri)
            except InvalidArnException:
                raise BadRequestException("Invalid ARN specified in the request")

            if not any(
                parsed_arn["resource"].startswith(action_type) for action_type in ("path", "action")
            ):
                raise BadRequestException("AWS ARN for integration must contain path or action")

            if integration_type == IntegrationType.AWS_PROXY and (
                parsed_arn["account"] != "lambda"
                or not parsed_arn["resource"].startswith("path/2015-03-31/functions/")
            ):
                # the Firehose message is misleading, this is not implemented in AWS
                raise BadRequestException(
                    "Integrations of type 'AWS_PROXY' currently only supports "
                    "Lambda function and Firehose stream invocations."
                )

        moto_rest_api = get_moto_rest_api(context=context, rest_api_id=request.get("restApiId"))
        resource = moto_rest_api.resources.get(request.get("resourceId"))
        if not resource:
            raise NotFoundException("Invalid Resource identifier specified")

        method = resource.resource_methods.get(request.get("httpMethod"))
        if not method:
            raise NotFoundException("Invalid Method identifier specified")

        # TODO: if the IntegrationType is AWS, `credentials` is mandatory
        # apply the defaults AWS applies when the optional fields are omitted
        moto_request = copy.copy(request)
        moto_request.setdefault("passthroughBehavior", "WHEN_NO_MATCH")
        moto_request.setdefault("timeoutInMillis", 29000)
        if integration_type in (IntegrationType.HTTP, IntegrationType.HTTP_PROXY):
            moto_request.setdefault("connectionType", ConnectionType.INTERNET)
        response = call_moto_with_request(context, moto_request)
        remove_empty_attributes_from_integration(integration=response)

        # TODO: should fix fundamentally once we move away from moto
        if integration_type == "MOCK":
            response.pop("uri", None)

        return response
+
    def update_integration(
        self,
        context: RequestContext,
        rest_api_id: String,
        resource_id: String,
        http_method: String,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> Integration:
        """Patch a method's integration (stored in moto) and return it as JSON.

        :raises NotFoundException: unknown resource or method.
        """
        moto_rest_api = get_moto_rest_api(context=context, rest_api_id=rest_api_id)
        resource = moto_rest_api.resources.get(resource_id)
        if not resource:
            raise NotFoundException("Invalid Resource identifier specified")

        method = resource.resource_methods.get(http_method)
        if not method:
            raise NotFoundException("Invalid Integration identifier specified")

        integration = method.method_integration
        patch_api_gateway_entity(integration, patch_operations)

        # fix data types: patch values arrive as strings (e.g. from terraform)
        if integration.timeout_in_millis:
            integration.timeout_in_millis = int(integration.timeout_in_millis)
        if skip_verification := (integration.tls_config or {}).get("insecureSkipVerification"):
            integration.tls_config["insecureSkipVerification"] = str_to_bool(skip_verification)

        integration_dict: Integration = integration.to_json()
        return integration_dict
+
    def delete_integration(
        self,
        context: RequestContext,
        rest_api_id: String,
        resource_id: String,
        http_method: String,
        **kwargs,
    ) -> None:
        """Delete a method's integration via moto.

        Any moto failure is surfaced as NotFoundException for AWS parity.
        """
        try:
            call_moto(context)
        except Exception as e:
            raise NotFoundException("Invalid Resource identifier specified") from e
+
+ # integration responses
+
    def get_integration_response(
        self,
        context: RequestContext,
        rest_api_id: String,
        resource_id: String,
        http_method: String,
        status_code: StatusCode,
        **kwargs,
    ) -> IntegrationResponse:
        """Return the integration response for a given status code.

        Validates the status code format, walks the moto model to produce AWS-parity
        errors, then delegates the actual response construction to moto.
        """
        # AWS only accepts 1xx-5xx status codes
        if not re.fullmatch(r"[1-5]\d\d", status_code):
            raise CommonServiceException(
                code="ValidationException",
                message=f"1 validation error detected: Value '{status_code}' at 'statusCode' failed to "
                f"satisfy constraint: Member must satisfy regular expression pattern: [1-5]\\d\\d",
            )
        try:
            moto_rest_api = get_moto_rest_api(context, rest_api_id)
        except NotFoundException:
            # align the error message with AWS
            raise NotFoundException("Invalid Resource identifier specified")

        if not (moto_resource := moto_rest_api.resources.get(resource_id)):
            raise NotFoundException("Invalid Resource identifier specified")

        if not (moto_method := moto_resource.resource_methods.get(http_method)):
            raise NotFoundException("Invalid Method identifier specified")

        if not moto_method.method_integration:
            raise NotFoundException("Invalid Integration identifier specified")
        if not (
            integration_responses := moto_method.method_integration.integration_responses
        ) or not (integration_response := integration_responses.get(status_code)):
            raise NotFoundException("Invalid Response status code specified")

        response: IntegrationResponse = call_moto(context)
        remove_empty_attributes_from_integration_response(response)
        # moto does not return selectionPattern if it is set to an empty string
        # TODO: fix upstream
        if (
            "selectionPattern" not in response
            and integration_response.selection_pattern is not None
        ):
            response["selectionPattern"] = integration_response.selection_pattern
        return response
+
    @handler("PutIntegrationResponse", expand=False)
    def put_integration_response(
        self,
        context: RequestContext,
        request: PutIntegrationResponseRequest,
    ) -> IntegrationResponse:
        """Create or update an integration response, patching up moto's divergences.

        :raises CommonServiceException: (ValidationException) for a bad status code.
        :raises NotFoundException: unknown API, resource or method.
        """
        status_code = request.get("statusCode")
        # AWS only accepts 1xx-5xx status codes
        if not re.fullmatch(r"[1-5]\d\d", status_code):
            raise CommonServiceException(
                code="ValidationException",
                message=f"1 validation error detected: Value '{status_code}' at 'statusCode' failed to "
                f"satisfy constraint: Member must satisfy regular expression pattern: [1-5]\\d\\d",
            )
        try:
            # put integration response doesn't return the right exception compared to AWS
            moto_rest_api = get_moto_rest_api(context=context, rest_api_id=request.get("restApiId"))
        except NotFoundException:
            raise NotFoundException("Invalid Resource identifier specified")

        moto_resource = moto_rest_api.resources.get(request.get("resourceId"))
        if not moto_resource:
            raise NotFoundException("Invalid Resource identifier specified")

        method = moto_resource.resource_methods.get(request.get("httpMethod"))
        if not method:
            raise NotFoundException("Invalid Method identifier specified")

        response = call_moto(context)
        # Moto has a specific case where it will set a None to an empty dict, but AWS does not behave the same
        if request.get("responseTemplates") is None:
            method_integration = moto_resource.resource_methods[
                request["httpMethod"]
            ].method_integration
            integration_response = method_integration.integration_responses[request["statusCode"]]
            integration_response.response_templates = None
            response.pop("responseTemplates", None)

        # Moto also does not return the selection pattern if it is set to an empty string
        # TODO: fix upstream
        if (selection_pattern := request.get("selectionPattern")) is not None:
            response["selectionPattern"] = selection_pattern

        return response
+
    def get_export(
        self,
        context: RequestContext,
        rest_api_id: String,
        stage_name: String,
        export_type: String,
        parameters: MapOfStringToString = None,
        accepts: String = None,
        **kwargs,
    ) -> ExportResponse:
        """Export a stage's API definition (e.g. OpenAPI) as a downloadable document.

        The export format defaults to JSON; the "extensions=apigateway" query parameter
        toggles inclusion of API Gateway vendor extensions.
        """
        moto_rest_api = get_moto_rest_api(context, rest_api_id)
        openapi_exporter = OpenApiExporter()
        # FIXME: look into parser why `parameters` is always None
        has_extension = context.request.values.get("extensions") == "apigateway"
        result = openapi_exporter.export_api(
            api_id=rest_api_id,
            stage=stage_name,
            export_type=export_type,
            export_format=accepts,
            with_extension=has_extension,
            account_id=context.account_id,
            region_name=context.region,
        )

        accepts = accepts or APPLICATION_JSON

        if accepts == APPLICATION_JSON:
            result = json.dumps(result, indent=2)

        # build the attachment filename from the accept type and the API version
        file_ext = accepts.split("/")[-1]
        version = moto_rest_api.version or timestamp(
            moto_rest_api.create_date, format=TIMESTAMP_FORMAT_TZ
        )
        return ExportResponse(
            body=to_bytes(result),
            contentType="application/octet-stream",
            contentDisposition=f'attachment; filename="{export_type}_{version}.{file_ext}"',
        )
+
+ def get_api_keys(
+ self,
+ context: RequestContext,
+ position: String = None,
+ limit: NullableInteger = None,
+ name_query: String = None,
+ customer_id: String = None,
+ include_values: NullableBoolean = None,
+ **kwargs,
+ ) -> ApiKeys:
+ # TODO: migrate API keys in our store
+ moto_backend = get_moto_backend(context.account_id, context.region)
+ api_keys = [api_key.to_json() for api_key in reversed(moto_backend.keys.values())]
+ if not include_values:
+ for api_key in api_keys:
+ api_key.pop("value")
+
+ item_list = PaginatedList(api_keys)
+
+ def token_generator(item):
+ return md5(item["id"])
+
+ def filter_function(item):
+ return item["name"].startswith(name_query)
+
+ paginated_list, next_token = item_list.get_page(
+ token_generator=token_generator,
+ next_token=position,
+ page_size=limit,
+ filter_function=filter_function if name_query else None,
+ )
+
+ return ApiKeys(items=paginated_list, position=next_token)
+
+ def update_api_key(
+ self,
+ context: RequestContext,
+ api_key: String,
+ patch_operations: ListOfPatchOperation = None,
+ **kwargs,
+ ) -> ApiKey:
+ response: ApiKey = call_moto(context)
+ if "value" in response:
+ response.pop("value", None)
+
+ if "tags" not in response:
+ response["tags"] = {}
+
+ return response
+
+ def create_model(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ name: String,
+ content_type: String,
+ description: String = None,
+ schema: String = None,
+ **kwargs,
+ ) -> Model:
+ store = get_apigateway_store(context=context)
+ if rest_api_id not in store.rest_apis:
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if not name:
+ raise BadRequestException("Model name must be non-empty")
+
+ if name in store.rest_apis[rest_api_id].models:
+ raise ConflictException("Model name already exists for this REST API")
+
+ if not schema:
+ # TODO: maybe add more validation around the schema, valid json string?
+ raise BadRequestException(
+ "Model schema must have at least 1 property or array items defined"
+ )
+
+ model_id = short_uid()[:6] # length 6 to make TF tests pass
+ model = Model(
+ id=model_id, name=name, contentType=content_type, description=description, schema=schema
+ )
+ store.rest_apis[rest_api_id].models[name] = model
+ remove_empty_attributes_from_model(model)
+ return model
+
+ def get_models(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> Models:
+ store = get_apigateway_store(context=context)
+ if rest_api_id not in store.rest_apis:
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ models = [
+ remove_empty_attributes_from_model(model)
+ for model in store.rest_apis[rest_api_id].models.values()
+ ]
+ return Models(items=models)
+
+ def get_model(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ model_name: String,
+ flatten: Boolean = None,
+ **kwargs,
+ ) -> Model:
+ store = get_apigateway_store(context=context)
+ if rest_api_id not in store.rest_apis or not (
+ model := store.rest_apis[rest_api_id].models.get(model_name)
+ ):
+ raise NotFoundException(f"Invalid model name specified: {model_name}")
+
+ return model
+
    def update_model(
        self,
        context: RequestContext,
        rest_api_id: String,
        model_name: String,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> Model:
        """Patch a model: only `replace` on `/schema` or `/description` is supported.

        :raises NotFoundException: unknown API id or model name
        :raises BadRequestException: unsupported op/path, or an empty schema value
        """
        # manually update the model, not need for JSON patch, only 2 path supported with replace operation
        # /schema
        # /description
        store = get_apigateway_store(context=context)
        if rest_api_id not in store.rest_apis or not (
            model := store.rest_apis[rest_api_id].models.get(model_name)
        ):
            raise NotFoundException(f"Invalid model name specified: {model_name}")

        for operation in patch_operations:
            path = operation.get("path")
            if operation.get("op") != "replace":
                # NOTE(review): the message hardcodes op 'add' regardless of the actual op — confirm AWS parity
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op 'add'. Please choose supported operations"
                )
            if path not in ("/schema", "/description"):
                raise BadRequestException(
                    f"Invalid patch path '{path}' specified for op 'replace'. Must be one of: [/description, /schema]"
                )

            key = path[1:]  # remove the leading slash
            value = operation.get("value")
            if key == "schema":
                if not value:
                    raise BadRequestException(
                        "Model schema must have at least 1 property or array items defined"
                    )
                # delete the resolved model to invalidate it
                store.rest_apis[rest_api_id].resolved_models.pop(model_name, None)
            model[key] = value
        remove_empty_attributes_from_model(model)
        return model
+
+ def delete_model(
+ self, context: RequestContext, rest_api_id: String, model_name: String, **kwargs
+ ) -> None:
+ store = get_apigateway_store(context=context)
+
+ if (
+ rest_api_id not in store.rest_apis
+ or model_name not in store.rest_apis[rest_api_id].models
+ ):
+ raise NotFoundException(f"Invalid model name specified: {model_name}")
+
+ moto_rest_api = get_moto_rest_api(context, rest_api_id)
+ validate_model_in_use(moto_rest_api, model_name)
+
+ store.rest_apis[rest_api_id].models.pop(model_name, None)
+ store.rest_apis[rest_api_id].resolved_models.pop(model_name, None)
+
+ @handler("CreateUsagePlan")
+ def create_usage_plan(
+ self,
+ context: RequestContext,
+ name: String,
+ description: String = None,
+ api_stages: ListOfApiStage = None,
+ throttle: ThrottleSettings = None,
+ quota: QuotaSettings = None,
+ tags: MapOfStringToString = None,
+ **kwargs,
+ ) -> UsagePlan:
+ usage_plan: UsagePlan = call_moto(context=context)
+ if not usage_plan.get("quota"):
+ usage_plan.pop("quota", None)
+
+ fix_throttle_and_quota_from_usage_plan(usage_plan)
+
+ return usage_plan
+
    def update_usage_plan(
        self,
        context: RequestContext,
        usage_plan_id: String,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> UsagePlan:
        """Patch a usage plan, pre-validating `remove /apiStages` ops, then normalize the response.

        :raises BadRequestException: apiStages value missing or not "<restApiId>:<stageName>"
        :raises NotFoundException: the referenced API or stage does not exist
        """
        # validate remove-/apiStages values before handing the patch off to moto
        for patch_op in patch_operations:
            if patch_op.get("op") == "remove" and patch_op.get("path") == "/apiStages":
                if not (api_stage_id := patch_op.get("value")):
                    raise BadRequestException("Invalid API Stage specified")
                if not len(split_stage_id := api_stage_id.split(":")) == 2:
                    raise BadRequestException("Invalid API Stage specified")
                rest_api_id, stage_name = split_stage_id
                moto_backend = apigw_models.apigateway_backends[context.account_id][context.region]
                if not (rest_api := moto_backend.apis.get(rest_api_id)):
                    raise NotFoundException(
                        f"Invalid API Stage {{api: {rest_api_id}, stage: {stage_name}}} specified for usageplan {usage_plan_id}"
                    )
                if stage_name not in rest_api.stages:
                    raise NotFoundException(
                        f"Invalid API Stage {{api: {rest_api_id}, stage: {stage_name}}} specified for usageplan {usage_plan_id}"
                    )

        usage_plan = call_moto(context=context)
        # AWS does not return an empty quota object
        if not usage_plan.get("quota"):
            usage_plan.pop("quota", None)

        # merge tags kept in the LocalStack store (keyed by the usage plan ARN) into the response
        usage_plan_arn = f"arn:{get_partition(context.region)}:apigateway:{context.region}::/usageplans/{usage_plan_id}"
        existing_tags = get_apigateway_store(context=context).TAGS.get(usage_plan_arn, {})
        if "tags" not in usage_plan:
            usage_plan["tags"] = existing_tags
        else:
            usage_plan["tags"].update(existing_tags)

        fix_throttle_and_quota_from_usage_plan(usage_plan)

        return usage_plan
+
+ def get_usage_plan(self, context: RequestContext, usage_plan_id: String, **kwargs) -> UsagePlan:
+ usage_plan: UsagePlan = call_moto(context=context)
+ if not usage_plan.get("quota"):
+ usage_plan.pop("quota", None)
+
+ fix_throttle_and_quota_from_usage_plan(usage_plan)
+
+ usage_plan_arn = f"arn:{get_partition(context.region)}:apigateway:{context.region}::/usageplans/{usage_plan_id}"
+ existing_tags = get_apigateway_store(context=context).TAGS.get(usage_plan_arn, {})
+ if "tags" not in usage_plan:
+ usage_plan["tags"] = existing_tags
+ else:
+ usage_plan["tags"].update(existing_tags)
+
+ return usage_plan
+
    @handler("GetUsagePlans")
    def get_usage_plans(
        self,
        context: RequestContext,
        position: String = None,
        key_id: String = None,
        limit: NullableInteger = None,
        **kwargs,
    ) -> UsagePlans:
        """List usage plans via moto, normalizing quota/throttle on every item."""
        usage_plans: UsagePlans = call_moto(context=context)
        # AWS returns an empty list rather than omitting "items"
        if not usage_plans.get("items"):
            usage_plans["items"] = []

        items = usage_plans["items"]
        for up in items:
            # AWS does not return an empty quota object
            if not up.get("quota"):
                up.pop("quota", None)

            fix_throttle_and_quota_from_usage_plan(up)

            # NOTE(review): popping a key that is known to be absent is a no-op; this was
            # probably meant to default tags (`up["tags"] = {}`) — confirm intended parity
            if "tags" not in up:
                up.pop("tags", None)

        return usage_plans
+
    def get_usage_plan_keys(
        self,
        context: RequestContext,
        usage_plan_id: String,
        position: String = None,
        limit: NullableInteger = None,
        name_query: String = None,
        **kwargs,
    ) -> UsagePlanKeys:
        """List a usage plan's keys (newest first) with optional name filter and pagination.

        Returns an empty item list when the usage plan has no keys (or is unknown).
        """
        # TODO: migrate Usage Plan and UsagePlan Keys to our store
        moto_backend = get_moto_backend(context.account_id, context.region)

        if not (usage_plan_keys := moto_backend.usage_plan_keys.get(usage_plan_id)):
            return UsagePlanKeys(items=[])

        # only keys that still exist in the backend are returned
        usage_plan_keys = [
            usage_plan_key.to_json()
            for usage_plan_key in reversed(usage_plan_keys.values())
            if usage_plan_key.id in moto_backend.keys
        ]

        item_list = PaginatedList(usage_plan_keys)

        def token_generator(item):
            # pagination tokens are derived from the key id
            return md5(item["id"])

        def filter_function(item):
            return item["name"].startswith(name_query)

        paginated_list, next_token = item_list.get_page(
            token_generator=token_generator,
            next_token=position,
            page_size=limit,
            filter_function=filter_function if name_query else None,
        )

        return UsagePlanKeys(items=paginated_list, position=next_token)
+
+ def put_gateway_response(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ response_type: GatewayResponseType,
+ status_code: StatusCode = None,
+ response_parameters: MapOfStringToString = None,
+ response_templates: MapOfStringToString = None,
+ **kwargs,
+ ) -> GatewayResponse:
+ # There were no validation in moto, so implementing as is
+ # TODO: add validation
+ # TODO: this is only the CRUD implementation, implement it in the invocation part of the code
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if response_type not in DEFAULT_GATEWAY_RESPONSES:
+ raise CommonServiceException(
+ code="ValidationException",
+ message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
+ )
+
+ gateway_response = GatewayResponse(
+ statusCode=status_code,
+ responseParameters=response_parameters,
+ responseTemplates=response_templates,
+ responseType=response_type,
+ defaultResponse=False,
+ )
+ rest_api_container.gateway_responses[response_type] = gateway_response
+ return gateway_response
+
+ def get_gateway_response(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ response_type: GatewayResponseType,
+ **kwargs,
+ ) -> GatewayResponse:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if response_type not in DEFAULT_GATEWAY_RESPONSES:
+ raise CommonServiceException(
+ code="ValidationException",
+ message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
+ )
+
+ gateway_response = rest_api_container.gateway_responses.get(
+ response_type, DEFAULT_GATEWAY_RESPONSES[response_type]
+ )
+ # TODO: add validation with the parameters? seems like it validated client side? how to try?
+ return gateway_response
+
+ def get_gateway_responses(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> GatewayResponses:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ user_gateway_resp = rest_api_container.gateway_responses
+ gateway_responses = [
+ user_gateway_resp.get(key) or value for key, value in DEFAULT_GATEWAY_RESPONSES.items()
+ ]
+ return GatewayResponses(items=gateway_responses)
+
+ def delete_gateway_response(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ response_type: GatewayResponseType,
+ **kwargs,
+ ) -> None:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if response_type not in DEFAULT_GATEWAY_RESPONSES:
+ raise CommonServiceException(
+ code="ValidationException",
+ message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
+ )
+
+ if not rest_api_container.gateway_responses.pop(response_type, None):
+ raise NotFoundException("Gateway response type not defined on api")
+
    def update_gateway_response(
        self,
        context: RequestContext,
        rest_api_id: String,
        response_type: GatewayResponseType,
        patch_operations: ListOfPatchOperation = None,
        **kwargs,
    ) -> GatewayResponse:
        """
        Support operations table:
         Path                | op:add        | op:replace | op:remove     | op:copy
         /statusCode         | Not supported | Supported  | Not supported | Not supported
         /responseParameters | Supported     | Supported  | Supported     | Not supported
         /responseTemplates  | Supported     | Supported  | Supported     | Not supported
        See https://docs.aws.amazon.com/apigateway/latest/api/patch-operations.html#UpdateGatewayResponse-Patch
        """
        store = get_apigateway_store(context=context)
        if not (rest_api_container := store.rest_apis.get(rest_api_id)):
            raise NotFoundException(
                f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
            )

        if response_type not in DEFAULT_GATEWAY_RESPONSES:
            raise CommonServiceException(
                code="ValidationException",
                message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
            )

        if response_type not in rest_api_container.gateway_responses:
            # deep copy to avoid in place mutation of the default response when update using JSON patch
            rest_api_container.gateway_responses[response_type] = copy.deepcopy(
                DEFAULT_GATEWAY_RESPONSES[response_type]
            )
            # once patched, the response no longer counts as a default one
            rest_api_container.gateway_responses[response_type]["defaultResponse"] = False

        patched_entity = rest_api_container.gateway_responses[response_type]

        # validate every operation before applying any of them
        for index, operation in enumerate(patch_operations):
            if (op := operation.get("op")) not in VALID_PATCH_OPERATIONS:
                raise CommonServiceException(
                    code="ValidationException",
                    message=f"1 validation error detected: Value '{op}' at 'updateGatewayResponseInput.patchOperations.{index + 1}.member.op' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(VALID_PATCH_OPERATIONS)}]",
                )

            path = operation.get("path", "null")
            if not any(
                path.startswith(s_path)
                for s_path in ("/statusCode", "/responseParameters", "/responseTemplates")
            ):
                raise BadRequestException(f"Invalid patch path {path}")

            # /statusCode only supports "replace" (see support table above)
            if op in ("add", "remove") and path == "/statusCode":
                raise BadRequestException(f"Invalid patch path {path}")

            elif op in ("add", "replace"):
                for param_type in ("responseParameters", "responseTemplates"):
                    if path.startswith(f"/{param_type}"):
                        if op == "replace":
                            # "~1" is the JSON-pointer escape for "/" in a map key
                            param = path.removeprefix(f"/{param_type}/")
                            param = param.replace("~1", "/")
                            if param not in patched_entity.get(param_type):
                                raise NotFoundException("Invalid parameter name specified")
                        if operation.get("value") is None:
                            raise BadRequestException(
                                f"Invalid null or empty value in {param_type}"
                            )

        patch_api_gateway_entity(patched_entity, patch_operations)

        return patched_entity
+
+ # TODO
+
+
+# ---------------
+# UTIL FUNCTIONS
+# ---------------
+
+
def remove_empty_attributes_from_rest_api(rest_api: RestApi, remove_tags=True) -> RestApi:
    """Normalize a RestApi response in place by dropping falsy optional fields.

    Tags are either removed (remove_tags=True) or defaulted to an empty dict.
    """
    for attribute in ("binaryMediaTypes", "version", "description"):
        if not rest_api.get(attribute):
            rest_api.pop(attribute, None)

    # minimumCompressionSize must be an int; anything else (incl. None) is dropped
    if not isinstance(rest_api.get("minimumCompressionSize"), int):
        rest_api.pop("minimumCompressionSize", None)

    if not rest_api.get("tags"):
        if remove_tags:
            rest_api.pop("tags", None)
        else:
            # if `tags` is falsy, set it to an empty dict
            rest_api["tags"] = {}

    return rest_api
+
+
def remove_empty_attributes_from_method(method: Method) -> Method:
    """Normalize a Method response in place.

    methodResponses is removed when falsy; the request* fields only when None.
    """
    if not method.get("methodResponses"):
        method.pop("methodResponses", None)

    for attribute in ("requestModels", "requestParameters"):
        if method.get(attribute) is None:
            method.pop(attribute, None)

    return method
+
+
def remove_empty_attributes_from_integration(integration: Integration):
    """Normalize an Integration response in place; a falsy integration passes through untouched."""
    if integration:
        if not integration.get("integrationResponses"):
            integration.pop("integrationResponses", None)
        if integration.get("requestParameters") is None:
            integration.pop("requestParameters", None)

    return integration
+
+
def remove_empty_attributes_from_model(model: Model) -> Model:
    """Drop a falsy description from a Model response, in place."""
    description = model.get("description")
    if not description:
        model.pop("description", None)

    return model
+
+
def remove_empty_attributes_from_integration_response(integration_response: IntegrationResponse):
    """Drop a None responseTemplates entry from an IntegrationResponse, in place."""
    templates = integration_response.get("responseTemplates")
    if templates is None:
        integration_response.pop("responseTemplates", None)

    return integration_response
+
+
def fix_throttle_and_quota_from_usage_plan(usage_plan: UsagePlan) -> None:
    """Normalize a UsagePlan in place.

    Defaults the quota offset to 0, coerces throttle limits to their numeric
    types, and drops empty quota/throttle objects entirely.
    """
    quota = usage_plan.get("quota")
    if quota:
        quota.setdefault("offset", 0)
    else:
        usage_plan.pop("quota", None)

    throttle = usage_plan.get("throttle")
    if not throttle:
        usage_plan.pop("throttle", None)
    else:
        if rate := throttle.get("rateLimit"):
            throttle["rateLimit"] = float(rate)
        if burst := throttle.get("burstLimit"):
            throttle["burstLimit"] = int(burst)
+
+
def validate_model_in_use(moto_rest_api: MotoRestAPI, model_name: str) -> None:
    """Raise ConflictException if any method request on the API still references the model."""
    for resource in moto_rest_api.resources.values():
        for method in resource.resource_methods.values():
            referenced_models = method.request_models or {}
            if model_name in referenced_models.values():
                method_path = f"{resource.get_path()}/{method.http_method}"
                raise ConflictException(
                    f"Cannot delete model '{model_name}', is referenced in method request: {method_path}"
                )
+
+
def get_moto_rest_api_root_resource(moto_rest_api: MotoRestAPI) -> str:
    """Return the resource id of the API's root ("/") resource.

    Raises a generic Exception if no root resource exists (should not happen for a valid API).
    """
    for resource_id, resource in moto_rest_api.resources.items():
        if resource.path_part == "/" and not resource.parent_id:
            return resource_id
    raise Exception(f"Unable to find root resource for API {moto_rest_api.id}")
+
+
def create_custom_context(
    context: RequestContext, action: str, parameters: ServiceRequest
) -> RequestContext:
    """Clone *context* into a new RequestContext targeting *action* on the same service.

    Headers and account id are carried over from the original request.
    """
    ctx = create_aws_request_context(
        service_name=context.service.service_name,
        action=action,
        parameters=parameters,
        region=context.region,
    )
    ctx.request.headers.update(context.request.headers)
    ctx.account_id = context.account_id
    return ctx
+
+
def patch_api_gateway_entity(entity: Any, patch_operations: ListOfPatchOperation):
    """Apply JSON patch operations in place to an API Gateway entity (dict or moto object).

    Moto objects are patched through their ``__dict__``, wrapped in DelSafeDict so
    ``remove`` operations can delete attributes. camelCase patch paths are rewritten
    to the snake_case attribute names used by moto objects when necessary.

    :raises BadRequestException: when a patch targets an unsupported attribute
    """
    patch_operations = patch_operations or []

    if isinstance(entity, dict):
        entity_dict = entity
    else:
        if not isinstance(entity.__dict__, DelSafeDict):
            entity.__dict__ = DelSafeDict(entity.__dict__)
        entity_dict = entity.__dict__

    # internal/moto-managed attributes that must never be patched
    not_supported_attributes = {"/id", "/region_name", "/create_date"}

    model_attributes = list(entity_dict.keys())
    for operation in patch_operations:
        # e.g. "/minimumCompressionSize" -> "/minimum_compression_size" if only the latter exists
        path_start = operation["path"].strip("/").split("/")[0]
        path_start_usc = camelcase_to_underscores(path_start)
        if path_start not in model_attributes and path_start_usc in model_attributes:
            operation["path"] = operation["path"].replace(path_start, path_start_usc)
        if operation["path"] in not_supported_attributes:
            raise BadRequestException(f"Invalid patch path {operation['path']}")

    apply_json_patch_safe(entity_dict, patch_operations, in_place=True)
+
+
def to_authorizer_response_json(api_id, data):
    """Serialize an authorizer entity into the HAL-style response shape."""
    return select_from_typed_dict(Authorizer, to_response_json("authorizer", data, api_id=api_id))
+
+
def to_validator_response_json(api_id, data):
    """Serialize a request validator entity into the HAL-style response shape."""
    return select_from_typed_dict(RequestValidator, to_response_json("validator", data, api_id=api_id))
+
+
def to_documentation_part_response_json(api_id, data):
    """Serialize a documentation part entity into the HAL-style response shape."""
    return select_from_typed_dict(
        DocumentationPart, to_response_json("documentationpart", data, api_id=api_id)
    )
+
+
def to_base_mapping_response_json(domain_name, base_path, data):
    """Serialize a base path mapping, with a domain/path specific self link."""
    self_link = f"/domainnames/{domain_name}/basepathmappings/{base_path}"
    return select_from_typed_dict(
        BasePathMapping, to_response_json("basepathmapping", data, self_link=self_link)
    )
+
+
def to_account_response_json(data):
    """Serialize the account settings entity, self-linked at the fixed "/account" path."""
    return select_from_typed_dict(Account, to_response_json("account", data, self_link="/account"))
+
+
def to_vpc_link_response_json(data):
    """Serialize a VPC link entity into the HAL-style response shape."""
    return select_from_typed_dict(VpcLink, to_response_json("vpclink", data))
+
+
def to_client_cert_response_json(data):
    """Serialize a client certificate entity; its id field is "clientCertificateId"."""
    return select_from_typed_dict(
        ClientCertificate, to_response_json("clientcertificate", data, id_attr="clientCertificateId")
    )
+
+
def to_rest_api_response_json(data):
    """Serialize a REST API entity into the HAL-style response shape."""
    return select_from_typed_dict(RestApi, to_response_json("restapi", data))
+
+
def to_response_json(model_type, data, api_id=None, self_link=None, id_attr=None):
    """Shape *data* as a single-entity response dict with HAL-style "_links" attached.

    A single-element list is unwrapped to its only element. The self link defaults
    to "/<model_type>s/<id>", nested under "/restapis/<api_id>" when an api_id is
    given. The input is deep-copied, so *data* itself is never mutated.
    """
    if isinstance(data, list) and len(data) == 1:
        data = data[0]
    key = id_attr or "id"
    result = deepcopy(data)
    if not self_link:
        self_link = f"/{model_type}s/{data[key]}"
        if api_id:
            # note: self_link already starts with "/", which yields a double slash here
            self_link = f"/restapis/{api_id}/{self_link}"
    # TODO: check if this is still required - "_links" are listed in the sample responses in the docs, but
    # recent parity tests indicate that this field is not returned by real AWS...
    # https://docs.aws.amazon.com/apigateway/latest/api/API_GetAuthorizers.html#API_GetAuthorizers_Example_1_Response
    links = result.setdefault("_links", {})
    links["self"] = {"href": self_link}
    links["curies"] = {
        "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-authorizer-latest.html",
        "name": model_type,
        "templated": True,
    }
    links[f"{model_type}:delete"] = {"href": self_link}
    return result
+
+
# Default "Empty" schema model (name taken from EMPTY_MODEL), mirroring AWS's built-in model
DEFAULT_EMPTY_MODEL = Model(
    id=short_uid()[:6],
    name=EMPTY_MODEL,
    contentType="application/json",
    description="This is a default empty schema model",
    schema=json.dumps(
        {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Empty Schema",
            "type": "object",
        }
    ),
)
+
# Default "Error" schema model (name taken from ERROR_MODEL), mirroring AWS's built-in model
DEFAULT_ERROR_MODEL = Model(
    id=short_uid()[:6],
    name=ERROR_MODEL,
    contentType="application/json",
    description="This is a default error schema model",
    schema=json.dumps(
        {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Error Schema",
            "type": "object",
            "properties": {"message": {"type": "string"}},
        }
    ),
)
+
+
# TODO: maybe extract this in its own files, or find a better generalizable way
# Patch paths accepted by UpdateMethod: "supported_paths" lists every patchable path,
# and each op key ("add"/"remove"/"replace") lists the paths valid for that op.
# Entries with a trailing slash are prefixes for map keys (e.g. "/requestParameters/<key>").
UPDATE_METHOD_PATCH_PATHS = {
    "supported_paths": [
        "/authorizationScopes",
        "/authorizationType",
        "/authorizerId",
        "/apiKeyRequired",
        "/operationName",
        "/requestParameters/",
        "/requestModels/",
        "/requestValidatorId",
    ],
    "add": [
        "/authorizationScopes",
        "/requestParameters/",
        "/requestModels/",
    ],
    "remove": [
        "/authorizationScopes",
        "/requestParameters/",
        "/requestModels/",
    ],
    "replace": [
        "/authorizationType",
        "/authorizerId",
        "/apiKeyRequired",
        "/operationName",
        "/requestParameters/",
        "/requestModels/",
        "/requestValidatorId",
    ],
}
+
# Built-in gateway response per response type ("defaultResponse": True), returned for an API
# with no user-configured gateway responses. DEFAULT_4XX and DEFAULT_5XX carry no statusCode.
DEFAULT_GATEWAY_RESPONSES: dict[GatewayResponseType, GatewayResponse] = {
    GatewayResponseType.REQUEST_TOO_LARGE: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "REQUEST_TOO_LARGE",
        "statusCode": "413",
    },
    GatewayResponseType.RESOURCE_NOT_FOUND: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "RESOURCE_NOT_FOUND",
        "statusCode": "404",
    },
    GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "AUTHORIZER_CONFIGURATION_ERROR",
        "statusCode": "500",
    },
    GatewayResponseType.MISSING_AUTHENTICATION_TOKEN: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "MISSING_AUTHENTICATION_TOKEN",
        "statusCode": "403",
    },
    GatewayResponseType.BAD_REQUEST_BODY: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "BAD_REQUEST_BODY",
        "statusCode": "400",
    },
    GatewayResponseType.INVALID_SIGNATURE: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "INVALID_SIGNATURE",
        "statusCode": "403",
    },
    GatewayResponseType.INVALID_API_KEY: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "INVALID_API_KEY",
        "statusCode": "403",
    },
    GatewayResponseType.BAD_REQUEST_PARAMETERS: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "BAD_REQUEST_PARAMETERS",
        "statusCode": "400",
    },
    GatewayResponseType.AUTHORIZER_FAILURE: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "AUTHORIZER_FAILURE",
        "statusCode": "500",
    },
    GatewayResponseType.UNAUTHORIZED: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "UNAUTHORIZED",
        "statusCode": "401",
    },
    GatewayResponseType.INTEGRATION_TIMEOUT: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "INTEGRATION_TIMEOUT",
        "statusCode": "504",
    },
    GatewayResponseType.ACCESS_DENIED: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "ACCESS_DENIED",
        "statusCode": "403",
    },
    GatewayResponseType.DEFAULT_4XX: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "DEFAULT_4XX",
    },
    GatewayResponseType.DEFAULT_5XX: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "DEFAULT_5XX",
    },
    GatewayResponseType.WAF_FILTERED: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "WAF_FILTERED",
        "statusCode": "403",
    },
    GatewayResponseType.QUOTA_EXCEEDED: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "QUOTA_EXCEEDED",
        "statusCode": "429",
    },
    GatewayResponseType.THROTTLED: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "THROTTLED",
        "statusCode": "429",
    },
    GatewayResponseType.API_CONFIGURATION_ERROR: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "API_CONFIGURATION_ERROR",
        "statusCode": "500",
    },
    GatewayResponseType.UNSUPPORTED_MEDIA_TYPE: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "UNSUPPORTED_MEDIA_TYPE",
        "statusCode": "415",
    },
    GatewayResponseType.INTEGRATION_FAILURE: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "INTEGRATION_FAILURE",
        "statusCode": "504",
    },
    GatewayResponseType.EXPIRED_TOKEN: {
        "defaultResponse": True,
        "responseParameters": {},
        "responseTemplates": {"application/json": '{"message":$context.error.messageString}'},
        "responseType": "EXPIRED_TOKEN",
        "statusCode": "403",
    },
}
+
# Full set of JSON patch ops accepted during patch-operation validation (e.g. update_gateway_response)
VALID_PATCH_OPERATIONS = ["add", "remove", "move", "test", "replace", "copy"]
diff --git a/localstack-core/localstack/services/apigateway/legacy/router_asf.py b/localstack-core/localstack/services/apigateway/legacy/router_asf.py
new file mode 100644
index 0000000000000..0664c98c56f20
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/router_asf.py
@@ -0,0 +1,160 @@
+import logging
+from typing import Any, Dict
+
+from requests.models import Response as RequestsResponse
+from werkzeug.datastructures import Headers
+from werkzeug.exceptions import NotFound
+
+from localstack.constants import HEADER_LOCALSTACK_EDGE_URL
+from localstack.http import Request, Response, Router
+from localstack.http.dispatcher import Handler
+from localstack.http.request import restore_payload
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.legacy.helpers import get_api_account_id_and_region
+from localstack.services.apigateway.legacy.invocations import invoke_rest_api_from_request
+from localstack.utils.aws.aws_responses import LambdaResponse
+from localstack.utils.strings import remove_leading_extra_slashes
+
+LOG = logging.getLogger(__name__)
+
+
+# TODO: with the latest snapshot tests, we might start moving away from the
+# invocation context property decorators and use the url_params directly,
+# something that has been requested for a long time.
+def to_invocation_context(
+ request: Request, url_params: Dict[str, Any] = None
+) -> ApiInvocationContext:
+ """
+ Converts an HTTP Request object into an ApiInvocationContext.
+
+ :param request: the original request
+ :param url_params: the parameters extracted from the URL matching rules
+ :return: the ApiInvocationContext
+ """
+ if url_params is None:
+ url_params = {}
+
+ method = request.method
+ # Base path is not URL-decoded.
+ # Example: test%2Balias@gmail.com => test%2Balias@gmail.com
+ raw_uri = path = request.environ.get("RAW_URI")
+ if raw_uri.startswith("//"):
+ # if starts with //, then replace the first // with /
+ path = remove_leading_extra_slashes(raw_uri)
+
+ data = restore_payload(request)
+ headers = Headers(request.headers)
+
+ # TODO: verify that this is needed
+ # adjust the X-Forwarded-For header
+ x_forwarded_for = headers.getlist("X-Forwarded-For")
+ x_forwarded_for.append(request.remote_addr)
+ x_forwarded_for.append(request.host)
+ headers["X-Forwarded-For"] = ", ".join(x_forwarded_for)
+
+ # set the x-localstack-edge header, it is used to parse the domain
+ headers[HEADER_LOCALSTACK_EDGE_URL] = request.host_url.strip("/")
+
+ # FIXME: Use the already parsed url params instead of parsing them into the ApiInvocationContext part-by-part.
+ # We already would have all params at hand to avoid _all_ the parsing, but the parsing
+ # has side-effects (f.e. setting the region in a thread local)!
+ # It would be best to use a small (immutable) context for the already parsed params and the Request object
+ # and use it everywhere.
+ ctx = ApiInvocationContext(method, path, data, headers, stage=url_params.get("stage"))
+ ctx.raw_uri = raw_uri
+ ctx.auth_identity["sourceIp"] = request.remote_addr
+
+ return ctx
+
+
+def convert_response(result: RequestsResponse) -> Response:
+ """
+ Utility function to convert a response for the requests library to our internal (Werkzeug based) Response object.
+ """
+ if result is None:
+ return Response()
+
+ if isinstance(result, LambdaResponse):
+ headers = Headers(dict(result.headers))
+ for k, values in result.multi_value_headers.items():
+ for value in values:
+ headers.add(k, value)
+ else:
+ headers = dict(result.headers)
+
+ response = Response(status=result.status_code, headers=headers)
+
+ if isinstance(result.content, dict):
+ response.set_json(result.content)
+ elif isinstance(result.content, (str, bytes)):
+ response.data = result.content
+ else:
+ raise ValueError(f"Unhandled content type {type(result.content)}")
+
+ return response
+
+
+class ApigatewayRouter:
+ """
+ Simple implementation around a Router to manage dynamic restapi routes (routes added by a user through the
+ apigateway API).
+ """
+
+ router: Router[Handler]
+
+ def __init__(self, router: Router[Handler]):
+ self.router = router
+ self.registered = False
+
+ def register_routes(self) -> None:
+ """Registers parameterized routes for API Gateway user invocations."""
+ if self.registered:
+ LOG.debug("Skipped API Gateway route registration (routes already registered).")
+ return
+ self.registered = True
+ LOG.debug("Registering parameterized API Gateway routes.")
+ host_pattern = "<regex('[^-]+'):api_id><regex('(-vpce-[^.]+)?'):vpce_suffix>.execute-api.<regex('.*'):server>"
+ self.router.add(
+ "/",
+ host=host_pattern,
+ endpoint=self.invoke_rest_api,
+ defaults={"path": "", "stage": None},
+ strict_slashes=True,
+ )
+ self.router.add(
+ "/<stage>/",
+ host=host_pattern,
+ endpoint=self.invoke_rest_api,
+ defaults={"path": ""},
+ strict_slashes=False,
+ )
+ self.router.add(
+ "/<stage>/<greedy_path:path>",
+ host=host_pattern,
+ endpoint=self.invoke_rest_api,
+ strict_slashes=True,
+ )
+
+ # add the localstack-specific _user_request_ routes
+ self.router.add(
+ "/restapis/<api_id>/<stage>/_user_request_",
+ endpoint=self.invoke_rest_api,
+ defaults={"path": ""},
+ )
+ self.router.add(
+ "/restapis/<api_id>/<stage>/_user_request_/<greedy_path:path>",
+ endpoint=self.invoke_rest_api,
+ strict_slashes=True,
+ )
+
+ def invoke_rest_api(self, request: Request, **url_params: str) -> Response:
+ account_id, region_name = get_api_account_id_and_region(url_params["api_id"])
+ if not region_name:
+ return Response(status=404)
+ invocation_context = to_invocation_context(request, url_params)
+ invocation_context.region_name = region_name
+ invocation_context.account_id = account_id
+ result = invoke_rest_api_from_request(invocation_context)
+ if result is not None:
+ return convert_response(result)
+ raise NotFound()
diff --git a/localstack-core/localstack/services/apigateway/legacy/templates.py b/localstack-core/localstack/services/apigateway/legacy/templates.py
new file mode 100644
index 0000000000000..0ae853981ac02
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/templates.py
@@ -0,0 +1,381 @@
+import base64
+import copy
+import json
+import logging
+from enum import Enum
+from typing import Any, Dict, Union
+from urllib.parse import quote_plus, unquote_plus
+
+import xmltodict
+
+from localstack import config
+from localstack.constants import APPLICATION_JSON, APPLICATION_XML
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.legacy.helpers import select_integration_response
+from localstack.utils.aws.templating import APIGW_SOURCE, VelocityUtil, VtlTemplate
+from localstack.utils.json import extract_jsonpath, json_safe, try_json
+from localstack.utils.strings import to_str
+
+LOG = logging.getLogger(__name__)
+
+
+class PassthroughBehavior(Enum):
+ WHEN_NO_MATCH = "WHEN_NO_MATCH"
+ WHEN_NO_TEMPLATES = "WHEN_NO_TEMPLATES"
+ NEVER = "NEVER"
+
+
+class MappingTemplates:
+ """
+ API Gateway uses mapping templates to transform incoming requests before they are sent to the
+ integration back end. With API Gateway, you can define one mapping template for each possible
+ content type. The content type selection is based on the Content-Type header of the incoming
+ request. If no content type is specified in the request, API Gateway uses an application/json
+ mapping template. By default, mapping templates are configured to simply pass through the
+ request input. Mapping templates use Apache Velocity to generate a request to your back end.
+ """
+
+ passthrough_behavior: PassthroughBehavior
+
+ class UnsupportedMediaType(Exception):
+ pass
+
+ def __init__(self, passthrough_behaviour: str):
+ self.passthrough_behavior = self.get_passthrough_behavior(passthrough_behaviour)
+
+ def check_passthrough_behavior(self, request_template):
+ """
+ Specifies how the method request body of an unmapped content type will be passed through
+ the integration request to the back end without transformation.
+ A content type is unmapped if no mapping template is defined in the integration or the
+ content type does not match any of the mapped content types, as specified in requestTemplates
+ """
+ if not request_template and self.passthrough_behavior in {
+ PassthroughBehavior.NEVER,
+ PassthroughBehavior.WHEN_NO_TEMPLATES,
+ }:
+ raise MappingTemplates.UnsupportedMediaType()
+
+ @staticmethod
+ def get_passthrough_behavior(passthrough_behaviour: str):
+ return getattr(PassthroughBehavior, passthrough_behaviour, None)
+
+
+class AttributeDict(dict):
+ """
+ Wrapper returned by VelocityUtilApiGateway.parseJson to allow access to dict values as attributes (dot notation),
+ e.g.: $util.parseJson('$.foo').bar
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(AttributeDict, self).__init__(*args, **kwargs)
+ for key, value in self.items():
+ if isinstance(value, dict):
+ self[key] = AttributeDict(value)
+
+ def __getattr__(self, name):
+ if name in self:
+ return self[name]
+ raise AttributeError(f"'AttributeDict' object has no attribute '{name}'")
+
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __delattr__(self, name):
+ if name in self:
+ del self[name]
+ else:
+ raise AttributeError(f"'AttributeDict' object has no attribute '{name}'")
+
+
+class VelocityUtilApiGateway(VelocityUtil):
+ """
+ Simple class to mimic the behavior of variable '$util' in AWS API Gateway integration
+ velocity templates.
+ See: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+
+ def base64Encode(self, s):
+ if not isinstance(s, str):
+ s = json.dumps(s)
+ encoded_str = s.encode(config.DEFAULT_ENCODING)
+ encoded_b64_str = base64.b64encode(encoded_str)
+ return encoded_b64_str.decode(config.DEFAULT_ENCODING)
+
+ def base64Decode(self, s):
+ if not isinstance(s, str):
+ s = json.dumps(s)
+ return base64.b64decode(s)
+
+ def toJson(self, obj):
+ return obj and json.dumps(obj)
+
+ def urlEncode(self, s):
+ return quote_plus(s)
+
+ def urlDecode(self, s):
+ return unquote_plus(s)
+
+ def escapeJavaScript(self, obj: Any) -> str:
+ """
+ Converts the given object to a string and escapes any regular single quotes (') into escaped ones (\').
+ JSON dumps will escape the single quotes.
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+ if obj is None:
+ return "null"
+ if isinstance(obj, str):
+ # empty string escapes to empty object
+ if len(obj.strip()) == 0:
+ return "{}"
+ return json.dumps(obj)[1:-1]
+ if obj in (True, False):
+ return str(obj).lower()
+ return str(obj)
+
+ def parseJson(self, s: str):
+ obj = json.loads(s)
+ return AttributeDict(obj) if isinstance(obj, dict) else obj
+
+
+class VelocityInput:
+ """
+ Simple class to mimic the behavior of variable '$input' in AWS API Gateway integration
+ velocity templates.
+ See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+
+ def __init__(self, body, params):
+ self.parameters = params or {}
+ self.value = body
+
+ def path(self, path):
+ if not self.value:
+ return {}
+ value = self.value if isinstance(self.value, dict) else json.loads(self.value)
+ return extract_jsonpath(value, path)
+
+ def json(self, path):
+ path = path or "$"
+ matching = self.path(path)
+ if isinstance(matching, (list, dict)):
+ matching = json_safe(matching)
+ return json.dumps(matching)
+
+ @property
+ def body(self):
+ return self.value
+
+ def params(self, name=None):
+ if not name:
+ return self.parameters
+ for k in ["path", "querystring", "header"]:
+ if val := self.parameters.get(k).get(name):
+ return val
+ return ""
+
+ def __getattr__(self, name):
+ return self.value.get(name)
+
+ def __repr__(self):
+ return "$input"
+
+
+class ApiGatewayVtlTemplate(VtlTemplate):
+ """Util class for rendering VTL templates with API Gateway specific extensions"""
+
+ def prepare_namespace(self, variables, source: str = APIGW_SOURCE) -> Dict[str, Any]:
+ namespace = super().prepare_namespace(variables, source)
+ if stage_var := variables.get("stage_variables") or {}:
+ namespace["stageVariables"] = stage_var
+ input_var = variables.get("input") or {}
+ variables = {
+ "input": VelocityInput(input_var.get("body"), input_var.get("params")),
+ "util": VelocityUtilApiGateway(),
+ }
+ namespace.update(variables)
+ return namespace
+
+
+class Templates:
+ __slots__ = ["vtl"]
+
+ def __init__(self):
+ self.vtl = ApiGatewayVtlTemplate()
+
+ def render(self, api_context: ApiInvocationContext) -> Union[bytes, str]:
+ pass
+
+ def render_vtl(self, template, variables):
+ return self.vtl.render_vtl(template, variables=variables)
+
+ @staticmethod
+ def build_variables_mapping(api_context: ApiInvocationContext) -> dict[str, Any]:
+ # TODO: make this (dict) an object so usages of "render_vtl" variables are defined
+ ctx = copy.deepcopy(api_context.context or {})
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-override-request-response-parameters.html
+ # create namespace for request override
+ ctx["requestOverride"] = {
+ "header": {},
+ "path": {},
+ "querystring": {},
+ }
+
+ ctx["responseOverride"] = {
+ "header": {},
+ "status": 200,
+ }
+
+ return {
+ "context": ctx,
+ "stage_variables": api_context.stage_variables or {},
+ "input": {
+ "body": api_context.data_as_string(),
+ "params": {
+ "path": api_context.path_params,
+ "querystring": api_context.query_params(),
+ # Sometimes we get a werkzeug.datastructures.Headers object, sometimes a dict
+ # depending on the request. We need to convert to a dict to be able to render
+ # the template.
+ "header": dict(api_context.headers),
+ },
+ },
+ }
+
+
+class RequestTemplates(Templates):
+ """
+ Handles request template rendering
+ """
+
+ def render(
+ self, api_context: ApiInvocationContext, template_key: str = APPLICATION_JSON
+ ) -> Union[bytes, str]:
+ LOG.debug(
+ "Method request body before transformations: %s", to_str(api_context.data_as_string())
+ )
+ request_templates = api_context.integration.get("requestTemplates", {})
+ template = request_templates.get(template_key)
+ if not template:
+ return api_context.data_as_string()
+
+ variables = self.build_variables_mapping(api_context)
+ result = self.render_vtl(template.strip(), variables=variables)
+
+ # set the request overrides into context
+ api_context.headers.update(
+ variables.get("context", {}).get("requestOverride", {}).get("header", {})
+ )
+
+ LOG.debug("Endpoint request body after transformations:\n%s", result)
+ return result
+
+
+class ResponseTemplates(Templates):
+ """
+ Handles response template rendering. The integration response status code is used to select
+ the correct template to render, if there is no template for the status code, the default
+ template is used.
+ """
+
+ def render(self, api_context: ApiInvocationContext, **kwargs) -> Union[bytes, str]:
+ # XXX: keep backwards compatibility until we migrate all integrations to these new classes
+ # api_context contains a response object that we want slowly remove from it
+ data = kwargs.get("response", "")
+ response = data or api_context.response
+ integration = api_context.integration
+ # we set context data with the response content because later on we use context data as
+ # the body field in the template. We need to improve this by using the right source
+ # depending on the type of templates.
+ api_context.data = response._content
+
+ # status code returned by the integration
+ status_code = str(response.status_code)
+
+ # get the integration responses configuration from the integration object
+ integration_responses = integration.get("integrationResponses")
+ if not integration_responses:
+ return response._content
+
+ # get the configured integration response status codes,
+ # e.g. ["200", "400", "500"]
+ integration_status_codes = [str(code) for code in list(integration_responses.keys())]
+ # if there are no integration responses, we return the response as is
+ if not integration_status_codes:
+ return response.content
+
+ # The following code handles two use cases. If there is an integration response for the status code returned
+ # by the integration, we use the template configured for that status code (1) or the errorMessage (2) for
+ # lambda integrations.
+ # For an HTTP integration, API Gateway matches the regex to the HTTP status code to return
+ # For a Lambda function, API Gateway matches the regex to the errorMessage header to
+ # return a status code.
+ # For example, to set a 400 response for any error that starts with Malformed,
+ # set the method response status code to 400 and the Lambda error regex to Malformed.*.
+ match_resp = status_code
+ if isinstance(try_json(response._content), dict):
+ resp_dict = try_json(response._content)
+ if "errorMessage" in resp_dict:
+ match_resp = resp_dict.get("errorMessage")
+
+ selected_integration_response = select_integration_response(match_resp, api_context)
+ response.status_code = int(selected_integration_response.get("statusCode", 200))
+ response_templates = selected_integration_response.get("responseTemplates", {})
+
+ # we only support JSON and XML templates for now - if there is no template we return the response as is
+ # If the content type is not supported we always use application/json as default value
+ # TODO - support other content types, besides application/json and application/xml
+ # see https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#selecting-mapping-templates
+ accept = api_context.headers.get("accept", APPLICATION_JSON)
+ supported_types = [APPLICATION_JSON, APPLICATION_XML]
+ media_type = accept if accept in supported_types else APPLICATION_JSON
+ if not (template := response_templates.get(media_type, {})):
+ return response._content
+
+ # we render the template with the context data and the response content
+ variables = self.build_variables_mapping(api_context)
+ # update the response body
+ response._content = self._render_as_text(template, variables)
+ if media_type == APPLICATION_JSON:
+ self._validate_json(response.content)
+ elif media_type == APPLICATION_XML:
+ self._validate_xml(response.content)
+
+ if response_overrides := variables.get("context", {}).get("responseOverride", {}):
+ response.headers.update(response_overrides.get("header", {}).items())
+ response.status_code = response_overrides.get("status", 200)
+
+ LOG.debug("Endpoint response body after transformations:\n%s", response._content)
+ return response._content
+
+ def _render_as_text(self, template: str, variables: dict[str, Any]) -> str:
+ """
+ Render the given Velocity template string + variables into a plain string.
+ :return: the template rendering result as a string
+ """
+ rendered_tpl = self.render_vtl(template, variables=variables)
+ return rendered_tpl.strip()
+
+ @staticmethod
+ def _validate_json(content: str):
+ """
+ Checks that the content received is a valid JSON.
+ :raise JSONDecodeError: if content is not valid JSON
+ """
+ try:
+ json.loads(content)
+ except Exception as e:
+ LOG.info("Unable to parse template result as JSON: %s - %s", e, content)
+ raise
+
+ @staticmethod
+ def _validate_xml(content: str):
+ """
+ Checks that the content received is a valid XML.
+ :raise xml.parsers.expat.ExpatError: if content is not valid XML
+ """
+ try:
+ xmltodict.parse(content)
+ except Exception as e:
+ LOG.info("Unable to parse template result as XML: %s - %s", e, content)
+ raise
diff --git a/localstack-core/localstack/services/apigateway/models.py b/localstack-core/localstack/services/apigateway/models.py
new file mode 100644
index 0000000000000..44fca6b65ae29
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/models.py
@@ -0,0 +1,155 @@
+from typing import Any, Dict, List
+
+from requests.structures import CaseInsensitiveDict
+
+from localstack.aws.api.apigateway import (
+ Authorizer,
+ DocumentationPart,
+ DocumentationVersion,
+ DomainName,
+ GatewayResponse,
+ GatewayResponseType,
+ Model,
+ RequestValidator,
+ Resource,
+ RestApi,
+)
+from localstack.services.stores import (
+ AccountRegionBundle,
+ BaseStore,
+ CrossAccountAttribute,
+ CrossRegionAttribute,
+ LocalAttribute,
+)
+from localstack.utils.aws import arns
+
+
+class RestApiContainer:
+ # contains the RestApi dictionary. We're not making use of it yet, still using moto data.
+ rest_api: RestApi
+ # maps AuthorizerId -> Authorizer
+ authorizers: Dict[str, Authorizer]
+ # maps RequestValidatorId -> RequestValidator
+ validators: Dict[str, RequestValidator]
+ # map DocumentationPartId -> DocumentationPart
+ documentation_parts: Dict[str, DocumentationPart]
+ # map doc version name -> DocumentationVersion
+ documentation_versions: Dict[str, DocumentationVersion]
+ # not used yet, still in moto
+ gateway_responses: Dict[GatewayResponseType, GatewayResponse]
+ # maps Model name -> Model
+ models: Dict[str, Model]
+ # maps Model name -> resolved dict Model, so we don't need to load the JSON every time
+ resolved_models: Dict[str, dict]
+ # maps ResourceId of a Resource to its children ResourceIds
+ resource_children: Dict[str, List[str]]
+
+ def __init__(self, rest_api: RestApi):
+ self.rest_api = rest_api
+ self.authorizers = {}
+ self.validators = {}
+ self.documentation_parts = {}
+ self.documentation_versions = {}
+ self.gateway_responses = {}
+ self.models = {}
+ self.resolved_models = {}
+ self.resource_children = {}
+
+
+class MergedRestApi(RestApiContainer):
+ """Merged REST API between Moto data and LocalStack data, used in our Invocation logic"""
+
+ # TODO: when migrating away from Moto, RestApiContainer and MergedRestApi will have the same signature, so we can
+ # safely remove it and only use RestApiContainer in our invocation logic
+ resources: dict[str, Resource]
+
+ def __init__(self, rest_api: RestApi):
+ super().__init__(rest_api)
+ self.resources = {}
+
+ @classmethod
+ def from_rest_api_container(
+ cls,
+ rest_api_container: RestApiContainer,
+ resources: dict[str, Resource],
+ ) -> "MergedRestApi":
+ merged = cls(rest_api=rest_api_container.rest_api)
+ merged.authorizers = rest_api_container.authorizers
+ merged.validators = rest_api_container.validators
+ merged.documentation_parts = rest_api_container.documentation_parts
+ merged.documentation_versions = rest_api_container.documentation_versions
+ merged.gateway_responses = rest_api_container.gateway_responses
+ merged.models = rest_api_container.models
+ merged.resolved_models = rest_api_container.resolved_models
+ merged.resource_children = rest_api_container.resource_children
+ merged.resources = resources
+
+ return merged
+
+
+class RestApiDeployment:
+ def __init__(
+ self,
+ account_id: str,
+ region: str,
+ rest_api: MergedRestApi,
+ ):
+ self.rest_api = rest_api
+ self.account_id = account_id
+ self.region = region
+
+
+class ApiGatewayStore(BaseStore):
+ # maps (API id) -> RestApiContainer
+ # TODO: remove CaseInsensitiveDict, and lower the value of the ID when getting it from the tags
+ rest_apis: Dict[str, RestApiContainer] = LocalAttribute(default=CaseInsensitiveDict)
+
+ # account details
+ _account: Dict[str, Any] = LocalAttribute(default=dict)
+
+ # maps (domain_name) -> [path_mappings]
+ base_path_mappings: Dict[str, List[Dict]] = LocalAttribute(default=dict)
+
+ # maps ID to VPC link details
+ vpc_links: Dict[str, Dict] = LocalAttribute(default=dict)
+
+ # maps cert ID to client certificate details
+ client_certificates: Dict[str, Dict] = LocalAttribute(default=dict)
+
+ # maps domain name to domain name model
+ domain_names: Dict[str, DomainName] = LocalAttribute(default=dict)
+
+ # maps resource ARN to tags
+ TAGS: Dict[str, Dict[str, str]] = CrossRegionAttribute(default=dict)
+
+ # internal deployments, represents a frozen REST API for a deployment, used in our router
+ # TODO: make sure API ID are unique across all accounts
+ # maps ApiID to a map of deploymentId and RestApiDeployment, an executable/snapshot of a REST API
+ internal_deployments: dict[str, dict[str, RestApiDeployment]] = CrossAccountAttribute(
+ default=dict
+ )
+
+ # active deployments, mapping API ID to a map of Stage and deployment ID
+ # TODO: make sure API ID are unique across all accounts
+ active_deployments: dict[str, dict[str, str]] = CrossAccountAttribute(dict)
+
+ def __init__(self):
+ super().__init__()
+
+ @property
+ def account(self):
+ if not self._account:
+ self._account.update(
+ {
+ "cloudwatchRoleArn": arns.iam_role_arn(
+ "api-gw-cw-role", self._account_id, self._region_name
+ ),
+ "throttleSettings": {"burstLimit": 1000, "rateLimit": 500},
+ "features": ["UsagePlans"],
+ "apiKeyVersion": "1",
+ }
+ )
+ return self._account
+
+
+apigateway_stores = AccountRegionBundle("apigateway", ApiGatewayStore)
diff --git a/localstack/utils/cloudformation/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/__init__.py
similarity index 100%
rename from localstack/utils/cloudformation/__init__.py
rename to localstack-core/localstack/services/apigateway/next_gen/__init__.py
diff --git a/localstack/utils/cloudwatch/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/__init__.py
similarity index 100%
rename from localstack/utils/cloudwatch/__init__.py
rename to localstack-core/localstack/services/apigateway/next_gen/execute_api/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py
new file mode 100644
index 0000000000000..843938e0611ed
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py
@@ -0,0 +1,17 @@
+from typing import Callable, Type
+
+from rolo import Response
+from rolo.gateway.chain import HandlerChain as RoloHandlerChain
+
+from .context import RestApiInvocationContext
+
+RestApiGatewayHandler = Callable[
+ [RoloHandlerChain[RestApiInvocationContext], RestApiInvocationContext, Response], None
+]
+
+RestApiGatewayExceptionHandler = Callable[
+ [RoloHandlerChain[RestApiInvocationContext], Exception, RestApiInvocationContext, Response],
+ None,
+]
+
+RestApiGatewayHandlerChain: Type[RoloHandlerChain[RestApiInvocationContext]] = RoloHandlerChain
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py
new file mode 100644
index 0000000000000..9f6be795d9af8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py
@@ -0,0 +1,141 @@
+from http import HTTPMethod
+from typing import Optional, TypedDict
+
+from rolo import Request
+from rolo.gateway import RequestContext
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import Integration, Method, Resource, Stage
+from localstack.services.apigateway.models import RestApiDeployment
+
+from .variables import ContextVariableOverrides, ContextVariables, LoggingContextVariables
+
+
+class InvocationRequest(TypedDict, total=False):
+ http_method: HTTPMethod
+ """HTTP Method of the incoming request"""
+ raw_path: Optional[str]
+ # TODO: verify if raw_path is needed
+ """Raw path of the incoming request with no modification, needed to keep double forward slashes"""
+ path: Optional[str]
+ """Path of the request with no URL decoding"""
+ path_parameters: Optional[dict[str, str]]
+ """Path parameters of the request"""
+ query_string_parameters: dict[str, str]
+ """Query string parameters of the request"""
+ headers: Headers
+ """Raw headers using the Headers datastructure which allows access with no regards to casing"""
+ multi_value_query_string_parameters: dict[str, list[str]]
+ """Multi value query string parameters of the request"""
+ body: bytes
+ """Body content of the request"""
+
+
+class IntegrationRequest(TypedDict, total=False):
+ http_method: HTTPMethod
+ """HTTP Method of the incoming request"""
+ uri: str
+ """URI of the integration"""
+ query_string_parameters: dict[str, str | list[str]]
+ """Query string parameters of the request"""
+ headers: Headers
+ """Headers of the request"""
+ body: bytes
+ """Body content of the request"""
+
+
+class BaseResponse(TypedDict):
+ """Base class for Response objects in the context"""
+
+ status_code: int
+ """Status code of the response"""
+ headers: Headers
+ """Headers of the response"""
+ body: bytes
+ """Body content of the response"""
+
+
+class EndpointResponse(BaseResponse):
+ """Represents the response coming from an integration, called Endpoint Response in AWS"""
+
+ pass
+
+
+class InvocationResponse(BaseResponse):
+ """Represents the response coming after being serialized in an Integration Response in AWS"""
+
+ pass
+
+
+class RestApiInvocationContext(RequestContext):
+ """
+ This context is going to be used to pass relevant information across an API Gateway invocation.
+ """
+
+ deployment: Optional[RestApiDeployment]
+ """Contains the invoked REST API Resources"""
+ integration: Optional[Integration]
+ """The Method Integration for the invoked request"""
+ api_id: Optional[str]
+ """The REST API identifier of the invoked API"""
+ stage: Optional[str]
+ """The REST API stage name linked to this invocation"""
+ base_path: Optional[str]
+ """The REST API base path mapped to the stage of this invocation"""
+ deployment_id: Optional[str]
+ """The REST API deployment linked to this invocation"""
+ region: Optional[str]
+ """The region the REST API is living in."""
+ account_id: Optional[str]
+ """The account the REST API is living in."""
+ trace_id: Optional[str]
+ """The X-Ray trace ID for the request."""
+ resource: Optional[Resource]
+ """The resource the invocation matched"""
+ resource_method: Optional[Method]
+ """The method of the resource the invocation matched"""
+ stage_variables: Optional[dict[str, str]]
+ """The Stage variables, also used in parameters mapping and mapping templates"""
+ stage_configuration: Optional[Stage]
+ """The Stage configuration, containing canary deployment settings"""
+ is_canary: Optional[bool]
+ """If the current call was directed to a canary deployment"""
+ context_variables: Optional[ContextVariables]
+ """The $context used in data models, authorizers, mapping templates, and CloudWatch access logging"""
+ context_variable_overrides: Optional[ContextVariableOverrides]
+ """requestOverrides and responseOverrides are passed from request templates to response templates but are
+ not in the integration context"""
+ logging_context_variables: Optional[LoggingContextVariables]
+ """Additional $context variables available only for access logging, not yet implemented"""
+ invocation_request: Optional[InvocationRequest]
+ """Contains the data relative to the invocation request"""
+ integration_request: Optional[IntegrationRequest]
+ """Contains the data needed to construct an HTTP request to an Integration"""
+ endpoint_response: Optional[EndpointResponse]
+ """Contains the data returned by an Integration"""
+ invocation_response: Optional[InvocationResponse]
+ """Contains the data serialized and to be returned by an invocation"""
+
+ def __init__(self, request: Request):
+ super().__init__(request)
+ self.deployment = None
+ self.api_id = None
+ self.stage = None
+ self.base_path = None
+ self.deployment_id = None
+ self.account_id = None
+ self.region = None
+ self.invocation_request = None
+ self.resource = None
+ self.resource_method = None
+ self.integration = None
+ self.stage_variables = None
+ self.stage_configuration = None
+ self.is_canary = None
+ self.context_variables = None
+ self.logging_context_variables = None
+ self.integration_request = None
+ self.endpoint_response = None
+ self.invocation_response = None
+ self.trace_id = None
+ self.context_variable_overrides = None
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py
new file mode 100644
index 0000000000000..85a31da903fde
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py
@@ -0,0 +1,50 @@
+from rolo import Response
+from rolo.gateway import Gateway
+
+from . import handlers
+from .context import RestApiInvocationContext
+
+
class RestApiGateway(Gateway):
    """
    This class controls the main path of an API Gateway REST API. It contains the definitions of the different handlers
    to be called as part of the different steps of the invocation of the API.

    For now, you can extend the behavior of the invocation by adding handlers to the `preprocess_request`
    CompositeHandler.
    The documentation of this class will be extended as more behavior will be added to its handlers, as well as more
    ways to extend it.
    """

    def __init__(self):
        super().__init__(context_class=RestApiInvocationContext)

        # Request chain: parsing, routing, validation, then the integration round-trip.
        request_chain = (
            handlers.parse_request,
            handlers.modify_request,
            handlers.route_request,
            handlers.preprocess_request,
            handlers.api_key_validation_handler,
            handlers.method_request_handler,
            handlers.integration_request_handler,
            handlers.integration_handler,
            handlers.integration_response_handler,
            handlers.method_response_handler,
        )
        self.request_handlers.extend(request_chain)

        # Exceptions raised by any request handler are serialized here.
        self.exception_handlers.append(handlers.gateway_exception_handler)

        self.response_handlers.extend(
            (
                handlers.response_enricher,
                handlers.usage_counter,
                # add composite response handlers?
            )
        )

    def process_with_context(self, context: RestApiInvocationContext, response: Response):
        """Run a fresh handler chain over an already-built invocation context."""
        handler_chain = self.new_chain()
        handler_chain.handle(context, response)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py
new file mode 100644
index 0000000000000..a0e9935ccf775
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py
@@ -0,0 +1,298 @@
+from enum import Enum
+
+from localstack.aws.api.apigateway import (
+ GatewayResponse,
+ GatewayResponseType,
+ MapOfStringToString,
+ StatusCode,
+)
+from localstack.constants import APPLICATION_JSON
+
+
class GatewayResponseCode(StatusCode, Enum):
    """Default HTTP status code for each gateway response type.

    Looked up by member name (``GatewayResponseCode[response_type]``).
    Note: several members share the same value (e.g. multiple "403"s), which
    makes the later ones enum *aliases* of the first — name-based lookup still
    works for aliases, so this is intentional and safe here.
    DEFAULT_4XX/DEFAULT_5XX deliberately have no code of their own (empty string).
    """

    REQUEST_TOO_LARGE = "413"
    RESOURCE_NOT_FOUND = "404"
    AUTHORIZER_CONFIGURATION_ERROR = "500"
    MISSING_AUTHENTICATION_TOKEN = "403"
    BAD_REQUEST_BODY = "400"
    INVALID_SIGNATURE = "403"
    INVALID_API_KEY = "403"
    BAD_REQUEST_PARAMETERS = "400"
    AUTHORIZER_FAILURE = "500"
    UNAUTHORIZED = "401"
    INTEGRATION_TIMEOUT = "504"
    ACCESS_DENIED = "403"
    DEFAULT_4XX = ""
    DEFAULT_5XX = ""
    WAF_FILTERED = "403"
    QUOTA_EXCEEDED = "429"
    THROTTLED = "429"
    API_CONFIGURATION_ERROR = "500"
    UNSUPPORTED_MEDIA_TYPE = "415"
    INTEGRATION_FAILURE = "504"
    EXPIRED_TOKEN = "403"
+
class BaseGatewayException(Exception):
    """
    Base class for all Gateway exceptions.
    Do not raise from this class directly. Instead, raise the specific Exception
    """

    message: str = "Unimplemented Response"
    type: GatewayResponseType = None
    status_code: int | str = None
    code: str = ""

    def __init__(self, message: str = None, status_code: int | str = None):
        # An explicit message overrides the class-level default.
        if message is not None:
            self.message = message

        # An explicit status code always wins.
        if status_code is not None:
            self.status_code = status_code
            return

        # Otherwise, when the subclass did not pin a status code, fall back to
        # the default code associated with this gateway response type.
        if self.status_code is None and self.type:
            self.status_code = GatewayResponseCode[self.type]
+
+
class Default4xxError(BaseGatewayException):
    """Generic client error (DEFAULT_4XX gateway response, status 400).

    Do not raise from this class directly.
    Use one of the subclasses instead, as they contain the appropriate header
    """

    type = GatewayResponseType.DEFAULT_4XX
    status_code = 400
+
+
class Default5xxError(BaseGatewayException):
    """Generic server error (DEFAULT_5XX gateway response, status 500).

    Do not raise from this class directly.
    Use one of the subclasses instead, as they contain the appropriate header
    """

    type = GatewayResponseType.DEFAULT_5XX
    status_code = 500
+
+
class BadRequestException(Default4xxError):
    """400 client error serialized with the ``BadRequestException`` error code."""

    code = "BadRequestException"
+
+
class InternalFailureException(Default5xxError):
    """500 server error serialized with the ``InternalFailureException`` error code."""

    code = "InternalFailureException"
+
+
class InternalServerError(Default5xxError):
    """500 server error serialized with the ``InternalServerErrorException`` error code."""

    code = "InternalServerErrorException"
+
+
class AccessDeniedError(BaseGatewayException):
    """ACCESS_DENIED gateway response (default status 403)."""

    type = GatewayResponseType.ACCESS_DENIED
    # TODO validate this header with aws validated tests
    code = "AccessDeniedException"
+
+
class ApiConfigurationError(BaseGatewayException):
    """API_CONFIGURATION_ERROR gateway response (default status 500)."""

    type = GatewayResponseType.API_CONFIGURATION_ERROR
    # TODO validate this header with aws validated tests
    code = "ApiConfigurationException"
+
+
class AuthorizerConfigurationError(BaseGatewayException):
    """AUTHORIZER_CONFIGURATION_ERROR gateway response (default status 500)."""

    type = GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR
    # TODO validate this header with aws validated tests
    code = "AuthorizerConfigurationException"
    # the message is set to None by default in AWS
    message = None
+
+
class AuthorizerFailureError(BaseGatewayException):
    """AUTHORIZER_FAILURE gateway response (default status 500)."""

    type = GatewayResponseType.AUTHORIZER_FAILURE
    # TODO validate this header with aws validated tests
    code = "AuthorizerFailureException"
+
+
class BadRequestParametersError(BaseGatewayException):
    """BAD_REQUEST_PARAMETERS gateway response (default status 400)."""

    type = GatewayResponseType.BAD_REQUEST_PARAMETERS
    code = "BadRequestException"
+
+
class BadRequestBodyError(BaseGatewayException):
    """BAD_REQUEST_BODY gateway response (default status 400)."""

    type = GatewayResponseType.BAD_REQUEST_BODY
    code = "BadRequestException"
+
+
class ExpiredTokenError(BaseGatewayException):
    """EXPIRED_TOKEN gateway response (default status 403)."""

    type = GatewayResponseType.EXPIRED_TOKEN
    # TODO validate this header with aws validated tests
    code = "ExpiredTokenException"
+
+
class IntegrationFailureError(BaseGatewayException):
    """INTEGRATION_FAILURE gateway response.

    Note: the class pins ``status_code = 500`` here, overriding the default
    "504" that ``GatewayResponseCode.INTEGRATION_FAILURE`` would provide.
    """

    type = GatewayResponseType.INTEGRATION_FAILURE
    code = "InternalServerErrorException"
    status_code = 500
+
+
class IntegrationTimeoutError(BaseGatewayException):
    """INTEGRATION_TIMEOUT gateway response (default status 504)."""

    type = GatewayResponseType.INTEGRATION_TIMEOUT
    code = "InternalServerErrorException"
+
+
class InvalidAPIKeyError(BaseGatewayException):
    """INVALID_API_KEY gateway response (default status 403)."""

    type = GatewayResponseType.INVALID_API_KEY
    code = "ForbiddenException"
+
+
class InvalidSignatureError(BaseGatewayException):
    """INVALID_SIGNATURE gateway response (default status 403)."""

    type = GatewayResponseType.INVALID_SIGNATURE
    # TODO validate this header with aws validated tests
    code = "InvalidSignatureException"
+
+
class MissingAuthTokenError(BaseGatewayException):
    """MISSING_AUTHENTICATION_TOKEN gateway response (default status 403)."""

    type = GatewayResponseType.MISSING_AUTHENTICATION_TOKEN
    code = "MissingAuthenticationTokenException"
+
+
class QuotaExceededError(BaseGatewayException):
    """QUOTA_EXCEEDED gateway response (default status 429)."""

    type = GatewayResponseType.QUOTA_EXCEEDED
    code = "LimitExceededException"
+
+
class RequestTooLargeError(BaseGatewayException):
    """REQUEST_TOO_LARGE gateway response (default status 413)."""

    type = GatewayResponseType.REQUEST_TOO_LARGE
    # TODO validate this header with aws validated tests
    code = "RequestTooLargeException"
+
+
class ResourceNotFoundError(BaseGatewayException):
    """RESOURCE_NOT_FOUND gateway response (default status 404)."""

    type = GatewayResponseType.RESOURCE_NOT_FOUND
    # TODO validate this header with aws validated tests
    code = "ResourceNotFoundException"
+
+
class ThrottledError(BaseGatewayException):
    """THROTTLED gateway response (default status 429)."""

    type = GatewayResponseType.THROTTLED
    code = "TooManyRequestsException"
+
+
class UnauthorizedError(BaseGatewayException):
    """UNAUTHORIZED gateway response (default status 401)."""

    type = GatewayResponseType.UNAUTHORIZED
    code = "UnauthorizedException"
+
+
class UnsupportedMediaTypeError(BaseGatewayException):
    """UNSUPPORTED_MEDIA_TYPE gateway response (default status 415)."""

    type = GatewayResponseType.UNSUPPORTED_MEDIA_TYPE
    code = "BadRequestException"
+
+
class WafFilteredError(BaseGatewayException):
    """WAF_FILTERED gateway response (default status 403)."""

    type = GatewayResponseType.WAF_FILTERED
    # TODO validate this header with aws validated tests
    code = "WafFilteredException"
+
+
def build_gateway_response(
    response_type: GatewayResponseType,
    status_code: StatusCode = None,
    response_parameters: MapOfStringToString = None,
    response_templates: MapOfStringToString = None,
    default_response: bool = True,
) -> GatewayResponse:
    """Build a Gateway Response; attributes not provided fall back to defaults."""
    # Default body template renders the error message as JSON.
    if not response_templates:
        response_templates = {APPLICATION_JSON: '{"message":$context.error.messageString}'}

    return GatewayResponse(
        responseParameters=response_parameters or {},
        responseTemplates=response_templates,
        responseType=response_type,
        defaultResponse=default_response,
        statusCode=status_code,
    )
+
+
def get_gateway_response_or_default(
    response_type: GatewayResponseType,
    gateway_responses: dict[GatewayResponseType, GatewayResponse],
) -> GatewayResponse:
    """Look up the Gateway Response to use for ``response_type``, in order:

    - the user-configured response for that exact type, if present in ``gateway_responses``
    - a response built from the user-configured ``DEFAULT_4XX``/``DEFAULT_5XX`` entry
    - otherwise the hard-coded entry from ``DEFAULT_GATEWAY_RESPONSES``
    """

    if response := gateway_responses.get(response_type):
        # User configured response
        return response
    # Lookup by member name; assumes GatewayResponseType values match the
    # GatewayResponseCode member names — TODO confirm against the API spec.
    response_code = GatewayResponseCode[response_type]
    if response_code == "":
        # DEFAULT_XXX response do not have a default code
        return DEFAULT_GATEWAY_RESPONSES.get(response_type)
    # status codes are 3-digit strings, so lexicographic >= "500" matches 5xx codes
    if response_code >= "500":
        # 5XX response will either get a user configured DEFAULT_5XX or the DEFAULT_GATEWAY_RESPONSES
        default = gateway_responses.get(GatewayResponseType.DEFAULT_5XX)
    else:
        # 4XX response will either get a user configured DEFAULT_4XX or the DEFAULT_GATEWAY_RESPONSES
        default = gateway_responses.get(GatewayResponseType.DEFAULT_4XX)

    if not default:
        # If DEFAULT_XXX was not provided return default
        return DEFAULT_GATEWAY_RESPONSES.get(response_type)

    return build_gateway_response(
        # Build a new response from default
        response_type,
        status_code=default.get("statusCode"),
        response_parameters=default.get("responseParameters"),
        response_templates=default.get("responseTemplates"),
    )
+
+
# Hard-coded default Gateway Response for every supported response type,
# built once at import time with build_gateway_response() defaults.
DEFAULT_GATEWAY_RESPONSES = {
    _response_type: build_gateway_response(_response_type)
    for _response_type in (
        GatewayResponseType.REQUEST_TOO_LARGE,
        GatewayResponseType.RESOURCE_NOT_FOUND,
        GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR,
        GatewayResponseType.MISSING_AUTHENTICATION_TOKEN,
        GatewayResponseType.BAD_REQUEST_BODY,
        GatewayResponseType.INVALID_SIGNATURE,
        GatewayResponseType.INVALID_API_KEY,
        GatewayResponseType.BAD_REQUEST_PARAMETERS,
        GatewayResponseType.AUTHORIZER_FAILURE,
        GatewayResponseType.UNAUTHORIZED,
        GatewayResponseType.INTEGRATION_TIMEOUT,
        GatewayResponseType.ACCESS_DENIED,
        GatewayResponseType.DEFAULT_4XX,
        GatewayResponseType.DEFAULT_5XX,
        GatewayResponseType.WAF_FILTERED,
        GatewayResponseType.QUOTA_EXCEEDED,
        GatewayResponseType.THROTTLED,
        GatewayResponseType.API_CONFIGURATION_ERROR,
        GatewayResponseType.UNSUPPORTED_MEDIA_TYPE,
        GatewayResponseType.INTEGRATION_FAILURE,
        GatewayResponseType.EXPIRED_TOKEN,
    )
}
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py
new file mode 100644
index 0000000000000..e9e1dcb618166
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py
@@ -0,0 +1,29 @@
+from rolo.gateway import CompositeHandler
+
+from localstack.services.apigateway.analytics import invocation_counter
+
+from .analytics import IntegrationUsageCounter
+from .api_key_validation import ApiKeyValidationHandler
+from .gateway_exception import GatewayExceptionHandler
+from .integration import IntegrationHandler
+from .integration_request import IntegrationRequestHandler
+from .integration_response import IntegrationResponseHandler
+from .method_request import MethodRequestHandler
+from .method_response import MethodResponseHandler
+from .parse import InvocationRequestParser
+from .resource_router import InvocationRequestRouter
+from .response_enricher import InvocationResponseEnricher
+
# Singleton handler instances wired into RestApiGateway's handler chains.
parse_request = InvocationRequestParser()
# CompositeHandler: extension point to mutate the request before routing.
modify_request = CompositeHandler()
route_request = InvocationRequestRouter()
# CompositeHandler: extension point invoked after routing, before validation.
preprocess_request = CompositeHandler()
method_request_handler = MethodRequestHandler()
integration_request_handler = IntegrationRequestHandler()
integration_handler = IntegrationHandler()
integration_response_handler = IntegrationResponseHandler()
method_response_handler = MethodResponseHandler()
gateway_exception_handler = GatewayExceptionHandler()
api_key_validation_handler = ApiKeyValidationHandler()
response_enricher = InvocationResponseEnricher()
usage_counter = IntegrationUsageCounter(counter=invocation_counter)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py
new file mode 100644
index 0000000000000..46fe8d06a9e9e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py
@@ -0,0 +1,48 @@
+import logging
+
+from localstack.http import Response
+from localstack.utils.analytics.metrics import LabeledCounter
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import RestApiInvocationContext
+
+LOG = logging.getLogger(__name__)
+
+
class IntegrationUsageCounter(RestApiGatewayHandler):
    """Response handler that records, per invocation, which integration type served it.

    AWS integrations are labeled ``AWS:<service>``; invocations that never got an
    integration attached (routing failed earlier) are counted as ``NOT_FOUND``.
    """

    counter: LabeledCounter

    def __init__(self, counter: LabeledCounter):
        self.counter = counter

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        if context.integration:
            invocation_type = context.integration["type"]
            if invocation_type == "AWS":
                service_name = self._get_aws_integration_service(context.integration.get("uri"))
                invocation_type = f"{invocation_type}:{service_name}"
        else:
            # if the invocation does not have an integration attached, it probably failed before routing the request,
            # hence we should count it as a NOT_FOUND invocation
            invocation_type = "NOT_FOUND"

        self.counter.labels(invocation_type=invocation_type).increment()

    @staticmethod
    def _get_aws_integration_service(integration_uri: str) -> str:
        """Extract the AWS service name from an ``arn:aws:apigateway:<region>:<service>:...`` URI.

        Returns ``"null"`` for empty or malformed URIs that do not contain a
        service segment.
        """
        if not integration_uri:
            return "null"

        split_arn = integration_uri.split(":", maxsplit=5)
        # The service sits at index 4, so we need at least 5 segments.
        # Bug fix: the previous guard (`< 4`) let a 4-element split through and
        # then raised IndexError on `split_arn[4]`.
        if len(split_arn) < 5:
            return "null"

        service = split_arn[4]
        # the URI can also contain some .-api kind of route like `execute-api` or `appsync-api`
        # we need to make sure we do not pass the full value back
        service = service.split(".")[-1]
        return service
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py
new file mode 100644
index 0000000000000..ba8ada9769f17
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py
@@ -0,0 +1,113 @@
+import logging
+from typing import Optional
+
+from localstack.aws.api.apigateway import ApiKey, ApiKeySourceType, RestApi
+from localstack.http import Response
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..gateway_response import InvalidAPIKeyError
+from ..moto_helpers import get_api_key, get_usage_plan_keys, get_usage_plans
+from ..variables import ContextVarsIdentity
+
+LOG = logging.getLogger(__name__)
+
+
class ApiKeyValidationHandler(RestApiGatewayHandler):
    """
    Handles Api key validation.
    If an api key is required by the method, we validate that a usage plan associated
    with the deployment stage has a usage plan key with the corresponding value.
    """

    # TODO We currently do not support rate limiting or quota limit. As such we are not raising any related Exception

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        """Validate the request's API key and record it in the identity context.

        Raises :class:`InvalidAPIKeyError` when the key is missing, unknown,
        or disabled.
        """
        method = context.resource_method
        request = context.invocation_request
        rest_api = context.deployment.rest_api.rest_api

        # If api key is not required by the method, we can exit the handler
        if not method.get("apiKeyRequired"):
            return

        identity = context.context_variables.get("identity")

        # Look for the api key value in the request. If it is not found, raise an exception
        if not (api_key_value := self.get_request_api_key(rest_api, request, identity)):
            LOG.debug("API Key is empty")
            raise InvalidAPIKeyError("Forbidden")

        # Get the validated key, if no key is found, raise an exception
        if not (validated_key := self.validate_api_key(api_key_value, context)):
            LOG.debug("Provided API Key is not valid")
            raise InvalidAPIKeyError("Forbidden")

        # Update the context's identity with the key value and id
        # (do not overwrite an apiKey already set, e.g. by an authorizer)
        if not identity.get("apiKey"):
            LOG.debug("Updating $context.identity.apiKey='%s'", validated_key["value"])
            identity["apiKey"] = validated_key["value"]

        LOG.debug("Updating $context.identity.apiKeyId='%s'", validated_key["id"])
        identity["apiKeyId"] = validated_key["id"]

    def validate_api_key(
        self, api_key_value: str, context: RestApiInvocationContext
    ) -> Optional[ApiKey]:
        """Return the enabled ApiKey matching ``api_key_value`` for this stage, or None.

        A key only matches if it belongs to a usage plan associated with the
        invoked API id and stage. Note that the first key whose value matches
        ends the search: if that key is disabled, None is returned without
        checking further plans.
        """
        api_id = context.api_id
        stage = context.stage
        account_id = context.account_id
        region = context.region

        # Get usage plans from the store
        usage_plans = get_usage_plans(account_id=account_id, region_name=region)

        # Loop through usage plans and keep ids of the plans associated with the deployment stage
        usage_plan_ids = []
        for usage_plan in usage_plans:
            api_stages = usage_plan.get("apiStages", [])
            usage_plan_ids.extend(
                usage_plan.get("id")
                for api_stage in api_stages
                if (api_stage.get("stage") == stage and api_stage.get("apiId") == api_id)
            )
        if not usage_plan_ids:
            LOG.debug("No associated usage plans found stage '%s'", stage)
            return

        # Loop through plans with an association with the stage find a key with matching value
        for usage_plan_id in usage_plan_ids:
            usage_plan_keys = get_usage_plan_keys(
                usage_plan_id=usage_plan_id, account_id=account_id, region_name=region
            )
            for key in usage_plan_keys:
                if key["value"] == api_key_value:
                    api_key = get_api_key(
                        api_key_id=key["id"], account_id=account_id, region_name=region
                    )
                    LOG.debug("Found Api Key '%s'", api_key["id"])
                    return api_key if api_key["enabled"] else None

    def get_request_api_key(
        self, rest_api: RestApi, request: InvocationRequest, identity: ContextVarsIdentity
    ) -> Optional[str]:
        """https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-key-source.html
        The source of the API key for metering requests according to a usage plan.
        Valid values are:
        - HEADER to read the API key from the X-API-Key header of a request.
        - AUTHORIZER to read the API key from the Context Variables.

        Returns None when no key is found or the configured source is invalid.
        """
        match api_key_source := rest_api.get("apiKeySource"):
            case ApiKeySourceType.HEADER:
                LOG.debug("Looking for api key in header 'X-API-Key'")
                return request.get("headers", {}).get("X-API-Key")
            case ApiKeySourceType.AUTHORIZER:
                LOG.debug("Looking for api key in Identity Context")
                return identity.get("apiKey")
            case _:
                LOG.debug("Api Key Source is not valid: '%s'", api_key_source)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py
new file mode 100644
index 0000000000000..174b2cf8c1bc2
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py
@@ -0,0 +1,98 @@
+import json
+import logging
+
+from rolo import Response
+from werkzeug.datastructures import Headers
+
+from localstack.constants import APPLICATION_JSON
+from localstack.services.apigateway.next_gen.execute_api.api import (
+ RestApiGatewayExceptionHandler,
+ RestApiGatewayHandlerChain,
+)
+from localstack.services.apigateway.next_gen.execute_api.context import RestApiInvocationContext
+from localstack.services.apigateway.next_gen.execute_api.gateway_response import (
+ AccessDeniedError,
+ BaseGatewayException,
+ get_gateway_response_or_default,
+)
+from localstack.services.apigateway.next_gen.execute_api.variables import (
+ GatewayResponseContextVarsError,
+)
+
+LOG = logging.getLogger(__name__)
+
+
class GatewayExceptionHandler(RestApiGatewayExceptionHandler):
    """
    Exception handler that serializes the Gateway Exceptions into Gateway Responses
    """

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        exception: Exception,
        context: RestApiInvocationContext,
        response: Response,
    ):
        """Turn a raised exception into the HTTP response written to the client.

        Non-gateway exceptions become a generic 500; gateway exceptions are
        rendered through the (possibly user-configured) Gateway Responses.
        """
        if not isinstance(exception, BaseGatewayException):
            LOG.warning(
                "Non Gateway Exception raised: %s",
                exception,
                exc_info=LOG.isEnabledFor(logging.DEBUG),
            )
            response.update_from(
                Response(response=f"Error in apigateway invocation: {exception}", status="500")
            )
            return

        LOG.info("Error raised during invocation: %s", exception.type)
        # populate $context.error before rendering, so templates can reference it
        self.set_error_context(exception, context)
        error = self.create_exception_response(exception, context)
        if error:
            response.update_from(error)

    @staticmethod
    def set_error_context(exception: BaseGatewayException, context: RestApiInvocationContext):
        """Expose the error on the context as the ``$context.error`` variables."""
        context.context_variables["error"] = GatewayResponseContextVarsError(
            message=exception.message,
            messageString=exception.message,
            responseType=exception.type,
            validationErrorString="",  # TODO
        )

    def create_exception_response(
        self, exception: BaseGatewayException, context: RestApiInvocationContext
    ) -> Response:
        """Build the HTTP Response for the exception from the matching Gateway Response.

        Status code precedence: configured Gateway Response statusCode, then the
        exception's own status_code, then 500.
        """
        gateway_response = get_gateway_response_or_default(
            exception.type, context.deployment.rest_api.gateway_responses
        )

        content = self._build_response_content(exception)

        headers = self._build_response_headers(exception)

        status_code = gateway_response.get("statusCode")
        if not status_code:
            status_code = exception.status_code or 500

        response = Response(response=content, headers=headers, status=status_code)
        return response

    @staticmethod
    def _build_response_content(exception: BaseGatewayException) -> str:
        # TODO apply responseTemplates to the content. We should also handle the default simply by managing the default
        # template body `{"message":$context.error.messageString}`

        # TODO: remove this workaround by properly managing the responseTemplate for UnauthorizedError
        # on the CRUD level, it returns the same template as all other errors but in reality the message field is
        # capitalized
        if isinstance(exception, AccessDeniedError):
            return json.dumps({"Message": exception.message}, separators=(",", ":"))

        return json.dumps({"message": exception.message})

    @staticmethod
    def _build_response_headers(exception: BaseGatewayException) -> dict:
        # TODO apply responseParameters to the headers and get content-type from the gateway_response
        headers = Headers({"Content-Type": APPLICATION_JSON, "x-amzn-ErrorType": exception.code})
        return headers
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py
new file mode 100644
index 0000000000000..a05e87e201cd4
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py
@@ -0,0 +1,33 @@
+import logging
+
+from localstack.http import Response
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import EndpointResponse, RestApiInvocationContext
+from ..integrations import REST_API_INTEGRATIONS
+
+LOG = logging.getLogger(__name__)
+
+
class IntegrationHandler(RestApiGatewayHandler):
    """Invokes the integration configured on the matched method and stores its result on the context."""

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        integration_type = context.integration["type"]

        integration = REST_API_INTEGRATIONS.get(integration_type)
        if integration is None:
            # this should not happen, as we validated the type in the provider
            raise NotImplementedError(
                f"This integration type is not yet supported: {integration_type}"
            )

        endpoint_response: EndpointResponse = integration.invoke(context)
        context.endpoint_response = endpoint_response

        # PROXY integrations hand the endpoint response back to the client as-is
        if "PROXY" in integration_type:
            context.invocation_response = endpoint_response
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py
new file mode 100644
index 0000000000000..b9cf68b1ab006
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py
@@ -0,0 +1,349 @@
+import base64
+import logging
+from http import HTTPMethod
+
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import ContentHandlingStrategy, Integration, IntegrationType
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Request, Response
+from localstack.utils.collections import merge_recursive
+from localstack.utils.strings import to_bytes, to_str
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import IntegrationRequest, InvocationRequest, RestApiInvocationContext
+from ..gateway_response import InternalServerError, UnsupportedMediaTypeError
+from ..header_utils import drop_headers, set_default_headers
+from ..helpers import mime_type_matches_binary_media_types, render_integration_uri
+from ..parameters_mapping import ParametersMapper, RequestDataMapping
+from ..template_mapping import (
+ ApiGatewayVtlTemplate,
+ MappingTemplateInput,
+ MappingTemplateParams,
+ MappingTemplateVariables,
+)
+from ..variables import ContextVariableOverrides, ContextVarsRequestOverride
+
+LOG = logging.getLogger(__name__)
+
+# Illegal headers to include in transformation
+ILLEGAL_INTEGRATION_REQUESTS_COMMON = [
+ "content-length",
+ "transfer-encoding",
+ "x-amzn-trace-id",
+ "X-Amzn-Apigateway-Api-Id",
+]
+ILLEGAL_INTEGRATION_REQUESTS_AWS = [
+ *ILLEGAL_INTEGRATION_REQUESTS_COMMON,
+ "authorization",
+ "connection",
+ "expect",
+ "proxy-authenticate",
+ "te",
+]
+
+ # These are dropped after the template overrides were applied; they will never make it to the requests.
+DROPPED_FROM_INTEGRATION_REQUESTS_COMMON = ["Expect", "Proxy-Authenticate", "TE"]
+DROPPED_FROM_INTEGRATION_REQUESTS_AWS = [*DROPPED_FROM_INTEGRATION_REQUESTS_COMMON, "Referer"]
+DROPPED_FROM_INTEGRATION_REQUESTS_HTTP = [*DROPPED_FROM_INTEGRATION_REQUESTS_COMMON, "Via"]
+
+# Default headers
+DEFAULT_REQUEST_HEADERS = {"Accept": APPLICATION_JSON, "Connection": "keep-alive"}
+
+
+class PassthroughBehavior(str):
+ # TODO maybe this class should be moved where it can also be used for validation in
+ # the provider when we switch out of moto
+ WHEN_NO_MATCH = "WHEN_NO_MATCH"
+ WHEN_NO_TEMPLATES = "WHEN_NO_TEMPLATES"
+ NEVER = "NEVER"
+
+
+class IntegrationRequestHandler(RestApiGatewayHandler):
+ """
+ This class will take care of the Integration Request part, which is mostly linked to template mapping
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-integration-settings-integration-request.html
+ """
+
+ def __init__(self):
+ self._param_mapper = ParametersMapper()
+ self._vtl_template = ApiGatewayVtlTemplate()
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ integration: Integration = context.integration
+ integration_type = integration["type"]
+
+ integration_request_parameters = integration["requestParameters"] or {}
+ request_data_mapping = self.get_integration_request_data(
+ context, integration_request_parameters
+ )
+ path_parameters = request_data_mapping["path"]
+
+ if integration_type in (IntegrationType.AWS_PROXY, IntegrationType.HTTP_PROXY):
+ # `PROXY` types cannot use integration mapping templates, they pass most of the data straight
+ # We make a copy to avoid modifying the invocation headers and keep a cleaner history
+ headers = context.invocation_request["headers"].copy()
+ query_string_parameters: dict[str, list[str]] = context.invocation_request[
+ "multi_value_query_string_parameters"
+ ]
+ body = context.invocation_request["body"]
+
+ # HTTP_PROXY still makes use of the request data mappings, and merges them with the invocation request
+ # this is undocumented but validated behavior
+ if integration_type == IntegrationType.HTTP_PROXY:
+ # These headers won't be passed through by default from the invocation.
+ # They can however be added through request mappings.
+ drop_headers(headers, ["Host", "Content-Encoding"])
+ headers.update(request_data_mapping["header"])
+
+ query_string_parameters = self._merge_http_proxy_query_string(
+ query_string_parameters, request_data_mapping["querystring"]
+ )
+
+ else:
+ self._set_proxy_headers(headers, context.request)
+ # AWS_PROXY does not allow URI path rendering
+ # TODO: verify this
+ path_parameters = {}
+
+ else:
+ # find request template to raise UnsupportedMediaTypeError early
+ request_template = self.get_request_template(
+ integration=integration, request=context.invocation_request
+ )
+
+ converted_body = self.convert_body(context)
+
+ body, mapped_overrides = self.render_request_template_mapping(
+ context=context, body=converted_body, template=request_template
+ )
+ # Update the context with the returned mapped overrides
+ context.context_variable_overrides = mapped_overrides
+ # mutate the ContextVariables with the requestOverride result, as we copy the context when rendering the
+ # template to avoid mutation on other fields
+ request_override: ContextVarsRequestOverride = mapped_overrides.get(
+ "requestOverride", {}
+ )
+ # TODO: log every override that happens afterwards (in a loop on `request_override`)
+ merge_recursive(request_override, request_data_mapping, overwrite=True)
+
+ headers = Headers(request_data_mapping["header"])
+ query_string_parameters = request_data_mapping["querystring"]
+
+ # Some headers can't be modified by parameter mappings or mapping templates.
+ # AWS will raise if those are present, even for AWS_PROXY, where it does not apply them.
+ if header_mappings := request_data_mapping["header"]:
+ self._validate_headers_mapping(header_mappings, integration_type)
+
+ self._apply_header_transforms(headers, integration_type, context)
+
+ # looks like the stageVariables rendering part is done in the Integration part in AWS
+ # but we can avoid duplication by doing it here for now
+ # TODO: if the integration is of AWS Lambda type and the Lambda is in another account, we cannot render
+ # stageVariables. Work on that special case later (we can add a quick check for the URI region and set the
+ # stage variables to an empty dict)
+ rendered_integration_uri = render_integration_uri(
+ uri=integration["uri"],
+ path_parameters=path_parameters,
+ stage_variables=context.stage_variables,
+ )
+
+ # if the integration method is defined and is not ANY, we can use it for the integration
+ if not (integration_method := integration["httpMethod"]) or integration_method == "ANY":
+ # otherwise, fallback to the request's method
+ integration_method = context.invocation_request["http_method"]
+
+ integration_request = IntegrationRequest(
+ http_method=integration_method,
+ uri=rendered_integration_uri,
+ query_string_parameters=query_string_parameters,
+ headers=headers,
+ body=body,
+ )
+
+ context.integration_request = integration_request
+
+ def get_integration_request_data(
+ self, context: RestApiInvocationContext, request_parameters: dict[str, str]
+ ) -> RequestDataMapping:
+ return self._param_mapper.map_integration_request(
+ request_parameters=request_parameters,
+ invocation_request=context.invocation_request,
+ context_variables=context.context_variables,
+ stage_variables=context.stage_variables,
+ )
+
+ def render_request_template_mapping(
+ self,
+ context: RestApiInvocationContext,
+ body: str | bytes,
+ template: str,
+ ) -> tuple[bytes, ContextVariableOverrides]:
+ request: InvocationRequest = context.invocation_request
+
+ if not template:
+ return to_bytes(body), context.context_variable_overrides
+
+ try:
+ body_utf8 = to_str(body)
+ except UnicodeError:
+ raise InternalServerError("Internal server error")
+
+ body, mapped_overrides = self._vtl_template.render_request(
+ template=template,
+ variables=MappingTemplateVariables(
+ context=context.context_variables,
+ stageVariables=context.stage_variables or {},
+ input=MappingTemplateInput(
+ body=body_utf8,
+ params=MappingTemplateParams(
+ path=request.get("path_parameters"),
+ querystring=request.get("query_string_parameters", {}),
+ header=request.get("headers"),
+ ),
+ ),
+ ),
+ context_overrides=context.context_variable_overrides,
+ )
+ return to_bytes(body), mapped_overrides
+
+ @staticmethod
+ def get_request_template(integration: Integration, request: InvocationRequest) -> str:
+ """
+ Attempts to return the request template.
+ Will raise UnsupportedMediaTypeError if there is no match according to the passthrough behavior.
+ """
+ request_templates = integration.get("requestTemplates") or {}
+ passthrough_behavior = integration.get("passthroughBehavior")
+ # If content-type is not provided, AWS assumes application/json
+ content_type = request["headers"].get("Content-Type", APPLICATION_JSON)
+ # first look for a template associated with the content-type, otherwise look for the $default template
+ request_template = request_templates.get(content_type) or request_templates.get("$default")
+
+ if request_template or passthrough_behavior == PassthroughBehavior.WHEN_NO_MATCH:
+ return request_template
+
+ match passthrough_behavior:
+ case PassthroughBehavior.NEVER:
+ LOG.debug(
+ "No request template found for '%s' and passthrough behavior set to NEVER",
+ content_type,
+ )
+ raise UnsupportedMediaTypeError("Unsupported Media Type")
+ case PassthroughBehavior.WHEN_NO_TEMPLATES:
+ if request_templates:
+ LOG.debug(
+ "No request template found for '%s' and passthrough behavior set to WHEN_NO_TEMPLATES",
+ content_type,
+ )
+ raise UnsupportedMediaTypeError("Unsupported Media Type")
+ case _:
+ LOG.debug("Unknown passthrough behavior: '%s'", passthrough_behavior)
+
+ return request_template
+
+ @staticmethod
+ def convert_body(context: RestApiInvocationContext) -> bytes | str:
+ """
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings.html
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings-workflow.html
+ :param context:
+ :return: the body, either as is, or converted depending on the table in the second link
+ """
+ request: InvocationRequest = context.invocation_request
+ body = request["body"]
+
+ is_binary_request = mime_type_matches_binary_media_types(
+ mime_type=request["headers"].get("Content-Type"),
+ binary_media_types=context.deployment.rest_api.rest_api.get("binaryMediaTypes", []),
+ )
+ content_handling = context.integration.get("contentHandling")
+ if is_binary_request:
+ if content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT:
+ body = base64.b64encode(body)
+ # if the content handling is not defined, or CONVERT_TO_BINARY, we do not touch the body and leave it as
+ # proper binary
+ else:
+ if not content_handling or content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT:
+ body = body.decode(encoding="UTF-8", errors="replace")
+ else:
+ # it means we have CONVERT_TO_BINARY, so we need to try to decode the base64 string
+ try:
+ body = base64.b64decode(body)
+ except ValueError:
+ raise InternalServerError("Internal server error")
+
+ return body
+
+ @staticmethod
+ def _merge_http_proxy_query_string(
+ query_string_parameters: dict[str, list[str]],
+ mapped_query_string: dict[str, str | list[str]],
+ ):
+ new_query_string_parameters = {k: v.copy() for k, v in query_string_parameters.items()}
+ for param, value in mapped_query_string.items():
+ if existing := new_query_string_parameters.get(param):
+ if isinstance(value, list):
+ existing.extend(value)
+ else:
+ existing.append(value)
+ else:
+ new_query_string_parameters[param] = value
+
+ return new_query_string_parameters
+
+ @staticmethod
+ def _set_proxy_headers(headers: Headers, request: Request):
+ headers.set("X-Forwarded-For", request.remote_addr)
+ headers.set("X-Forwarded-Port", request.environ.get("SERVER_PORT"))
+ headers.set(
+ "X-Forwarded-Proto",
+ request.environ.get("SERVER_PROTOCOL", "").split("/")[0],
+ )
+
+ @staticmethod
+ def _apply_header_transforms(
+ headers: Headers, integration_type: IntegrationType, context: RestApiInvocationContext
+ ):
+ # Dropping matching headers for the provided integration type
+ match integration_type:
+ case IntegrationType.AWS:
+ drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_AWS)
+ case IntegrationType.HTTP | IntegrationType.HTTP_PROXY:
+ drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_HTTP)
+ case _:
+ drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_COMMON)
+
+ # Adding default headers to the requests headers
+ default_headers = {
+ **DEFAULT_REQUEST_HEADERS,
+ "User-Agent": f"AmazonAPIGateway_{context.api_id}",
+ }
+ if (
+ content_type := context.request.headers.get("Content-Type")
+ ) and context.request.method not in {HTTPMethod.OPTIONS, HTTPMethod.GET, HTTPMethod.HEAD}:
+ default_headers["Content-Type"] = content_type
+
+ set_default_headers(headers, default_headers)
+ headers.set("X-Amzn-Trace-Id", context.trace_id)
+ if integration_type not in (IntegrationType.AWS_PROXY, IntegrationType.AWS):
+ headers.set("X-Amzn-Apigateway-Api-Id", context.api_id)
+
+ @staticmethod
+ def _validate_headers_mapping(headers: dict[str, str], integration_type: IntegrationType):
+ """Validates and raises an error when attempting to set an illegal header"""
+ to_validate = ILLEGAL_INTEGRATION_REQUESTS_COMMON
+ if integration_type in {IntegrationType.AWS, IntegrationType.AWS_PROXY}:
+ to_validate = ILLEGAL_INTEGRATION_REQUESTS_AWS
+
+ for header in headers:
+ if header.lower() in to_validate:
+ LOG.debug(
+ "Execution failed due to configuration error: %s header already present", header
+ )
+ raise InternalServerError("Internal server error")
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py
new file mode 100644
index 0000000000000..2dccb39c74a6b
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py
@@ -0,0 +1,312 @@
+import base64
+import json
+import logging
+import re
+
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import (
+ ContentHandlingStrategy,
+ Integration,
+ IntegrationResponse,
+ IntegrationType,
+)
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.utils.strings import to_bytes
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import (
+ EndpointResponse,
+ InvocationRequest,
+ InvocationResponse,
+ RestApiInvocationContext,
+)
+from ..gateway_response import ApiConfigurationError, InternalServerError
+from ..helpers import mime_type_matches_binary_media_types
+from ..parameters_mapping import ParametersMapper, ResponseDataMapping
+from ..template_mapping import (
+ ApiGatewayVtlTemplate,
+ MappingTemplateInput,
+ MappingTemplateParams,
+ MappingTemplateVariables,
+)
+from ..variables import ContextVarsResponseOverride
+
+LOG = logging.getLogger(__name__)
+
+
+class IntegrationResponseHandler(RestApiGatewayHandler):
+ """
+ This class will take care of the Integration Response part, which is mostly linked to template mapping
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-integration-settings-integration-response.html
+ """
+
+ def __init__(self):
+ self._param_mapper = ParametersMapper()
+ self._vtl_template = ApiGatewayVtlTemplate()
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ # TODO: we should log the response coming in from the Integration, either in Integration or here.
+ # before modification / after?
+ integration: Integration = context.integration
+ integration_type = integration["type"]
+
+ if integration_type in (IntegrationType.AWS_PROXY, IntegrationType.HTTP_PROXY):
+ # `PROXY` types cannot use integration response mapping templates
+ # TODO: verify assumptions against AWS
+ return
+
+ endpoint_response: EndpointResponse = context.endpoint_response
+ status_code = endpoint_response["status_code"]
+ body = endpoint_response["body"]
+
+ # we first need to find the right IntegrationResponse based on its selectionPattern, matched against the status
+ # code of the Response
+ if integration_type == IntegrationType.AWS and "lambda:path/" in integration["uri"]:
+ selection_value = self.parse_error_message_from_lambda(body)
+ else:
+ selection_value = str(status_code)
+
+ integration_response: IntegrationResponse = self.select_integration_response(
+ selection_value,
+ integration["integrationResponses"],
+ )
+
+ # we then need to apply Integration Response parameters mapping, to only return select headers
+ response_parameters = integration_response.get("responseParameters") or {}
+ response_data_mapping = self.get_method_response_data(
+ context=context,
+ response=endpoint_response,
+ response_parameters=response_parameters,
+ )
+
+ # We then fetch a response templates and apply the template mapping
+ response_template = self.get_response_template(
+ integration_response=integration_response, request=context.invocation_request
+ )
+ # binary support
+ converted_body = self.convert_body(
+ context,
+ body=body,
+ content_handling=integration_response.get("contentHandling"),
+ )
+
+ body, response_override = self.render_response_template_mapping(
+ context=context, template=response_template, body=converted_body
+ )
+
+ # We basically need to remove all headers and replace them with the mapping, then
+ # override them if there are overrides.
+ # The status code is pretty straightforward. By default, it would be set by the integration response,
+ # unless there was an override
+ response_status_code = int(integration_response["statusCode"])
+ if response_status_override := response_override["status"]:
+ # maybe make a better error message format, same for the overrides for request too
+ LOG.debug("Overriding response status code: '%s'", response_status_override)
+ response_status_code = response_status_override
+
+ # Create a new headers object that we can manipulate before overriding the original response headers
+ response_headers = Headers(response_data_mapping.get("header"))
+ if header_override := response_override["header"]:
+ LOG.debug("Response header overrides: %s", header_override)
+ response_headers.update(header_override)
+
+ LOG.debug("Method response body after transformations: %s", body)
+ context.invocation_response = InvocationResponse(
+ body=body,
+ headers=response_headers,
+ status_code=response_status_code,
+ )
+
+ def get_method_response_data(
+ self,
+ context: RestApiInvocationContext,
+ response: EndpointResponse,
+ response_parameters: dict[str, str],
+ ) -> ResponseDataMapping:
+ return self._param_mapper.map_integration_response(
+ response_parameters=response_parameters,
+ integration_response=response,
+ context_variables=context.context_variables,
+ stage_variables=context.stage_variables,
+ )
+
+ @staticmethod
+ def select_integration_response(
+ selection_value: str, integration_responses: dict[str, IntegrationResponse]
+ ) -> IntegrationResponse:
+ if not integration_responses:
+ LOG.warning(
+ "Configuration error: No match for output mapping and no default output mapping configured. "
+ "Endpoint Response Status Code: %s",
+ selection_value,
+ )
+ raise ApiConfigurationError("Internal server error")
+
+ if select_by_pattern := [
+ response
+ for response in integration_responses.values()
+ if (selectionPatten := response.get("selectionPattern"))
+ and re.match(selectionPatten, selection_value)
+ ]:
+ selected_response = select_by_pattern[0]
+ if len(select_by_pattern) > 1:
+ LOG.warning(
+ "Multiple integration responses matching '%s' statuscode. Choosing '%s' (first).",
+ selection_value,
+ selected_response["statusCode"],
+ )
+ else:
+ # choose default return code
+ # TODO: the provider should check this, as we should only have one default with no value in selectionPattern
+ default_responses = [
+ response
+ for response in integration_responses.values()
+ if not response.get("selectionPattern")
+ ]
+ if not default_responses:
+ # TODO: verify log message when the selection_value is a lambda errorMessage
+ LOG.warning(
+ "Configuration error: No match for output mapping and no default output mapping configured. "
+ "Endpoint Response Status Code: %s",
+ selection_value,
+ )
+ raise ApiConfigurationError("Internal server error")
+
+ selected_response = default_responses[0]
+ if len(default_responses) > 1:
+ LOG.warning(
+ "Multiple default integration responses. Choosing %s (first).",
+ selected_response["statusCode"],
+ )
+ return selected_response
+
+ @staticmethod
+ def get_response_template(
+ integration_response: IntegrationResponse, request: InvocationRequest
+ ) -> str:
+ """The Response Template is selected from the response templates.
+ If there are no templates defined, the body will pass through.
+ API Gateway looks at the integration request `Accept` header and defaults to `application/json`.
+ If no template is matched, API Gateway will fall back to the "first" existing template as the default.
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#transforming-request-response-body
+ """
+ if not (response_templates := integration_response["responseTemplates"]):
+ return ""
+
+ # The invocation request Accept header is used to find the right response template
+ accepts = request["headers"].getlist("accept")
+ if accepts and (template := response_templates.get(accepts[-1])):
+ return template
+ # TODO aws seemed to favor application/json as default when unmatched regardless of "first"
+ if template := response_templates.get(APPLICATION_JSON):
+ return template
+ # TODO What is first? do we need to keep an order as to when they were added/modified?
+ template = next(iter(response_templates.values()))
+ LOG.warning("No templates were matched, Using template: %s", template)
+ return template
+
+ @staticmethod
+ def convert_body(
+ context: RestApiInvocationContext,
+ body: bytes,
+ content_handling: ContentHandlingStrategy | None,
+ ) -> bytes | str:
+ """
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings.html
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings-workflow.html
+ :param context: RestApiInvocationContext
+ :param body: the endpoint response body
+ :param content_handling: the contentHandling of the IntegrationResponse
+ :return: the body, either as is, or converted depending on the table in the second link
+ """
+
+ request: InvocationRequest = context.invocation_request
+ response: EndpointResponse = context.endpoint_response
+ binary_media_types = context.deployment.rest_api.rest_api.get("binaryMediaTypes", [])
+
+ is_binary_payload = mime_type_matches_binary_media_types(
+ mime_type=response["headers"].get("Content-Type"),
+ binary_media_types=binary_media_types,
+ )
+ is_binary_accept = mime_type_matches_binary_media_types(
+ mime_type=request["headers"].get("Accept"),
+ binary_media_types=binary_media_types,
+ )
+
+ if is_binary_payload:
+ if (
+ content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT
+ ) or (not content_handling and not is_binary_accept):
+ body = base64.b64encode(body)
+ else:
+ # this means the Payload is of type `Text` in AWS terms for the table
+ if (
+ content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT
+ ) or (not content_handling and not is_binary_accept):
+ body = body.decode(encoding="UTF-8", errors="replace")
+ else:
+ try:
+ body = base64.b64decode(body)
+ except ValueError:
+ raise InternalServerError("Internal server error")
+
+ return body
+
+ def render_response_template_mapping(
+ self, context: RestApiInvocationContext, template: str, body: bytes | str
+ ) -> tuple[bytes, ContextVarsResponseOverride]:
+ if not template:
+ return to_bytes(body), context.context_variable_overrides["responseOverride"]
+
+ # if there is no template, we can pass binary data through
+ if not isinstance(body, str):
+ # TODO: check, this might be ApiConfigurationError
+ raise InternalServerError("Internal server error")
+
+ body, response_override = self._vtl_template.render_response(
+ template=template,
+ variables=MappingTemplateVariables(
+ context=context.context_variables,
+ stageVariables=context.stage_variables or {},
+ input=MappingTemplateInput(
+ body=body,
+ params=MappingTemplateParams(
+ path=context.invocation_request.get("path_parameters"),
+ querystring=context.invocation_request.get("query_string_parameters", {}),
+ header=context.invocation_request.get("headers", {}),
+ ),
+ ),
+ ),
+ context_overrides=context.context_variable_overrides,
+ )
+
+ # AWS ignores the status if the override isn't an integer between 100 and 599
+ if (status := response_override["status"]) and not (
+ isinstance(status, int) and 100 <= status < 600
+ ):
+ response_override["status"] = 0
+ return to_bytes(body), response_override
+
+ @staticmethod
+ def parse_error_message_from_lambda(payload: bytes) -> str:
+ try:
+ lambda_response = json.loads(payload)
+ if not isinstance(lambda_response, dict):
+ return ""
+
+ # very weird case, but AWS will not return the Error from Lambda in AWS integration, where it does for
+ # Kinesis and such. The AWS Lambda only behavior is concentrated in this method
+ if lambda_response.get("__type") == "AccessDeniedException":
+ raise InternalServerError("Internal server error")
+
+ return lambda_response.get("errorMessage", "")
+
+ except json.JSONDecodeError:
+ return ""
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py
new file mode 100644
index 0000000000000..00a35129225b1
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py
@@ -0,0 +1,147 @@
+import json
+import logging
+
+from jsonschema import ValidationError, validate
+
+from localstack.aws.api.apigateway import Method
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.services.apigateway.helpers import EMPTY_MODEL, ModelResolver
+from localstack.services.apigateway.models import RestApiContainer
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..gateway_response import BadRequestBodyError, BadRequestParametersError
+
+LOG = logging.getLogger(__name__)
+
+
+class MethodRequestHandler(RestApiGatewayHandler):
+ """
+ This class will mostly take care of Request validation with Models
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-settings-method-request.html
+ """
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ self.validate_request(
+ context.resource_method,
+ context.deployment.rest_api,
+ context.invocation_request,
+ )
+
+ def validate_request(
+ self, method: Method, rest_api: RestApiContainer, request: InvocationRequest
+ ) -> None:
+ """
+ :raises BadRequestParametersError if the request has required parameters which are not present
+ :raises BadRequestBodyError if the request has required body validation with a model and it does not respect it
+ :return: None
+ """
+
+ # check if there is validator for the method
+ if not (request_validator_id := method.get("requestValidatorId") or "").strip():
+ return
+
+ # check if there is a validator for this request
+ if not (validator := rest_api.validators.get(request_validator_id)):
+ # TODO Should we raise an exception instead?
+ LOG.exception("No validator were found with matching id: '%s'", request_validator_id)
+ return
+
+ if self.should_validate_request(validator) and (
+ missing_parameters := self._get_missing_required_parameters(method, request)
+ ):
+ message = f"Missing required request parameters: [{', '.join(missing_parameters)}]"
+ raise BadRequestParametersError(message=message)
+
+ if self.should_validate_body(validator) and not self._is_body_valid(
+ method, rest_api, request
+ ):
+ raise BadRequestBodyError(message="Invalid request body")
+
+ return
+
+ @staticmethod
+ def _is_body_valid(
+ method: Method, rest_api: RestApiContainer, request: InvocationRequest
+ ) -> bool:
+ # if there's no model to validate the body, use the Empty model
+ # https://docs.aws.amazon.com/cdk/api/v1/docs/@aws-cdk_aws-apigateway.EmptyModel.html
+ if not (request_models := method.get("requestModels")):
+ model_name = EMPTY_MODEL
+ else:
+ model_name = request_models.get(
+ APPLICATION_JSON, request_models.get("$default", EMPTY_MODEL)
+ )
+
+ model_resolver = ModelResolver(
+ rest_api_container=rest_api,
+ model_name=model_name,
+ )
+
+ # try to get the resolved model first
+ resolved_schema = model_resolver.get_resolved_model()
+ if not resolved_schema:
+ LOG.exception(
+ "An exception occurred while trying to validate the request: could not resolve the model '%s'",
+ model_name,
+ )
+ return False
+
+ try:
+ # if the body is empty, replace it with an empty JSON body
+ validate(
+ instance=json.loads(request.get("body") or "{}"),
+ schema=resolved_schema,
+ )
+ return True
+ except ValidationError as e:
+ LOG.debug("failed to validate request body %s", e)
+ return False
+ except json.JSONDecodeError as e:
+ LOG.debug("failed to validate request body, request data is not valid JSON %s", e)
+ return False
+
+ @staticmethod
+ def _get_missing_required_parameters(method: Method, request: InvocationRequest) -> list[str]:
+ missing_params = []
+ if not (request_parameters := method.get("requestParameters")):
+ return missing_params
+
+ case_sensitive_headers = list(request.get("headers").keys())
+
+ for request_parameter, required in sorted(request_parameters.items()):
+ if not required:
+ continue
+
+ param_type, param_value = request_parameter.removeprefix("method.request.").split(".")
+ match param_type:
+ case "header":
+ is_missing = param_value not in case_sensitive_headers
+ case "path":
+ path = request.get("path_parameters", "")
+ is_missing = param_value not in path
+ case "querystring":
+ is_missing = param_value not in request.get("query_string_parameters", [])
+ case _:
+ # This shouldn't happen
+ LOG.debug("Found an invalid request parameter: %s", request_parameter)
+ is_missing = False
+
+ if is_missing:
+ missing_params.append(param_value)
+
+ return missing_params
+
+ @staticmethod
+ def should_validate_body(validator):
+ return validator.get("validateRequestBody")
+
+ @staticmethod
+ def should_validate_request(validator):
+ return validator.get("validateRequestParameters")
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py
new file mode 100644
index 0000000000000..004f99b98a4da
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py
@@ -0,0 +1,96 @@
+import logging
+
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import IntegrationType
+from localstack.http import Response
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationResponse, RestApiInvocationContext
+from ..header_utils import drop_headers
+
+LOG = logging.getLogger(__name__)
+
# Headers dropped after the response template overrides were applied; they never make
# it into the serialized response.
DROPPED_FROM_INTEGRATION_RESPONSES_COMMON = ["Transfer-Encoding"]
# HTTP_PROXY integrations additionally drop these proxy-related headers.
DROPPED_FROM_INTEGRATION_RESPONSES_HTTP_PROXY = [
    *DROPPED_FROM_INTEGRATION_RESPONSES_COMMON,
    "Content-Encoding",
    "Via",
]


# Headers renamed to `x-amzn-Remapped-<name>` on the way out
# (see MethodResponseHandler._transform_headers).
REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON = [
    "Connection",
    "Content-Length",
    "Date",
    "Server",
]
# Non-proxy (HTTP / AWS) integrations remap this larger set.
REMAPPED_FROM_INTEGRATION_RESPONSE_NON_PROXY = [
    *REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON,
    "Authorization",
    "Content-MD5",
    "Expect",
    "Host",
    "Max-Forwards",
    "Proxy-Authenticate",
    "Trailer",
    "Upgrade",
    "User-Agent",
    "WWW-Authenticate",
]
+
+
+class MethodResponseHandler(RestApiGatewayHandler):
+ """
+ Last handler of the chain, responsible for serializing the Response object
+ """
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ invocation_response = context.invocation_response
+ integration_type = context.integration["type"]
+ headers = invocation_response["headers"]
+
+ self._transform_headers(headers, integration_type)
+
+ method_response = self.serialize_invocation_response(invocation_response)
+ response.update_from(method_response)
+
+ @staticmethod
+ def serialize_invocation_response(invocation_response: InvocationResponse) -> Response:
+ is_content_type_set = invocation_response["headers"].get("content-type") is not None
+ response = Response(
+ response=invocation_response["body"],
+ headers=invocation_response["headers"],
+ status=invocation_response["status_code"],
+ )
+ if not is_content_type_set:
+ # Response sets a content-type by default. This will always be ignored.
+ response.headers.remove("content-type")
+ return response
+
+ @staticmethod
+ def _transform_headers(headers: Headers, integration_type: IntegrationType):
+ """Remaps the provided headers in-place. Adding new `x-amzn-Remapped-` headers and dropping the original headers"""
+ to_remap = REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON
+ to_drop = DROPPED_FROM_INTEGRATION_RESPONSES_COMMON
+
+ match integration_type:
+ case IntegrationType.HTTP | IntegrationType.AWS:
+ to_remap = REMAPPED_FROM_INTEGRATION_RESPONSE_NON_PROXY
+ case IntegrationType.HTTP_PROXY:
+ to_drop = DROPPED_FROM_INTEGRATION_RESPONSES_HTTP_PROXY
+
+ for header in to_remap:
+ if headers.get(header):
+ LOG.debug("Remapping header: %s", header)
+ remapped = headers.pop(header)
+ headers[f"x-amzn-Remapped-{header}"] = remapped
+
+ drop_headers(headers, to_drop)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py
new file mode 100644
index 0000000000000..3da898bf8845e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py
@@ -0,0 +1,204 @@
+import datetime
+import logging
+import re
+from collections import defaultdict
+from typing import Optional
+from urllib.parse import urlparse
+
+from rolo.request import restore_payload
+from werkzeug.datastructures import Headers, MultiDict
+
+from localstack.http import Response
+from localstack.services.apigateway.helpers import REQUEST_TIME_DATE_FORMAT
+from localstack.utils.strings import long_uid, short_uid
+from localstack.utils.time import timestamp
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..header_utils import should_drop_header_from_invocation
+from ..helpers import generate_trace_id, generate_trace_parent, parse_trace_id
+from ..variables import (
+ ContextVariableOverrides,
+ ContextVariables,
+ ContextVarsIdentity,
+ ContextVarsRequestOverride,
+ ContextVarsResponseOverride,
+)
+
+LOG = logging.getLogger(__name__)
+
+
class InvocationRequestParser(RestApiGatewayHandler):
    """
    First handler of the chain: parses the incoming HTTP request into an
    `InvocationRequest` and seeds the invocation context ($context variables,
    override containers, stage variables and trace id) for downstream handlers.
    """

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        # the deployment carries the account/region the API was deployed into
        context.account_id = context.deployment.account_id
        context.region = context.deployment.region
        self.parse_and_enrich(context)

    def parse_and_enrich(self, context: RestApiInvocationContext):
        """Populate all derived fields of the context from the raw request."""
        # first, create the InvocationRequest with the incoming request
        context.invocation_request = self.create_invocation_request(context)
        # then we can create the ContextVariables, used throughout the invocation as payload and to render authorizer
        # payload, mapping templates and such.
        context.context_variables = self.create_context_variables(context)
        # overrides start out empty; mapping templates may populate them later
        context.context_variable_overrides = ContextVariableOverrides(
            requestOverride=ContextVarsRequestOverride(header={}, querystring={}, path={}),
            responseOverride=ContextVarsResponseOverride(header={}, status=0),
        )
        # TODO: maybe adjust the logging
        LOG.debug("Initializing $context='%s'", context.context_variables)
        # then populate the stage variables
        context.stage_variables = self.get_stage_variables(context)
        LOG.debug("Initializing $stageVariables='%s'", context.stage_variables)

        context.trace_id = self.populate_trace_id(context.request.headers)

    def create_invocation_request(self, context: RestApiInvocationContext) -> InvocationRequest:
        """Build the InvocationRequest (method, query params, headers, body, paths)."""
        request = context.request
        params, multi_value_params = self._get_single_and_multi_values_from_multidict(request.args)
        headers = self._get_invocation_headers(request.headers)
        invocation_request = InvocationRequest(
            http_method=request.method,
            query_string_parameters=params,
            multi_value_query_string_parameters=multi_value_params,
            headers=headers,
            # restore_payload returns the raw, unconsumed request body bytes
            body=restore_payload(request),
        )
        self._enrich_with_raw_path(context, invocation_request)

        return invocation_request

    @staticmethod
    def _enrich_with_raw_path(
        context: RestApiInvocationContext, invocation_request: InvocationRequest
    ):
        """Derive `path` and `raw_path` from the request URI, stripping LocalStack routing prefixes."""
        # Base path is not URL-decoded, so we need to get the `RAW_URI` from the request
        request = context.request
        raw_uri = request.environ.get("RAW_URI") or request.path

        # if the request comes from the LocalStack only `_user_request_` route, we need to remove this prefix from the
        # path, in order to properly route the request
        if "_user_request_" in raw_uri:
            # in this format, the stage is before `_user_request_`, so we don't need to remove it
            raw_uri = raw_uri.partition("_user_request_")[2]
        else:
            if raw_uri.startswith("/_aws/execute-api"):
                # the API can be cased in the path, so we need to ignore it to remove it
                raw_uri = re.sub(
                    f"^/_aws/execute-api/{context.api_id}",
                    "",
                    raw_uri,
                    flags=re.IGNORECASE,
                )

            # remove the stage from the path, only replace the first occurrence
            raw_uri = raw_uri.replace(f"/{context.stage}", "", 1)

        if raw_uri.startswith("//"):
            # TODO: AWS validate this assumption
            # if the RAW_URI starts with double slashes, `urlparse` will fail to decode it as path only
            # it also means that we already only have the path, so we just need to remove the query string
            raw_uri = raw_uri.split("?")[0]
            raw_path = "/" + raw_uri.lstrip("/")

        else:
            # we need to make sure we have a path here, sometimes RAW_URI can be a full URI (when proxied)
            raw_path = raw_uri = urlparse(raw_uri).path

        invocation_request["path"] = raw_path
        invocation_request["raw_path"] = raw_uri

    @staticmethod
    def _get_single_and_multi_values_from_multidict(
        multi_dict: MultiDict,
    ) -> tuple[dict[str, str], dict[str, list[str]]]:
        """Split a MultiDict into a single-value mapping and a multi-value mapping."""
        single_values = {}
        multi_values = defaultdict(list)

        for key, value in multi_dict.items(multi=True):
            multi_values[key].append(value)
            # for the single value parameters, AWS only keeps the last value of the list
            single_values[key] = value

        return single_values, dict(multi_values)

    @staticmethod
    def _get_invocation_headers(headers: Headers) -> Headers:
        """Copy the request headers, excluding the ones that never reach the invocation."""
        invocation_headers = Headers()
        for key, value in headers:
            if should_drop_header_from_invocation(key):
                LOG.debug("Dropping header from invocation request: '%s'", key)
                continue
            invocation_headers.add(key, value)
        return invocation_headers

    @staticmethod
    def create_context_variables(context: RestApiInvocationContext) -> ContextVariables:
        """Build the initial $context payload used by authorizers and mapping templates."""
        invocation_request: InvocationRequest = context.invocation_request
        domain_name = invocation_request["headers"].get("Host", "")
        # the prefix is everything before the first dot of the Host header
        domain_prefix = domain_name.split(".")[0]
        now = datetime.datetime.now()

        context_variables = ContextVariables(
            accountId=context.account_id,
            apiId=context.api_id,
            deploymentId=context.deployment_id,
            domainName=domain_name,
            domainPrefix=domain_prefix,
            extendedRequestId=short_uid(),  # TODO: use snapshot tests to verify format
            httpMethod=invocation_request["http_method"],
            identity=ContextVarsIdentity(
                accountId=None,
                accessKey=None,
                caller=None,
                cognitoAuthenticationProvider=None,
                cognitoAuthenticationType=None,
                cognitoIdentityId=None,
                cognitoIdentityPoolId=None,
                principalOrgId=None,
                sourceIp="127.0.0.1",  # TODO: get the sourceIp from the Request
                user=None,
                userAgent=invocation_request["headers"].get("User-Agent"),
                userArn=None,
            ),
            # $context.path includes the stage, unlike the invocation raw path
            path=f"/{context.stage}{invocation_request['raw_path']}",
            protocol="HTTP/1.1",
            requestId=long_uid(),
            requestTime=timestamp(time=now, format=REQUEST_TIME_DATE_FORMAT),
            requestTimeEpoch=int(now.timestamp() * 1000),
            stage=context.stage,
        )
        if context.is_canary is not None:
            context_variables["isCanaryRequest"] = context.is_canary

        return context_variables

    @staticmethod
    def get_stage_variables(context: RestApiInvocationContext) -> Optional[dict[str, str]]:
        """Return the effective stage variables (canary overrides applied), or None when empty."""
        stage_variables = context.stage_configuration.get("variables")
        if context.is_canary:
            # canary deployments may override individual stage variables
            overrides = (
                context.stage_configuration["canarySettings"].get("stageVariableOverrides") or {}
            )
            stage_variables = (stage_variables or {}) | overrides

        if not stage_variables:
            return None

        return stage_variables

    @staticmethod
    def populate_trace_id(headers: Headers) -> str:
        """Build the X-Ray trace header value, reusing incoming trace fields when present."""
        incoming_trace = parse_trace_id(headers.get("x-amzn-trace-id", ""))
        # parse_trace_id always return capitalized keys

        trace = incoming_trace.get("Root", generate_trace_id())
        incoming_parent = incoming_trace.get("Parent")
        parent = incoming_parent or generate_trace_parent()
        # default to sampled only when the caller supplied a parent segment
        sampled = incoming_trace.get("Sampled", "1" if incoming_parent else "0")
        # TODO: lineage? not sure what it related to
        return f"Root={trace};Parent={parent};Sampled={sampled}"
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py
new file mode 100644
index 0000000000000..4dfe6f95dbcbe
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py
@@ -0,0 +1,170 @@
+import logging
+from functools import cache
+from http import HTTPMethod
+from typing import Iterable
+
+from werkzeug.exceptions import MethodNotAllowed, NotFound
+from werkzeug.routing import Map, MapAdapter, Rule
+
+from localstack.aws.api.apigateway import Resource
+from localstack.aws.protocol.routing import (
+ path_param_regex,
+ post_process_arg_name,
+ transform_path_params_to_rule_vars,
+)
+from localstack.http import Response
+from localstack.http.router import GreedyPathConverter
+from localstack.services.apigateway.models import RestApiDeployment
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import RestApiInvocationContext
+from ..gateway_response import MissingAuthTokenError
+from ..variables import ContextVariables
+
+LOG = logging.getLogger(__name__)
+
+
class ApiGatewayMethodRule(Rule):
    """
    Thin extension of Werkzeug's Rule that reverts unwanted assumptions:
    - Werkzeug silently adds HEAD to GET-only rules; we match exactly the given method.
    Added behavior:
    - the pseudo-method ANY expands to the 7 concrete HTTP methods API Gateway supports.
    """

    def __init__(self, string: str, method: str, **kwargs) -> None:
        super().__init__(string=string, methods=[method], **kwargs)

        if method == "ANY":
            # every standard method except CONNECT and TRACE
            self.methods = set(HTTPMethod) - {HTTPMethod.CONNECT, HTTPMethod.TRACE}
        else:
            # Make sure Werkzeug's Rule does not add any other methods
            # (f.e. the HEAD method even though the rule should only match GET)
            self.methods = {method.upper()}
+
+
class RestAPIResourceRouter:
    """
    A router implementation which abstracts the routing of incoming REST API Context to a specific
    resource of the Deployment.
    """

    _map: Map

    def __init__(self, deployment: RestApiDeployment):
        self._resources = deployment.rest_api.resources
        # the werkzeug rule map is built once per deployment; binding happens per request
        self._map = get_rule_map_for_resources(self._resources.values())

    def match(self, context: RestApiInvocationContext) -> tuple[Resource, dict[str, str]]:
        """
        Matches the given request to the resource it targets (or raises an exception if no resource matches).

        :param context:
        :return: A tuple with the matched resource and the (already parsed) path params
        :raises: MissingAuthTokenError, weird naming but that is the default NotFound for REST API
        """

        request = context.request
        # bind the map to get the actual matcher
        matcher: MapAdapter = self._map.bind(context.request.host)

        # perform the matching
        # trailing slashes are ignored in APIGW
        path = context.invocation_request["path"].rstrip("/")
        try:
            rule, args = matcher.match(path, method=request.method, return_rule=True)
        except (MethodNotAllowed, NotFound) as e:
            # MethodNotAllowed (405) exception is raised if a path is matching, but the method does not.
            # Our router might handle this as a 404, validate with AWS.
            LOG.warning(
                "API Gateway: No resource or method was found for: %s %s",
                request.method,
                path,
                exc_info=LOG.isEnabledFor(logging.DEBUG),
            )
            raise MissingAuthTokenError("Missing Authentication Token") from e

        # post process the arg keys and values
        # - the path param keys need to be "un-sanitized", i.e. sanitized rule variable names need to be reverted
        # - the path param values might still be url-encoded
        args = {post_process_arg_name(k): v for k, v in args.items()}

        # extract the operation model from the rule
        resource_id: str = rule.endpoint
        resource = self._resources[resource_id]

        return resource, args
+
+
class InvocationRequestRouter(RestApiGatewayHandler):
    """Resolves the invocation request to a resource/method/integration of the deployment."""

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        self.route_and_enrich(context)

    def route_and_enrich(self, context: RestApiInvocationContext):
        """Match the request against the deployment and store the results on the context."""
        router = self.get_router_for_deployment(context.deployment)

        resource, path_parameters = router.match(context)
        resource: Resource

        context.invocation_request["path_parameters"] = path_parameters
        context.resource = resource

        # fall back to the ANY method when the exact HTTP verb is not defined.
        # NOTE(review): raises KeyError when neither the verb nor ANY exists — the router
        # should not have matched in that case, but confirm.
        method = (
            resource["resourceMethods"].get(context.request.method)
            or resource["resourceMethods"]["ANY"]
        )
        context.resource_method = method
        context.integration = method["methodIntegration"]

        self.update_context_variables_with_resource(context.context_variables, resource)

    @staticmethod
    def update_context_variables_with_resource(
        context_variables: ContextVariables, resource: Resource
    ):
        """Reflect the matched resource in $context.resourcePath and $context.resourceId."""
        LOG.debug("Updating $context.resourcePath='%s'", resource["path"])
        context_variables["resourcePath"] = resource["path"]
        LOG.debug("Updating $context.resourceId='%s'", resource["id"])
        context_variables["resourceId"] = resource["id"]

    @staticmethod
    @cache
    def get_router_for_deployment(deployment: RestApiDeployment) -> RestAPIResourceRouter:
        # memoized per deployment object.
        # NOTE(review): @cache keeps every deployment alive for the process lifetime —
        # confirm the number of deployments is bounded.
        return RestAPIResourceRouter(deployment)
+
+
def get_rule_map_for_resources(resources: Iterable[Resource]) -> Map:
    """Build a Werkzeug rule map with one ApiGatewayMethodRule per resource method."""
    rules = [
        ApiGatewayMethodRule(
            # translate the resource path to a Werkzeug rule string
            string=path_param_regex.sub(transform_path_params_to_rule_vars, resource["path"]),
            method=method,
            endpoint=resource["id"],
        )
        for resource in resources
        for method in resource.get("resourceMethods", {})
    ]

    return Map(
        rules=rules,
        # don't be strict about trailing slashes when matching
        strict_slashes=False,
        # we can't really use werkzeug's merge-slashes since it uses HTTP redirects to solve it
        merge_slashes=False,
        # get service-specific converters
        converters={"path": GreedyPathConverter},
    )
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py
new file mode 100644
index 0000000000000..8b6308e7e3d2c
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py
@@ -0,0 +1,30 @@
+from localstack.aws.api.apigateway import IntegrationType
+from localstack.http import Response
+from localstack.services.apigateway.next_gen.execute_api.api import (
+ RestApiGatewayHandler,
+ RestApiGatewayHandlerChain,
+)
+from localstack.services.apigateway.next_gen.execute_api.context import RestApiInvocationContext
+from localstack.utils.strings import short_uid
+
+
class InvocationResponseEnricher(RestApiGatewayHandler):
    """Sets the API Gateway service headers (request id, apigw id, trace id) on the response."""

    def __call__(
        self,
        chain: RestApiGatewayHandlerChain,
        context: RestApiInvocationContext,
        response: Response,
    ):
        response_headers = response.headers

        response_headers.set("x-amzn-RequestId", context.context_variables["requestId"])

        # Todo, as we go into monitoring, we will want to have these values come from the context?
        response_headers.set("x-amz-apigw-id", short_uid() + "=")

        integration = context.integration
        # the trace id is only sent for non-proxy, non-mock integrations that did not error
        if (
            integration
            and integration["type"] not in (IntegrationType.HTTP_PROXY, IntegrationType.MOCK)
            and not context.context_variables.get("error")
        ):
            response_headers.set("X-Amzn-Trace-Id", context.trace_id)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py
new file mode 100644
index 0000000000000..1b1fcbfa3f35a
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py
@@ -0,0 +1,56 @@
+import logging
+from collections import defaultdict
+from typing import Iterable
+
+from werkzeug.datastructures.headers import Headers
+
+LOG = logging.getLogger(__name__)
+
# Headers dropped at the request parsing. They will never make it to the invocation requests.
# And won't be available for request mapping.
DROPPED_FROM_REQUEST_COMMON = [
    "Connection",
    "Content-Length",
    "Content-MD5",
    "Expect",
    "Max-Forwards",
    "Proxy-Authenticate",
    "Server",
    "TE",
    "Transfer-Encoding",
    "Trailer",
    "Upgrade",
    "WWW-Authenticate",
]
# lower-cased copy used for the case-insensitive check in should_drop_header_from_invocation
DROPPED_FROM_REQUEST_COMMON_LOWER = [header.lower() for header in DROPPED_FROM_REQUEST_COMMON]
+
+
def should_drop_header_from_invocation(header: str) -> bool:
    """These headers are not making it to the invocation requests. Even Proxy integrations are not sending them."""
    normalized_name = header.lower()
    return normalized_name in DROPPED_FROM_REQUEST_COMMON_LOWER
+
+
def build_multi_value_headers(headers: Headers) -> dict[str, list[str]]:
    """Group repeated headers into a mapping of name -> list of every value, in order."""
    grouped = defaultdict(list)
    for name, value in headers:
        grouped[name].append(value)

    # note: callers receive the defaultdict itself (missing keys yield [])
    return grouped
+
+
def drop_headers(headers: Headers, to_drop: Iterable[str]):
    """Will modify the provided headers in-place. Dropping matching headers from the provided list"""
    removed = []
    for name in to_drop:
        # truthiness check: headers present with an empty value are left untouched
        if not headers.get(name):
            continue
        headers.remove(name)
        removed.append(name)

    LOG.debug("Dropping headers: %s", removed)
+
+
def set_default_headers(headers: Headers, default_headers: dict[str, str]):
    """Set each default header on `headers` unless a (truthy) value is already present."""
    for name, fallback in default_headers.items():
        if headers.get(name):
            continue
        headers.set(name, fallback)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py
new file mode 100644
index 0000000000000..33999b69ea1a9
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py
@@ -0,0 +1,183 @@
+import copy
+import logging
+import random
+import re
+import time
+from secrets import token_hex
+from typing import Type, TypedDict
+
+from moto.apigateway.models import RestAPI as MotoRestAPI
+
+from localstack.services.apigateway.models import MergedRestApi, RestApiContainer, RestApiDeployment
+from localstack.utils.aws.arns import get_partition
+
+from .context import RestApiInvocationContext
+from .moto_helpers import get_resources_from_moto_rest_api
+
+LOG = logging.getLogger(__name__)
+
+_stage_variable_pattern = re.compile(r"\${stageVariables\.(?P.*?)}")
+
+
def freeze_rest_api(
    account_id: str, region: str, moto_rest_api: MotoRestAPI, localstack_rest_api: RestApiContainer
) -> RestApiDeployment:
    """
    Snapshot a REST API in time to create a deployment
    This will merge the Moto and LocalStack data into one `MergedRestApi`

    :param account_id: account the API lives in
    :param region: region the API lives in
    :param moto_rest_api: the moto-side representation, holding resources/methods
    :param localstack_rest_api: the LocalStack-side container with provider data
    :return: the deployment snapshot
    """
    # extract the resources out of moto's internal model
    moto_resources = get_resources_from_moto_rest_api(moto_rest_api)

    rest_api = MergedRestApi.from_rest_api_container(
        rest_api_container=localstack_rest_api,
        resources=moto_resources,
    )

    return RestApiDeployment(
        account_id=account_id,
        region=region,
        # deepcopy so later live edits to the API do not leak into this snapshot
        rest_api=copy.deepcopy(rest_api),
    )
+
+
def render_uri_with_stage_variables(
    uri: str | None, stage_variables: dict[str, str] | None
) -> str | None:
    """
    https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html#stage-variables-in-integration-HTTP-uris
    URI=https://${stageVariables.<variable_name>}
    This format is the same as VTL, but we're using a simplified version to only replace
    `${stageVariables.<variable_name>}` values, as AWS will ignore `${path}` for example.
    Unknown stage variables are replaced by the empty string.
    """
    if not uri:
        return uri

    variables = stage_variables or {}
    return _stage_variable_pattern.sub(
        lambda match: variables.get(match.group("varName"), ""), uri
    )
+
+
def render_uri_with_path_parameters(uri: str | None, path_parameters: dict[str, str]) -> str | None:
    """Substitute every `{param}` placeholder in the URI with its path parameter value."""
    if not uri:
        return uri

    rendered = uri
    for name, value in path_parameters.items():
        rendered = rendered.replace(f"{{{name}}}", value)

    return rendered
+
+
def render_integration_uri(
    uri: str | None, path_parameters: dict[str, str], stage_variables: dict[str, str]
) -> str:
    """
    Render all supported placeholders of an integration URI.

    Path parameter substitutions (can also appear in a querystring) look like:
    URI=http://myhost.test/rootpath/{path}

    Stage variable substitutions, documented here:
    https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html#stage-variables-in-integration-HTTP-uris
    look like (same format as VTL):
    URI=https://${stageVariables.<variable_name>}

    :param uri: the integration URI
    :param path_parameters: the list of path parameters, coming from the parameters mapping and override
    :param stage_variables: -
    :return: the rendered URI ("" when no URI is configured)
    """
    if not uri:
        return ""

    return render_uri_with_stage_variables(
        render_uri_with_path_parameters(uri, path_parameters), stage_variables
    )
+
+
def get_source_arn(context: RestApiInvocationContext):
    """Build the `execute-api` source ARN identifying the invoked stage/method/path."""
    http_method = context.resource_method["httpMethod"]
    resource_path = context.resource["path"]
    partition = get_partition(context.region)
    return (
        f"arn:{partition}:execute-api:{context.region}:{context.account_id}"
        f":{context.api_id}/{context.stage}/{http_method}{resource_path}"
    )
+
+
def get_lambda_function_arn_from_invocation_uri(uri: str) -> str:
    """
    Extract the lambda function ARN from an APIGW lambda invocation URI, e.g.
    "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:SimpleLambda4ProxyResource/invocations",
    :param uri: the integration URI value for a lambda function
    :return: the lambda function ARN
    """
    after_marker = uri.split("functions/")[1]
    return after_marker.removesuffix("/invocations")
+
+
def validate_sub_dict_of_typed_dict(typed_dict: Type[TypedDict], obj: dict) -> bool:
    """
    Validate that the object is a subset off the keys of a given `TypedDict`.
    :param typed_dict: the `TypedDict` blueprint
    :param obj: the object to validate
    :return: True if it is a subset, False otherwise
    """
    allowed_keys = set(typed_dict.__required_keys__) | set(typed_dict.__optional_keys__)
    return set(obj).issubset(allowed_keys)
+
+
def generate_trace_id():
    """https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids"""
    # format: "1-<epoch seconds in hex>-<24 random hex chars>"
    epoch_seconds = int(time.time())
    return f"1-{format(epoch_seconds, 'x')}-{token_hex(12)}"
+
+
def generate_trace_parent():
    """Return a fresh random 8-byte (16 hex chars) parent segment id."""
    return token_hex(8)
+
+
def parse_trace_id(trace_id: str) -> dict[str, str]:
    """
    Parse an X-Ray style trace header value ("Root=...;Parent=...;Sampled=...") into a
    dict keyed by the capitalized field name (e.g. "root" -> "Root").

    :param trace_id: the raw header value
    :return: mapping of capitalized field name to value; parts without "=" are skipped
    """
    trace_values = {}
    for trace_part in trace_id.split(";"):
        # split on the first "=" only, so values that themselves contain "=" are kept
        # (the previous split("=") silently dropped such parts)
        key, sep, value = trace_part.partition("=")
        if sep:
            trace_values[key.capitalize()] = value

    return trace_values
+
+
def mime_type_matches_binary_media_types(mime_type: str | None, binary_media_types: list[str]):
    """
    Return True when the request's mime type (first entry, parameters stripped) matches
    one of the configured binary media types ("type/subtype" or "type/*").

    NOTE(review): a configured wildcard type ("*/*" or "*/subtype") is skipped and never
    matches here — confirm whether "*/*" (match-all on AWS) is handled elsewhere.
    """
    if not mime_type or not binary_media_types:
        return False

    request_parts = mime_type.split(",")[0].split(";")[0].split("/")
    if len(request_parts) != 2:
        return False
    request_type, request_subtype = request_parts

    for candidate in binary_media_types:
        candidate_parts = candidate.split(";")[0].split("/")
        if len(candidate_parts) != 2:
            continue
        candidate_type, candidate_subtype = candidate_parts
        if candidate_type == "*":
            continue

        if candidate_type == request_type and candidate_subtype in ("*", request_subtype):
            return True

    return False
+
+
def should_divert_to_canary(percent_traffic: float) -> bool:
    """Randomly decide, weighted by `percent_traffic`, whether to route to the canary."""
    if int(percent_traffic) == 100:
        # full canary traffic: no need to roll the dice
        return True
    return random.random() * 100 < percent_traffic
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py
new file mode 100644
index 0000000000000..7900965784631
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py
@@ -0,0 +1,15 @@
+from .aws import RestApiAwsIntegration, RestApiAwsProxyIntegration
+from .http import RestApiHttpIntegration, RestApiHttpProxyIntegration
+from .mock import RestApiMockIntegration
+
# Maps each integration's `name` to a single shared handler instance.
REST_API_INTEGRATIONS = {
    RestApiAwsIntegration.name: RestApiAwsIntegration(),
    RestApiAwsProxyIntegration.name: RestApiAwsProxyIntegration(),
    RestApiHttpIntegration.name: RestApiHttpIntegration(),
    RestApiHttpProxyIntegration.name: RestApiHttpProxyIntegration(),
    RestApiMockIntegration.name: RestApiMockIntegration(),
}

# only the registry itself is part of this package's public API
__all__ = [
    "REST_API_INTEGRATIONS",
]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py
new file mode 100644
index 0000000000000..5e65458ed4ac3
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py
@@ -0,0 +1,598 @@
+import base64
+import json
+import logging
+from functools import lru_cache
+from http import HTTPMethod
+from typing import Literal, Optional, TypedDict
+from urllib.parse import urlparse
+
+import requests
+from botocore.exceptions import ClientError
+from werkzeug.datastructures import Headers
+
+from localstack import config
+from localstack.aws.connect import (
+ INTERNAL_REQUEST_PARAMS_HEADER,
+ InternalRequestParameters,
+ connect_to,
+ dump_dto,
+)
+from localstack.aws.spec import get_service_catalog
+from localstack.constants import APPLICATION_JSON, INTERNAL_AWS_ACCESS_KEY_ID
+from localstack.utils.aws.arns import extract_region_from_arn
+from localstack.utils.aws.client_types import ServicePrincipal
+from localstack.utils.strings import to_bytes, to_str
+
+from ..context import (
+ EndpointResponse,
+ IntegrationRequest,
+ InvocationRequest,
+ RestApiInvocationContext,
+)
+from ..gateway_response import IntegrationFailureError, InternalServerError
+from ..header_utils import build_multi_value_headers
+from ..helpers import (
+ get_lambda_function_arn_from_invocation_uri,
+ get_source_arn,
+ mime_type_matches_binary_media_types,
+ render_uri_with_stage_variables,
+ validate_sub_dict_of_typed_dict,
+)
+from ..variables import ContextVariables
+from .core import RestApiIntegration
+
LOG = logging.getLogger(__name__)

# HTTP methods for which no request body is forwarded to the integration endpoint
# (see `invoke`: "data" is only set when the method is not in this set)
NO_BODY_METHODS = {
    HTTPMethod.OPTIONS,
    HTTPMethod.GET,
    HTTPMethod.HEAD,
}
+
+
class LambdaProxyResponse(TypedDict, total=False):
    """Shape of the JSON payload a Lambda function may return to the AWS_PROXY integration.

    None of the fields are mandatory, but no other fields are allowed
    (see ``RestApiAwsProxyIntegration._is_lambda_response_valid``).
    """

    body: Optional[str]
    statusCode: Optional[int | str]
    headers: Optional[dict[str, str]]
    isBase64Encoded: Optional[bool]
    multiValueHeaders: Optional[dict[str, list[str]]]
+
+
class LambdaInputEvent(TypedDict, total=False):
    """Input event payload sent to the Lambda function by the AWS_PROXY integration.

    See https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
    """

    body: str
    isBase64Encoded: bool
    httpMethod: str | HTTPMethod
    resource: str
    path: str
    headers: dict[str, str]
    multiValueHeaders: dict[str, list[str]]
    queryStringParameters: dict[str, str]
    multiValueQueryStringParameters: dict[str, list[str]]
    requestContext: ContextVariables
    pathParameters: dict[str, str]
    stageVariables: dict[str, str]
+
+
class ParsedAwsIntegrationUri(TypedDict):
    """Components parsed from an ``arn:aws:apigateway:<region>:<service>:<type>/<path>`` integration URI."""

    service_name: str
    region_name: str
    action_type: Literal["path", "action"]
    path: str
+
+
@lru_cache(maxsize=64)
def get_service_factory(region_name: str, role_arn: str):
    """Return a cached client factory for the region, assuming ``role_arn`` when provided."""
    if not role_arn:
        return connect_to(region_name=region_name)

    return connect_to.with_assumed_role(
        role_arn=role_arn,
        region_name=region_name,
        service_principal=ServicePrincipal.apigateway,
        session_name="BackplaneAssumeRoleSession",
    )
+
+
@lru_cache(maxsize=64)
def get_internal_mocked_headers(
    service_name: str,
    region_name: str,
    source_arn: str,
    role_arn: str | None,
) -> dict[str, str]:
    """
    Build the internal headers used to dispatch an integration request to a LocalStack service:
    a mocked SigV4 ``Authorization`` header plus LocalStack's internal request-parameters DTO.

    :param service_name: boto service name, used in the mocked credential scope
    :param region_name: region, used in the mocked credential scope
    :param source_arn: ARN identifying the caller, forwarded in the internal DTO
    :param role_arn: if set, assume this role via STS and use the returned AccessKeyId;
        otherwise use LocalStack's internal access key id
    :return: headers to merge into the outgoing request
    """
    if role_arn:
        access_key_id = (
            connect_to()
            .sts.request_metadata(service_principal=ServicePrincipal.apigateway)
            .assume_role(RoleArn=role_arn, RoleSessionName="BackplaneAssumeRoleSession")[
                "Credentials"
            ]["AccessKeyId"]
        )
    else:
        access_key_id = INTERNAL_AWS_ACCESS_KEY_ID

    dto = InternalRequestParameters(
        service_principal=ServicePrincipal.apigateway, source_arn=source_arn
    )
    # TODO: maybe use the localstack.utils.aws.client.SigningHttpClient instead of directly mocking the Authorization
    #  header (but will need to select the right signer depending on the service?)
    # NOTE: the date and signature below are fixed placeholders; only the credential scope
    # (access key / region / service) carries meaning for the receiving service.
    headers = {
        "Authorization": (
            "AWS4-HMAC-SHA256 "
            + f"Credential={access_key_id}/20160623/{region_name}/{service_name}/aws4_request, "
            + "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"
        ),
        INTERNAL_REQUEST_PARAMS_HEADER: dump_dto(dto),
    }

    return headers
+
+
@lru_cache(maxsize=64)
def get_target_prefix_for_service(service_name: str) -> str | None:
    """Return the ``X-Amz-Target`` prefix from the service's spec metadata, if any."""
    service_model = get_service_catalog().get(service_name)
    return service_model.metadata.get("targetPrefix")
+
+
class RestApiAwsIntegration(RestApiIntegration):
    """
    This is a REST API integration responsible to directly interact with AWS services. It uses the `uri` to
    map the incoming request to the concerned AWS service, and can have 2 types.
    - `path`: the request is targeting the direct URI of the AWS service, like you would with an HTTP client
      example: For S3 GetObject call: arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}
    - `action`: this is a simpler way, where you can pass the request parameters like you would do with an SDK, and you
      can specify the service action (for ex. here S3 `GetObject`). It seems the request parameters can be pass as query
      string parameters, JSON body and maybe more. TODO: verify, 2 documentation pages indicates divergent information.
      (one indicates parameters through QS, one through request body)
      example: arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key}

    https://docs.aws.amazon.com/apigateway/latest/developerguide/integration-request-basic-setup.html


    TODO: it seems we can global AWS integration type, we should not need to subclass for each service
    we just need to separate usage between the `path` URI type and the `action` URI type.
    - `path`, we can simply pass along the full rendered request along with specific `mocked` AWS headers
      that are dependant of the service (retrieving for the ARN in the uri)
    - `action`, we might need either a full Boto call or use the Boto request serializer, as it seems the request
      parameters are expected as parameters
    """

    name = "AWS"

    # TODO: it seems in AWS, you don't need to manually set the `X-Amz-Target` header when using the `action` type.
    #  for now, we know `events` needs the user to manually add the header, but Kinesis and DynamoDB don't.
    #  Maybe reverse the list to exclude instead of include.
    SERVICES_AUTO_TARGET = ["dynamodb", "kinesis", "ssm", "stepfunctions"]

    # TODO: some services still target the Query protocol (validated with AWS), even though SSM for example is JSON for
    #  as long as the Boto SDK exists. We will need to emulate the Query protocol and translate it to JSON
    SERVICES_LEGACY_QUERY_PROTOCOL = ["ssm"]

    # maps the service name used in the integration URI to the boto service name
    SERVICE_MAP = {
        "states": "stepfunctions",
    }

    def __init__(self):
        self._base_domain = config.internal_service_url()
        # NOTE(review): `_base_host` is never read within this class — confirm it is still needed
        self._base_host = ""
        # all service names known to the spec catalog, used to detect "real" AWS services below
        self._service_names = get_service_catalog().service_names

    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        """
        Dispatch the rendered integration request to the AWS service targeted by the
        integration URI, and return the service's response.

        Resolves service/region/action from the URI, attaches mocked internal auth headers
        (assuming the integration's `credentials` role if configured), and sends the HTTP
        request to the internal LocalStack gateway.
        """
        integration_req: IntegrationRequest = context.integration_request
        method = integration_req["http_method"]
        parsed_uri = self.parse_aws_integration_uri(integration_req["uri"])
        service_name = parsed_uri["service_name"]
        integration_region = parsed_uri["region_name"]

        if credentials := context.integration.get("credentials"):
            credentials = render_uri_with_stage_variables(credentials, context.stage_variables)

        headers = integration_req["headers"]
        # Some integrations will use a special format for the service in the URI, like AppSync, and so those requests
        # are not directed to a service directly, so need to add the Authorization header. It would fail parsing
        # by our service name parser anyway
        if service_name in self._service_names:
            headers.update(
                get_internal_mocked_headers(
                    service_name=service_name,
                    region_name=integration_region,
                    source_arn=get_source_arn(context),
                    role_arn=credentials,
                )
            )
        # copy so the "Action" entry added below does not mutate the integration request
        query_params = integration_req["query_string_parameters"].copy()
        data = integration_req["body"]

        if parsed_uri["action_type"] == "path":
            # the Path action type allows you to override the path the request is sent to, like you would send to AWS
            path = f"/{parsed_uri['path']}"
        else:
            # Action passes the `Action` query string parameter
            path = ""
            action = parsed_uri["path"]

            if target := self.get_action_service_target(service_name, action):
                headers["X-Amz-Target"] = target

            query_params["Action"] = action

            if service_name in self.SERVICES_LEGACY_QUERY_PROTOCOL:
                # this has been tested in AWS: for `ssm`, it fully overrides the body because SSM uses the Query
                # protocol, so we simulate it that way
                data = self.get_payload_from_query_string(query_params)

        url = f"{self._base_domain}{path}"
        headers["Host"] = self.get_internal_host_for_service(
            service_name=service_name, region_name=integration_region
        )

        request_parameters = {
            "method": method,
            "url": url,
            "params": query_params,
            "headers": headers,
        }

        # no body is forwarded for OPTIONS/GET/HEAD requests
        if method not in NO_BODY_METHODS:
            request_parameters["data"] = data

        request_response = requests.request(**request_parameters)
        response_content = request_response.content

        if (
            parsed_uri["action_type"] == "action"
            and service_name in self.SERVICES_LEGACY_QUERY_PROTOCOL
        ):
            # wrap the JSON response the way the legacy Query protocol would have
            response_content = self.format_response_content_legacy(
                payload=response_content,
                service_name=service_name,
                action=parsed_uri["path"],
                request_id=context.context_variables["requestId"],
            )

        return EndpointResponse(
            body=response_content,
            status_code=request_response.status_code,
            headers=Headers(dict(request_response.headers)),
        )

    def parse_aws_integration_uri(self, uri: str) -> ParsedAwsIntegrationUri:
        """
        The URI can be of 2 shapes: Path or Action.
        Path : arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}
        Action: arn:aws:apigateway:us-east-1:kinesis:action/PutRecord
        :param uri: the URI of the AWS integration
        :return: a ParsedAwsIntegrationUri containing the service name, the region and the type of action
        """
        arn, _, path = uri.partition("/")
        split_arn = arn.split(":", maxsplit=5)
        *_, region_name, service_name, action_type = split_arn
        # translate URI service names (e.g. "states") to their boto equivalent
        boto_service_name = self.SERVICE_MAP.get(service_name, service_name)
        return ParsedAwsIntegrationUri(
            region_name=region_name,
            service_name=boto_service_name,
            action_type=action_type,
            path=path,
        )

    def get_action_service_target(self, service_name: str, action: str) -> str | None:
        """Return the ``X-Amz-Target`` value for services that expect it to be set automatically, else None."""
        if service_name not in self.SERVICES_AUTO_TARGET:
            return None

        target_prefix = get_target_prefix_for_service(service_name)
        if not target_prefix:
            return None

        return f"{target_prefix}.{action}"

    def get_internal_host_for_service(self, service_name: str, region_name: str):
        """Return the ``Host`` header value routing the request to the right internal endpoint."""
        url = self._base_domain
        if service_name == "sqs":
            # This follow the new SQS_ENDPOINT_STRATEGY=standard
            url = config.external_service_url(subdomains=f"sqs.{region_name}")
        elif "-api" in service_name:
            # this could be an `.<service>-api`, used by some services
            url = config.external_service_url(subdomains=service_name)

        return urlparse(url).netloc

    @staticmethod
    def get_payload_from_query_string(query_string_parameters: dict) -> str:
        """Serialize the Query-protocol parameters into the JSON body the LocalStack service expects."""
        return json.dumps(query_string_parameters)

    @staticmethod
    def format_response_content_legacy(
        service_name: str, action: str, payload: bytes, request_id: str
    ) -> bytes:
        """Wrap a JSON response in the ``<Action>Response``/``<Action>Result`` envelope of the Query protocol."""
        # TODO: not sure how much we need to support this, this supports SSM for now, once we write more tests for
        #  `action` type, see if we can generalize more
        data = json.loads(payload)
        try:
            # we try to populate the missing fields from the OperationModel of the operation
            operation_model = get_service_catalog().get(service_name).operation_model(action)
            for key in operation_model.output_shape.members:
                if key not in data:
                    data[key] = None

        except Exception:
            # the operation above is only for parity reason, skips if it fails
            pass

        wrapped = {
            f"{action}Response": {
                f"{action}Result": data,
                "ResponseMetadata": {
                    "RequestId": request_id,
                },
            }
        }
        return to_bytes(json.dumps(wrapped))
+
+
class RestApiAwsProxyIntegration(RestApiIntegration):
    """
    This is a custom, simplified REST API integration focused only on the Lambda service, with minimal modification from
    API Gateway. It passes the incoming request almost as is, in a custom created event payload, to the configured
    Lambda function.

    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
    """

    name = "AWS_PROXY"

    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        """
        Build the proxy input event, invoke the target Lambda function, and translate its
        JSON response into an HTTP endpoint response.

        :raises IntegrationFailureError: when the integration method is not POST or the
            Lambda invocation fails
        :raises InternalServerError: when the Lambda response payload is malformed
            (raised from ``parse_lambda_response``)
        """
        integration_req: IntegrationRequest = context.integration_request
        method = integration_req["http_method"]

        if method != HTTPMethod.POST:
            LOG.warning(
                "The 'AWS_PROXY' integration can only be used with the POST integration method.",
            )
            raise IntegrationFailureError("Internal server error")

        input_event = self.create_lambda_input_event(context)

        # TODO: verify stage variables rendering in AWS_PROXY
        integration_uri = integration_req["uri"]

        function_arn = get_lambda_function_arn_from_invocation_uri(integration_uri)
        source_arn = get_source_arn(context)

        # TODO: write test for credentials rendering
        if credentials := context.integration.get("credentials"):
            credentials = render_uri_with_stage_variables(credentials, context.stage_variables)

        try:
            lambda_payload = self.call_lambda(
                function_arn=function_arn,
                event=to_bytes(json.dumps(input_event)),
                source_arn=source_arn,
                credentials=credentials,
            )

        except ClientError as e:
            LOG.warning(
                "Exception during integration invocation: '%s'",
                e,
            )
            # access-denied errors surface as 500, any other client error as 502
            status_code = 502
            if e.response["Error"]["Code"] == "AccessDeniedException":
                status_code = 500
            raise IntegrationFailureError("Internal server error", status_code=status_code) from e

        except Exception as e:
            LOG.warning(
                "Unexpected exception during integration invocation: '%s'",
                e,
            )
            raise IntegrationFailureError("Internal server error", status_code=502) from e

        lambda_response = self.parse_lambda_response(lambda_payload)

        # default content type, possibly overridden by the Lambda response headers below
        headers = Headers({"Content-Type": APPLICATION_JSON})

        response_headers = self._merge_lambda_response_headers(lambda_response)
        headers.update(response_headers)

        # TODO: maybe centralize this flag inside the context, when we are also using it for other integration types
        #  AWS_PROXY behaves a bit differently, but this could checked only once earlier
        binary_response_accepted = mime_type_matches_binary_media_types(
            mime_type=context.invocation_request["headers"].get("Accept"),
            binary_media_types=context.deployment.rest_api.rest_api.get("binaryMediaTypes", []),
        )
        # base64-decode the body only if the client accepts a binary media type
        body = self._parse_body(
            body=lambda_response.get("body"),
            is_base64_encoded=binary_response_accepted and lambda_response.get("isBase64Encoded"),
        )

        return EndpointResponse(
            headers=headers,
            body=body,
            status_code=int(lambda_response.get("statusCode") or 200),
        )

    @staticmethod
    def call_lambda(
        function_arn: str,
        event: bytes,
        source_arn: str,
        credentials: str = None,
    ) -> bytes:
        """
        Synchronously invoke the Lambda function and return its raw response payload bytes.

        :param function_arn: ARN of the Lambda function to invoke (region is derived from it)
        :param event: JSON-encoded proxy input event
        :param source_arn: ARN of the invoking API, forwarded as request metadata
        :param credentials: optional role ARN to assume for the invocation
        """
        lambda_client = get_service_factory(
            region_name=extract_region_from_arn(function_arn),
            role_arn=credentials,
        ).lambda_
        inv_result = lambda_client.request_metadata(
            service_principal=ServicePrincipal.apigateway,
            source_arn=source_arn,
        ).invoke(
            FunctionName=function_arn,
            Payload=event,
            InvocationType="RequestResponse",
        )
        if payload := inv_result.get("Payload"):
            return payload.read()
        return b""

    def parse_lambda_response(self, payload: bytes) -> LambdaProxyResponse:
        """
        Parse and validate the Lambda response payload, normalizing boolean header values
        to the strings "true"/"false".

        :raises InternalServerError: when the payload is not JSON or not a valid proxy response
        """
        try:
            lambda_response = json.loads(payload)
        except json.JSONDecodeError:
            LOG.warning(
                'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."} but was: %s',
                payload,
            )
            LOG.debug(
                "Execution failed due to configuration error: Malformed Lambda proxy response"
            )
            raise InternalServerError("Internal server error", status_code=502)

        # none of the lambda response fields are mandatory, but you cannot return any other fields
        if not self._is_lambda_response_valid(lambda_response):
            if "errorMessage" in lambda_response:
                LOG.debug(
                    "Lambda execution failed with status 200 due to customer function error: %s. Lambda request id: %s",
                    lambda_response["errorMessage"],
                    lambda_response.get("requestId", ""),
                )
            else:
                LOG.warning(
                    'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."} but was: %s',
                    payload,
                )
                LOG.debug(
                    "Execution failed due to configuration error: Malformed Lambda proxy response"
                )
            raise InternalServerError("Internal server error", status_code=502)

        def serialize_header(value: bool | str) -> str:
            # JSON booleans in header values become lowercase strings, like AWS does
            if isinstance(value, bool):
                return "true" if value else "false"
            return value

        if headers := lambda_response.get("headers"):
            lambda_response["headers"] = {k: serialize_header(v) for k, v in headers.items()}

        if multi_value_headers := lambda_response.get("multiValueHeaders"):
            lambda_response["multiValueHeaders"] = {
                k: [serialize_header(v) for v in values]
                for k, values in multi_value_headers.items()
            }

        return lambda_response

    @staticmethod
    def _is_lambda_response_valid(lambda_response: dict) -> bool:
        """Return True if the payload only contains valid LambdaProxyResponse fields with valid types."""
        if not isinstance(lambda_response, dict):
            return False

        if not validate_sub_dict_of_typed_dict(LambdaProxyResponse, lambda_response):
            return False

        if (headers := lambda_response.get("headers")) is not None:
            if not isinstance(headers, dict):
                return False
            if any(not isinstance(header_value, (str, bool)) for header_value in headers.values()):
                return False

        if (multi_value_headers := lambda_response.get("multiValueHeaders")) is not None:
            if not isinstance(multi_value_headers, dict):
                return False
            if any(
                not isinstance(header_value, list) for header_value in multi_value_headers.values()
            ):
                return False

        if "statusCode" in lambda_response:
            # NOTE(review): int() raises TypeError (not ValueError) for e.g. None or dict
            # values, which would not be caught here — confirm earlier TypedDict validation
            # makes that impossible
            try:
                int(lambda_response["statusCode"])
            except ValueError:
                return False

        # TODO: add more validations of the values' type
        return True

    def create_lambda_input_event(self, context: RestApiInvocationContext) -> LambdaInputEvent:
        """Assemble the proxy input event from the invocation and integration requests."""
        # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
        # for building the Lambda Payload, we need access to the Invocation Request, as some data is not available in
        # the integration request and does not make sense for it
        invocation_req: InvocationRequest = context.invocation_request
        integration_req: IntegrationRequest = context.integration_request

        body, is_b64_encoded = self._format_body(integration_req["body"])

        if context.base_path:
            path = context.context_variables["path"]
        else:
            path = invocation_req["path"]

        input_event = LambdaInputEvent(
            headers=self._format_headers(dict(integration_req["headers"])),
            multiValueHeaders=self._format_headers(
                build_multi_value_headers(integration_req["headers"])
            ),
            body=body or None,
            isBase64Encoded=is_b64_encoded,
            requestContext=context.context_variables,
            stageVariables=context.stage_variables,
            # still using the InvocationRequest query string parameters as the logic is the same, maybe refactor?
            queryStringParameters=invocation_req["query_string_parameters"] or None,
            multiValueQueryStringParameters=invocation_req["multi_value_query_string_parameters"]
            or None,
            pathParameters=invocation_req["path_parameters"] or None,
            httpMethod=invocation_req["http_method"],
            path=path,
            resource=context.resource["path"],
        )

        return input_event

    @staticmethod
    def _format_headers(headers: dict[str, str | list[str]]) -> dict[str, str | list[str]]:
        """Title-case selected headers and drop hop-by-hop headers, mimicking CloudFront."""
        # Some headers get capitalized like in CloudFront, see
        # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html#add-origin-custom-headers-forward-authorization
        # It seems AWS_PROXY lambda integrations are behind CloudFront, as seen by the returned headers in AWS
        to_capitalize: list[str] = ["authorization", "user-agent"]  # some headers get capitalized
        to_filter: list[str] = ["content-length", "connection"]
        headers = {
            k.title() if k.lower() in to_capitalize else k: v
            for k, v in headers.items()
            if k.lower() not in to_filter
        }

        return headers

    @staticmethod
    def _format_body(body: bytes) -> tuple[str, bool]:
        """Return the body as (text, False) if UTF-8 decodable, else (base64 text, True)."""
        try:
            return body.decode("utf-8"), False
        except UnicodeDecodeError:
            return to_str(base64.b64encode(body)), True

    @staticmethod
    def _parse_body(body: str | None, is_base64_encoded: bool) -> bytes:
        """Return the response body as bytes, base64-decoding it when requested.

        :raises InternalServerError: when the body claims to be base64 but cannot be decoded
        """
        if not body:
            return b""

        if is_base64_encoded:
            try:
                return base64.b64decode(body)
            except Exception:
                raise InternalServerError("Internal server error", status_code=500)

        return to_bytes(body)

    @staticmethod
    def _merge_lambda_response_headers(lambda_response: LambdaProxyResponse) -> dict:
        """Merge ``headers`` and ``multiValueHeaders`` of the Lambda response into one dict."""
        headers = lambda_response.get("headers") or {}

        if multi_value_headers := lambda_response.get("multiValueHeaders"):
            # multiValueHeaders has the priority and will decide the casing of the final headers, as they are merged
            headers_low_keys = {k.lower(): v for k, v in headers.items()}

            for k, values in multi_value_headers.items():
                if (k_lower := k.lower()) in headers_low_keys:
                    # NOTE(review): when the same header appears in both dicts with different
                    # casing, the single-value-cased key is not removed here, so both casings
                    # remain in the result — confirm this is intended
                    headers[k] = [*values, headers_low_keys[k_lower]]
                else:
                    headers[k] = values

        return headers
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py
new file mode 100644
index 0000000000000..c65b1a9539d7f
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py
@@ -0,0 +1,19 @@
+from abc import abstractmethod
+
+from ..api import RestApiInvocationContext
+from ..context import EndpointResponse
+
+
class RestApiIntegration:
    """
    This REST API Integration exposes an API to invoke the specific Integration with a common interface.

    https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-integration-settings.html
    TODO: Add more abstractmethods when starting to work on the Integration handler
    """

    # value of the API Gateway integration "type" field this class handles (e.g. "AWS_PROXY")
    name: str

    # NOTE(review): the class does not inherit from abc.ABC, so @abstractmethod is not
    # enforced at instantiation time — confirm whether this is intentional
    @abstractmethod
    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        pass
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py
new file mode 100644
index 0000000000000..fa0511072c9d1
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py
@@ -0,0 +1,147 @@
+import logging
+from http import HTTPMethod
+from typing import Optional, TypedDict
+
+import requests
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import Integration
+
+from ..context import EndpointResponse, IntegrationRequest, RestApiInvocationContext
+from ..gateway_response import ApiConfigurationError, IntegrationFailureError
+from ..header_utils import build_multi_value_headers
+from .core import RestApiIntegration
+
LOG = logging.getLogger(__name__)

# HTTP methods for which no request body is forwarded to the integration endpoint
NO_BODY_METHODS = {HTTPMethod.OPTIONS, HTTPMethod.GET, HTTPMethod.HEAD}
+
+
class SimpleHttpRequest(TypedDict, total=False):
    """Keyword arguments passed through to ``requests.request`` by the HTTP integrations."""

    method: HTTPMethod | str
    url: str
    params: Optional[dict[str, str | list[str]]]
    data: bytes
    headers: Optional[dict[str, str]]
    cookies: Optional[dict[str, str]]
    timeout: Optional[int]
    allow_redirects: Optional[bool]
    stream: Optional[bool]
    verify: Optional[bool]
    # TODO: check if there was a situation where we'd pass certs?
    cert: Optional[str | tuple[str, str]]
+
+
class BaseRestApiHttpIntegration(RestApiIntegration):
    """Shared helpers for the HTTP and HTTP_PROXY integration types."""

    @staticmethod
    def _get_integration_timeout(integration: Integration) -> float:
        # convert the configured timeout (milliseconds, defaulting to 29s) into seconds
        timeout_millis = int(integration.get("timeoutInMillis", 29000))
        return timeout_millis / 1000
+
+
class RestApiHttpIntegration(BaseRestApiHttpIntegration):
    """
    This is a REST API integration responsible to send a request to another HTTP API.
    https://docs.aws.amazon.com/apigateway/latest/developerguide/setup-http-integrations.html#api-gateway-set-up-http-proxy-integration-on-proxy-resource
    """

    name = "HTTP"

    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        """
        Send the rendered integration request to the configured HTTP endpoint and return
        its response.

        :raises ApiConfigurationError: when the endpoint URI is invalid or the connection fails
        :raises IntegrationFailureError: on timeout or SSL errors communicating with the endpoint
        """
        integration_req: IntegrationRequest = context.integration_request
        method = integration_req["http_method"]
        uri = integration_req["uri"]

        request_parameters: SimpleHttpRequest = {
            "method": method,
            "url": uri,
            "params": integration_req["query_string_parameters"],
            "headers": integration_req["headers"],
        }

        # no body is forwarded for OPTIONS/GET/HEAD requests
        if method not in NO_BODY_METHODS:
            request_parameters["data"] = integration_req["body"]

        # TODO: configurable timeout (29 by default) (check type and default value in provider)
        # integration: Integration = context.resource_method["methodIntegration"]
        # request_parameters["timeout"] = self._get_integration_timeout(integration)
        # TODO: check for redirects
        # request_parameters["allow_redirects"] = False
        try:
            request_response = requests.request(**request_parameters)

        except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema) as e:
            LOG.warning("Execution failed due to configuration error: Invalid endpoint address")
            LOG.debug("The URI specified for the HTTP/HTTP_PROXY integration is invalid: %s", uri)
            raise ApiConfigurationError("Internal server error") from e

        except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
            # TODO make the exception catching more fine grained
            # this can be reproduced in AWS if you try to hit an HTTP endpoint which is HTTPS only like lambda URL
            # NOTE(review): clause order matters — presumably ConnectTimeout (a subclass of
            # both Timeout and ConnectionError) is meant to be handled here; confirm
            LOG.warning("Execution failed due to a network error communicating with endpoint")
            raise IntegrationFailureError("Network error communicating with endpoint") from e

        except requests.exceptions.ConnectionError as e:
            raise ApiConfigurationError("Internal server error") from e

        return EndpointResponse(
            body=request_response.content,
            status_code=request_response.status_code,
            headers=Headers(dict(request_response.headers)),
        )
+
+
class RestApiHttpProxyIntegration(BaseRestApiHttpIntegration):
    """
    This is a simplified REST API integration responsible to send a request to another HTTP API by proxying it almost
    directly.
    https://docs.aws.amazon.com/apigateway/latest/developerguide/setup-http-integrations.html#api-gateway-set-up-http-proxy-integration-on-proxy-resource
    """

    name = "HTTP_PROXY"

    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        """
        Proxy the integration request almost as-is to the configured HTTP endpoint and
        return its response.

        :raises ApiConfigurationError: when the endpoint URI is invalid or the connection fails
        :raises IntegrationFailureError: on timeout or SSL errors communicating with the endpoint
        """
        integration_req: IntegrationRequest = context.integration_request
        method = integration_req["http_method"]
        uri = integration_req["uri"]

        # flatten multi-value headers to comma-joined single values before proxying
        multi_value_headers = build_multi_value_headers(integration_req["headers"])
        request_headers = {key: ",".join(value) for key, value in multi_value_headers.items()}

        request_parameters: SimpleHttpRequest = {
            "method": method,
            "url": uri,
            "params": integration_req["query_string_parameters"],
            "headers": request_headers,
        }

        # TODO: validate this for HTTP_PROXY
        if method not in NO_BODY_METHODS:
            request_parameters["data"] = integration_req["body"]

        # TODO: configurable timeout (29 by default) (check type and default value in provider)
        # integration: Integration = context.resource_method["methodIntegration"]
        # request_parameters["timeout"] = self._get_integration_timeout(integration)
        try:
            request_response = requests.request(**request_parameters)

        except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema) as e:
            LOG.warning("Execution failed due to configuration error: Invalid endpoint address")
            LOG.debug("The URI specified for the HTTP/HTTP_PROXY integration is invalid: %s", uri)
            raise ApiConfigurationError("Internal server error") from e

        except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
            # TODO make the exception catching more fine grained
            # this can be reproduced in AWS if you try to hit an HTTP endpoint which is HTTPS only like lambda URL
            LOG.warning("Execution failed due to a network error communicating with endpoint")
            # chain the cause (`from e`) for consistency with RestApiHttpIntegration and to
            # preserve __cause__ for debugging
            raise IntegrationFailureError("Network error communicating with endpoint") from e

        except requests.exceptions.ConnectionError as e:
            raise ApiConfigurationError("Internal server error") from e

        response_headers = Headers(dict(request_response.headers))

        return EndpointResponse(
            body=request_response.content,
            status_code=request_response.status_code,
            headers=response_headers,
        )
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py
new file mode 100644
index 0000000000000..84ddecc05862e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py
@@ -0,0 +1,108 @@
+import json
+import logging
+import re
+from json import JSONDecodeError
+
+from werkzeug.datastructures import Headers
+
+from localstack.utils.strings import to_str
+
+from ..context import EndpointResponse, IntegrationRequest, RestApiInvocationContext
+from ..gateway_response import InternalServerError
+from .core import RestApiIntegration
+
+LOG = logging.getLogger(__name__)
+
+
class RestApiMockIntegration(RestApiIntegration):
    """
    This is a simple REST API integration but quite limited, allowing you to quickly test your APIs or return
    hardcoded responses to the client.
    This integration can never return a proper response, and all the work is done with integration request and response
    mappings.
    This can be used to set up CORS response for `OPTIONS` requests.
    https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-mock-integration.html
    """

    name = "MOCK"

    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
        """
        Return an empty response whose status code is taken from the ``statusCode`` key of
        the (template-rendered) integration request body.

        :raises InternalServerError: when no integer ``statusCode`` can be extracted
        """
        integration_req: IntegrationRequest = context.integration_request

        status_code = self.get_status_code(integration_req)

        if status_code is None:
            LOG.debug(
                "Execution failed due to configuration error: Unable to parse statusCode. "
                "It should be an integer that is defined in the request template."
            )
            raise InternalServerError("Internal server error")

        return EndpointResponse(status_code=status_code, body=b"", headers=Headers())

    def get_status_code(self, integration_req: IntegrationRequest) -> int | None:
        """Extract the integer ``statusCode`` from the integration request body, or None."""
        try:
            body = json.loads(integration_req["body"])
        except JSONDecodeError as e:
            # fix: the two implicitly-concatenated message parts were previously joined
            # without a separator, logging "...: %sFalling back to custom parser"
            LOG.debug(
                "Exception while JSON parsing integration request body: %s. "
                "Falling back to custom parser",
                e,
                exc_info=LOG.isEnabledFor(logging.DEBUG),
            )
            body = self.parse_invalid_json(to_str(integration_req["body"]))

        status_code = body.get("statusCode")
        if not isinstance(status_code, int):
            return

        return status_code

    def parse_invalid_json(self, body: str) -> dict:
        """This is a quick fix to unblock cdk users setting cors policy for rest apis.
        CDK creates a MOCK OPTIONS route with in valid json. `{statusCode: 200}`
        Aws probably has a custom token parser. We can implement one
        at some point if we have user requests for it"""

        def convert_null_value(value) -> str:
            # treat `null` (or empty) tokens as an empty-string token
            if (value := value.strip()) in ("null", ""):
                return '""'
            return value

        try:
            statuscode = ""
            matched = re.match(r"^\s*{(.+)}\s*$", body).group(1)
            pairs = [m.strip() for m in matched.split(",")]
            # TODO this is not right, but nested object would otherwise break the parsing
            key_values = [s.split(":", maxsplit=1) for s in pairs if s]
            for key_value in key_values:
                assert len(key_value) == 2
                key, value = [convert_null_value(el) for el in key_value]

                if key in ("statusCode", "'statusCode'", '"statusCode"'):
                    statuscode = int(value)
                    continue

                # lightweight sanity checks that the remaining pairs look like valid tokens;
                # any violation aborts parsing via the broad except below
                assert (leading_key_char := key[0]) not in "[{"
                if leading_key_char in "'\"":
                    assert len(key) >= 2
                    assert key[-1] == leading_key_char

                if (leading_value_char := value[0]) in "[{'\"":
                    assert len(value) >= 2
                    if leading_value_char == "{":
                        # TODO reparse objects
                        assert value[-1] == "}"
                    elif leading_value_char == "[":
                        # TODO validate arrays
                        assert value[-1] == "]"
                    else:
                        assert value[-1] == leading_value_char

            return {"statusCode": statuscode}

        except Exception as e:
            LOG.debug(
                "Error Parsing an invalid json, %s", e, exc_info=LOG.isEnabledFor(logging.DEBUG)
            )
            return {"statusCode": ""}
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py
new file mode 100644
index 0000000000000..d54b25b560759
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py
@@ -0,0 +1,82 @@
+from moto.apigateway.models import APIGatewayBackend, apigateway_backends
+from moto.apigateway.models import RestAPI as MotoRestAPI
+
+from localstack.aws.api.apigateway import (
+ ApiKey,
+ ListOfUsagePlan,
+ ListOfUsagePlanKey,
+ Resource,
+ Stage,
+)
+
+
+def get_resources_from_moto_rest_api(moto_rest_api: MotoRestAPI) -> dict[str, Resource]:
+ """
+ This returns the `Resources` from a Moto REST API
+ This allows to decouple the underlying split of resources between Moto and LocalStack, and always return the right
+ format.
+ """
+ moto_resources = moto_rest_api.resources
+
+ resources: dict[str, Resource] = {}
+ for moto_resource in moto_resources.values():
+ resource = Resource(
+ id=moto_resource.id,
+ parentId=moto_resource.parent_id,
+ pathPart=moto_resource.path_part,
+ path=moto_resource.get_path(),
+ resourceMethods={
+ # TODO: check if resource_methods.to_json() returns everything we need/want
+ k: v.to_json()
+ for k, v in moto_resource.resource_methods.items()
+ },
+ )
+
+ resources[moto_resource.id] = resource
+
+ return resources
+
+
+def get_stage_variables(
+ account_id: str, region: str, api_id: str, stage_name: str
+) -> dict[str, str]:
+ apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region]
+ moto_rest_api = apigateway_backend.get_rest_api(api_id)
+ stage = moto_rest_api.stages[stage_name]
+ return stage.variables
+
+
+def get_stage_configuration(account_id: str, region: str, api_id: str, stage_name: str) -> Stage:
+ apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region]
+ moto_rest_api = apigateway_backend.get_rest_api(api_id)
+ stage = moto_rest_api.stages[stage_name]
+ return stage.to_json()
+
+
+def get_usage_plans(account_id: str, region_name: str) -> ListOfUsagePlan:
+ """
+ Will return a list of usage plans from the moto store.
+ """
+ apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+ return [usage_plan.to_json() for usage_plan in apigateway_backend.usage_plans.values()]
+
+
+def get_api_key(api_key_id: str, account_id: str, region_name: str) -> ApiKey:
+ """
+ Will return an api key from the moto store.
+ """
+ apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+ return apigateway_backend.keys[api_key_id].to_json()
+
+
+def get_usage_plan_keys(
+ usage_plan_id: str, account_id: str, region_name: str
+) -> ListOfUsagePlanKey:
+ """
+ Will return a list of usage plan keys from the moto store.
+ """
+ apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+ return [
+ usage_plan_key.to_json()
+ for usage_plan_key in apigateway_backend.usage_plan_keys.get(usage_plan_id, {}).values()
+ ]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py
new file mode 100644
index 0000000000000..bb723e58ea4ef
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py
@@ -0,0 +1,298 @@
+# > This section explains how to set up data mappings from an API's method request data, including other data
+# stored in context, stage, or util variables, to the corresponding integration request parameters and from an
+# integration response data, including the other data, to the method response parameters. The method request
+# data includes request parameters (path, query string, headers) and the body. The integration response data
+# includes response parameters (headers) and the body. For more information about using the stage variables,
+# see API Gateway stage variables reference.
+#
+# https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html
+import json
+import logging
+from typing import Any, TypedDict
+
+from localstack.utils.json import extract_jsonpath
+from localstack.utils.strings import to_str
+
+from .context import EndpointResponse, InvocationRequest
+from .gateway_response import BadRequestException, InternalFailureException
+from .header_utils import build_multi_value_headers
+from .variables import ContextVariables
+
+LOG = logging.getLogger(__name__)
+
+
+class RequestDataMapping(TypedDict):
+ # Integration request parameters, in the form of path variables, query strings or headers, can be mapped from any
+ # defined method request parameters and the payload.
+ header: dict[str, str]
+ path: dict[str, str]
+ querystring: dict[str, str | list[str]]
+
+
+class ResponseDataMapping(TypedDict):
+ # Method response header parameters can be mapped from any integration response header or integration response body,
+ # $context variables, or static values.
+ header: dict[str, str]
+
+
+class ParametersMapper:
+ def map_integration_request(
+ self,
+ request_parameters: dict[str, str],
+ invocation_request: InvocationRequest,
+ context_variables: ContextVariables,
+ stage_variables: dict[str, str],
+ ) -> RequestDataMapping:
+ request_data_mapping = RequestDataMapping(
+ header={},
+ path={},
+ querystring={},
+ )
+ # storing the case-sensitive headers once, the mapping is strict
+ case_sensitive_headers = build_multi_value_headers(invocation_request["headers"])
+
+ for integration_mapping, request_mapping in request_parameters.items():
+ # TODO: remove this once the validation has been added to the provider, to avoid breaking
+ if not isinstance(integration_mapping, str) or not isinstance(request_mapping, str):
+ LOG.warning(
+ "Wrong parameter mapping value type: %s: %s. They should both be string. Skipping this mapping.",
+ integration_mapping,
+ request_mapping,
+ )
+ continue
+
+ integration_param_location, param_name = integration_mapping.removeprefix(
+ "integration.request."
+ ).split(".")
+
+ if request_mapping.startswith("method.request."):
+ method_req_expr = request_mapping.removeprefix("method.request.")
+ value = self._retrieve_parameter_from_invocation_request(
+ method_req_expr, invocation_request, case_sensitive_headers
+ )
+
+ else:
+ value = self._retrieve_parameter_from_variables_and_static(
+ mapping_value=request_mapping,
+ context_variables=context_variables,
+ stage_variables=stage_variables,
+ )
+
+ if value:
+ request_data_mapping[integration_param_location][param_name] = value
+
+ return request_data_mapping
+
+ def map_integration_response(
+ self,
+ response_parameters: dict[str, str],
+ integration_response: EndpointResponse,
+ context_variables: ContextVariables,
+ stage_variables: dict[str, str],
+ ) -> ResponseDataMapping:
+ response_data_mapping = ResponseDataMapping(header={})
+
+ # storing the case-sensitive headers once, the mapping is strict
+ case_sensitive_headers = build_multi_value_headers(integration_response["headers"])
+
+ for response_mapping, integration_mapping in response_parameters.items():
+ header_name = response_mapping.removeprefix("method.response.header.")
+
+ if integration_mapping.startswith("integration.response."):
+ method_req_expr = integration_mapping.removeprefix("integration.response.")
+ value = self._retrieve_parameter_from_integration_response(
+ method_req_expr, integration_response, case_sensitive_headers
+ )
+ else:
+ value = self._retrieve_parameter_from_variables_and_static(
+ mapping_value=integration_mapping,
+ context_variables=context_variables,
+ stage_variables=stage_variables,
+ )
+
+ if value:
+ response_data_mapping["header"][header_name] = value
+
+ return response_data_mapping
+
+ def _retrieve_parameter_from_variables_and_static(
+ self,
+ mapping_value: str,
+ context_variables: dict[str, Any],
+ stage_variables: dict[str, str],
+ ) -> str | None:
+ if mapping_value.startswith("context."):
+ context_var_expr = mapping_value.removeprefix("context.")
+ return self._retrieve_parameter_from_context_variables(
+ context_var_expr, context_variables
+ )
+
+ elif mapping_value.startswith("stageVariables."):
+ stage_var_name = mapping_value.removeprefix("stageVariables.")
+ return self._retrieve_parameter_from_stage_variables(stage_var_name, stage_variables)
+
+ elif mapping_value.startswith("'") and mapping_value.endswith("'"):
+ return mapping_value.strip("'")
+
+ else:
+ LOG.warning(
+ "Unrecognized parameter mapping value: '%s'. Skipping this mapping.",
+ mapping_value,
+ )
+ return None
+
+ def _retrieve_parameter_from_integration_response(
+ self,
+ expr: str,
+ integration_response: EndpointResponse,
+ case_sensitive_headers: dict[str, list[str]],
+ ) -> str | None:
+ """
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#mapping-response-parameters
+ :param expr: mapping expression stripped from `integration.response.`:
+ Can be of the following: `header.<param_name>`, `multivalueheader.<param_name>`, `body` and
+ `body.<JSONPath_expression>`
+ :param integration_response: the Response to map parameters from
+ :return: the value to map in the ResponseDataMapping
+ """
+ if expr.startswith("body"):
+ body = integration_response.get("body") or b"{}"
+ body = body.strip()
+ try:
+ decoded_body = self._json_load(body)
+ except ValueError:
+ raise InternalFailureException(message="Internal server error")
+
+ if expr == "body":
+ return to_str(body)
+
+ elif expr.startswith("body."):
+ json_path = expr.removeprefix("body.")
+ return self._get_json_path_from_dict(decoded_body, json_path)
+ else:
+ LOG.warning(
+ "Unrecognized integration.response parameter: '%s'. Skipping the parameter mapping.",
+ expr,
+ )
+ return None
+
+ param_type, param_name = expr.split(".")
+
+ if param_type == "header":
+ if header := case_sensitive_headers.get(param_name):
+ return header[-1]
+
+ elif param_type == "multivalueheader":
+ if header := case_sensitive_headers.get(param_name):
+ return ",".join(header)
+
+ else:
+ LOG.warning(
+ "Unrecognized integration.response parameter: '%s'. Skipping the parameter mapping.",
+ expr,
+ )
+
+ def _retrieve_parameter_from_invocation_request(
+ self,
+ expr: str,
+ invocation_request: InvocationRequest,
+ case_sensitive_headers: dict[str, list[str]],
+ ) -> str | list[str] | None:
+ """
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#mapping-response-parameters
+ :param expr: mapping expression stripped from `method.request.`:
+ Can be of the following: `path.<param_name>`, `querystring.<param_name>`,
+ `multivaluequerystring.<param_name>`, `header.<param_name>`, `multivalueheader.<param_name>`,
+ `body` and `body.<JSONPath_expression>`
+ :param invocation_request: the InvocationRequest to map parameters from
+ :return: the value to map in the RequestDataMapping
+ """
+ if expr.startswith("body"):
+ body = invocation_request["body"] or b"{}"
+ body = body.strip()
+ try:
+ decoded_body = self._json_load(body)
+ except ValueError:
+ raise BadRequestException(message="Invalid JSON in request body")
+
+ if expr == "body":
+ return to_str(body)
+
+ elif expr.startswith("body."):
+ json_path = expr.removeprefix("body.")
+ return self._get_json_path_from_dict(decoded_body, json_path)
+ else:
+ LOG.warning(
+ "Unrecognized method.request parameter: '%s'. Skipping the parameter mapping.",
+ expr,
+ )
+ return None
+
+ param_type, param_name = expr.split(".")
+ if param_type == "path":
+ return invocation_request["path_parameters"].get(param_name)
+
+ elif param_type == "querystring":
+ multi_qs_params = invocation_request["multi_value_query_string_parameters"].get(
+ param_name
+ )
+ if multi_qs_params:
+ return multi_qs_params[-1]
+
+ elif param_type == "multivaluequerystring":
+ multi_qs_params = invocation_request["multi_value_query_string_parameters"].get(
+ param_name
+ )
+ if len(multi_qs_params) == 1:
+ return multi_qs_params[0]
+ return multi_qs_params
+
+ elif param_type == "header":
+ if header := case_sensitive_headers.get(param_name):
+ return header[-1]
+
+ elif param_type == "multivalueheader":
+ if header := case_sensitive_headers.get(param_name):
+ return ",".join(header)
+
+ else:
+ LOG.warning(
+ "Unrecognized method.request parameter: '%s'. Skipping the parameter mapping.",
+ expr,
+ )
+
+ def _retrieve_parameter_from_context_variables(
+ self, expr: str, context_variables: dict[str, Any]
+ ) -> str | None:
+ # we're using JSON path here because we could access nested properties like `context.identity.sourceIp`
+ if (value := self._get_json_path_from_dict(context_variables, expr)) and isinstance(
+ value, str
+ ):
+ return value
+
+ @staticmethod
+ def _retrieve_parameter_from_stage_variables(
+ stage_var_name: str, stage_variables: dict[str, str]
+ ) -> str | None:
+ return stage_variables.get(stage_var_name)
+
+ @staticmethod
+ def _get_json_path_from_dict(body: dict, path: str) -> str | None:
+ # TODO: verify we don't have special cases
+ try:
+ return extract_jsonpath(body, f"$.{path}")
+ except KeyError:
+ return None
+
+ @staticmethod
+ def _json_load(body: bytes) -> dict | list:
+ """
+ AWS only tries to JSON decode the body if it starts with some leading characters ({, [, ", ')
+ otherwise, it ignores it
+ :param body:
+ :return:
+ """
+ if any(body.startswith(c) for c in (b"{", b"[", b"'", b'"')):
+ return json.loads(body)
+
+ return {}
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
new file mode 100644
index 0000000000000..6c0ca3245164b
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
@@ -0,0 +1,222 @@
+import logging
+from typing import TypedDict, Unpack
+
+from rolo import Request, Router
+from rolo.routing.handler import Handler
+from werkzeug.routing import Rule
+
+from localstack.aws.api.apigateway import Stage
+from localstack.constants import APPLICATION_JSON, AWS_REGION_US_EAST_1, DEFAULT_AWS_ACCOUNT_ID
+from localstack.deprecations import deprecated_endpoint
+from localstack.http import Response
+from localstack.services.apigateway.models import ApiGatewayStore, apigateway_stores
+from localstack.services.edge import ROUTER
+from localstack.services.stores import AccountRegionBundle
+
+from .context import RestApiInvocationContext
+from .gateway import RestApiGateway
+from .helpers import should_divert_to_canary
+from .moto_helpers import get_stage_configuration
+
+LOG = logging.getLogger(__name__)
+
+
+class RouteHostPathParameters(TypedDict, total=False):
+ """
+ Represents the kwargs typing for calling APIGatewayEndpoint.
+ Each field might be populated from the route host and path parameters, defined when registering a route.
+ """
+
+ api_id: str
+ path: str
+ port: int | None
+ server: str | None
+ stage: str | None
+ vpce_suffix: str | None
+
+
+class ApiGatewayEndpoint:
+ """
+ This class is the endpoint for API Gateway invocations of the `execute-api` route. It will take the incoming
+ invocation request, create a context from the API matching the route parameters, and dispatch the request to the
+ Gateway to be processed by the handler chain.
+ """
+
+ def __init__(self, rest_gateway: RestApiGateway = None, store: AccountRegionBundle = None):
+ self.rest_gateway = rest_gateway or RestApiGateway()
+ # we only access CrossAccount attributes in the handler, so we use a global store in default account and region
+ self._store = store or apigateway_stores
+
+ @property
+ def _global_store(self) -> ApiGatewayStore:
+ return self._store[DEFAULT_AWS_ACCOUNT_ID][AWS_REGION_US_EAST_1]
+
+ def __call__(self, request: Request, **kwargs: Unpack[RouteHostPathParameters]) -> Response:
+ """
+ :param request: the incoming Request object
+ :param kwargs: can contain all the fields of RouteHostPathParameters. Those values are defined on the registered
+ routes in ApiGatewayRouter, through host and path parameters (e.g. `<api_id>`, `<stage>`, `<path>`).
+ :return: the Response object to return to the client
+ """
+ # api_id can be cased because of custom-tag id
+ api_id, stage = kwargs.get("api_id", "").lower(), kwargs.get("stage")
+ if self.is_rest_api(api_id, stage):
+ context, response = self.prepare_rest_api_invocation(request, api_id, stage)
+ self.rest_gateway.process_with_context(context, response)
+ return response
+ else:
+ return self.create_not_found_response(api_id)
+
+ def prepare_rest_api_invocation(
+ self, request: Request, api_id: str, stage: str
+ ) -> tuple[RestApiInvocationContext, Response]:
+ LOG.debug("APIGW v1 Endpoint called")
+ response = self.create_response(request)
+ context = RestApiInvocationContext(request)
+ self.populate_rest_api_invocation_context(context, api_id, stage)
+
+ return context, response
+
+ def is_rest_api(self, api_id: str, stage: str):
+ return stage in self._global_store.active_deployments.get(api_id, {})
+
+ def populate_rest_api_invocation_context(
+ self, context: RestApiInvocationContext, api_id: str, stage: str
+ ):
+ try:
+ deployment_id = self._global_store.active_deployments[api_id][stage]
+ frozen_deployment = self._global_store.internal_deployments[api_id][deployment_id]
+
+ except KeyError:
+ # TODO: find proper error when trying to hit an API with no deployment/stage linked
+ return
+
+ stage_configuration = self.fetch_stage_configuration(
+ account_id=frozen_deployment.account_id,
+ region=frozen_deployment.region,
+ api_id=api_id,
+ stage_name=stage,
+ )
+ if canary_settings := stage_configuration.get("canarySettings"):
+ if should_divert_to_canary(canary_settings["percentTraffic"]):
+ deployment_id = canary_settings["deploymentId"]
+ frozen_deployment = self._global_store.internal_deployments[api_id][deployment_id]
+ context.is_canary = True
+ else:
+ context.is_canary = False
+
+ context.deployment = frozen_deployment
+ context.api_id = api_id
+ context.stage = stage
+ context.stage_configuration = stage_configuration
+ context.deployment_id = deployment_id
+
+ @staticmethod
+ def fetch_stage_configuration(
+ account_id: str, region: str, api_id: str, stage_name: str
+ ) -> Stage:
+ # this will be migrated once we move away from Moto, so we won't need the helper anymore and the logic will
+ # be implemented here
+ stage_variables = get_stage_configuration(
+ account_id=account_id,
+ region=region,
+ api_id=api_id,
+ stage_name=stage_name,
+ )
+
+ return stage_variables
+
+ @staticmethod
+ def create_response(request: Request) -> Response:
+ # Creates a default apigw response.
+ response = Response(headers={"Content-Type": APPLICATION_JSON})
+ if not (connection := request.headers.get("Connection")) or connection != "close":
+ # We only set the connection if it isn't close.
+ # There appears to be an issue in LocalStack, where setting "close" will result in "close, close"
+ response.headers.set("Connection", "keep-alive")
+ return response
+
+ @staticmethod
+ def create_not_found_response(api_id: str) -> Response:
+ not_found = Response(status=404)
+ not_found.set_json(
+ {"message": f"The API id '{api_id}' does not correspond to a deployed API Gateway API"}
+ )
+ return not_found
+
+
+class ApiGatewayRouter:
+ router: Router[Handler]
+ handler: ApiGatewayEndpoint
+ EXECUTE_API_INTERNAL_PATH = "/_aws/execute-api"
+
+ def __init__(self, router: Router[Handler] = None, handler: ApiGatewayEndpoint = None):
+ self.router = router or ROUTER
+ self.handler = handler or ApiGatewayEndpoint()
+ self.registered_rules: list[Rule] = []
+
+ def register_routes(self) -> None:
+ LOG.debug("Registering API Gateway routes.")
+ host_pattern = ".execute-api."
+ deprecated_route_endpoint = deprecated_endpoint(
+ endpoint=self.handler,
+ previous_path="/restapis///_user_request_",
+ deprecation_version="3.8.0",
+ new_path=f"{self.EXECUTE_API_INTERNAL_PATH}//",
+ )
+ rules = [
+ self.router.add(
+ path="/",
+ host=host_pattern,
+ endpoint=self.handler,
+ defaults={"path": "", "stage": None},
+ strict_slashes=True,
+ ),
+ self.router.add(
+ path="//",
+ host=host_pattern,
+ endpoint=self.handler,
+ defaults={"path": ""},
+ strict_slashes=False,
+ ),
+ self.router.add(
+ path="//",
+ host=host_pattern,
+ endpoint=self.handler,
+ strict_slashes=True,
+ ),
+ # add the deprecated localstack-specific _user_request_ routes
+ self.router.add(
+ path="/restapis///_user_request_",
+ endpoint=deprecated_route_endpoint,
+ defaults={"path": "", "random": "?"},
+ ),
+ self.router.add(
+ path="/restapis///_user_request_/",
+ endpoint=deprecated_route_endpoint,
+ strict_slashes=True,
+ ),
+ # add the localstack-specific so-called "path-style" routes when DNS resolving is not possible
+ self.router.add(
+ path=f"{self.EXECUTE_API_INTERNAL_PATH}//",
+ endpoint=self.handler,
+ defaults={"path": "", "stage": None},
+ strict_slashes=True,
+ ),
+ self.router.add(
+ path=f"{self.EXECUTE_API_INTERNAL_PATH}///",
+ endpoint=self.handler,
+ defaults={"path": ""},
+ strict_slashes=False,
+ ),
+ self.router.add(
+ path=f"{self.EXECUTE_API_INTERNAL_PATH}///",
+ endpoint=self.handler,
+ strict_slashes=True,
+ ),
+ ]
+ for rule in rules:
+ self.registered_rules.append(rule)
+
+ def unregister_routes(self):
+ self.router.remove(self.registered_rules)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py
new file mode 100644
index 0000000000000..fd729f853d187
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py
@@ -0,0 +1,315 @@
+# > In API Gateway, an API's method request or response can take a payload in a different format from the integration
+# request or response.
+#
+# You can transform your data to:
+# - Match the payload to an API-specified format.
+# - Override an API's request and response parameters and status codes.
+# - Return client selected response headers.
+ # - Associate path parameters, query string parameters, or header parameters in the method request of HTTP proxy
+ # or AWS service proxy. TODO: this is from the documentation. Can we use requestOverrides for proxy integrations?
+# - Select which data to send using integration with AWS services, such as Amazon DynamoDB or Lambda functions,
+# or HTTP endpoints.
+#
+# You can use mapping templates to transform your data. A mapping template is a script expressed in Velocity Template
+# Language (VTL) and applied to the payload using JSONPath .
+#
+# https://docs.aws.amazon.com/apigateway/latest/developerguide/models-mappings.html
+import base64
+import copy
+import json
+import logging
+from typing import Any, TypedDict
+from urllib.parse import quote_plus, unquote_plus
+
+import airspeed
+from airspeed.operators import dict_to_string
+from jsonpath_rw import parse
+
+from localstack import config
+from localstack.services.apigateway.next_gen.execute_api.variables import (
+ ContextVariableOverrides,
+ ContextVariables,
+ ContextVarsResponseOverride,
+)
+from localstack.utils.aws.templating import APIGW_SOURCE, VelocityUtil, VtlTemplate
+from localstack.utils.json import json_safe
+
+LOG = logging.getLogger(__name__)
+
+
+class MappingTemplateParams(TypedDict, total=False):
+ path: dict[str, str]
+ querystring: dict[str, str]
+ header: dict[str, str]
+
+
+class MappingTemplateInput(TypedDict, total=False):
+ body: str
+ params: MappingTemplateParams
+
+
+class MappingTemplateVariables(TypedDict, total=False):
+ context: ContextVariables
+ input: MappingTemplateInput
+ stageVariables: dict[str, str]
+
+
+def cast_to_vtl_object(value):
+ if isinstance(value, dict):
+ return VTLMap(value)
+ if isinstance(value, list):
+ return [cast_to_vtl_object(item) for item in value]
+ return value
+
+
+def cast_to_vtl_json_object(value: Any) -> Any:
+ if isinstance(value, dict):
+ return VTLJsonDict(value)
+ if isinstance(value, list):
+ return VTLJsonList(value)
+ return value
+
+
+def extract_jsonpath(value: dict | list, path: str):
+ jsonpath_expr = parse(path)
+ result = [match.value for match in jsonpath_expr.find(value)]
+ if not result:
+ return None
+ result = result[0] if len(result) == 1 else result
+ return result
+
+
+class VTLMap(dict):
+ """Overrides __str__ of python dict (and all child dict) to return a Java like string representation"""
+
+ # TODO apply this class more generally through the template mappings
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.update(*args, **kwargs)
+
+ @staticmethod
+ def cast_factory(value: Any) -> Any:
+ return cast_to_vtl_object(value)
+
+ def update(self, *args, **kwargs):
+ for k, v in self.items():
+ self[k] = self.cast_factory(v)
+
+ def __str__(self) -> str:
+ return dict_to_string(self)
+
+
+class VTLJsonList(list):
+ """Some VTL List behave differently when being represented as string and everything
+ inside will be represented as a json string
+
+ Example: $input.path('$').b // Where path is {"a": 1, "b": [{"c": 5}]}
+ Results: '[{"c":5}]' // Where everything inside the list is a valid json object
+ """
+
+ def __init__(self, *args):
+ super(VTLJsonList, self).__init__(*args)
+ for idx, item in enumerate(self):
+ self[idx] = cast_to_vtl_json_object(item)
+
+ def __str__(self):
+ if isinstance(self, list):
+ return json.dumps(self, separators=(",", ":"))
+
+
+class VTLJsonDict(VTLMap):
+ """Some VTL Map behave differently when being represented as string and a list
+ encountered in the dictionary will be represented as a json string
+
+ Example: $input.path('$') // Where path is {"a": 1, "b": [{"c": 5}]}
+ Results: '{a=1, b=[{"c":5}]}' // Where everything inside the list is a valid json object
+ """
+
+ @staticmethod
+ def cast_factory(value: Any) -> Any:
+ return cast_to_vtl_json_object(value)
+
+
+class AttributeDict(dict):
+ """
+ Wrapper returned by VelocityUtilApiGateway.parseJson to allow access to dict values as attributes (dot notation),
+ e.g.: $util.parseJson('$.foo').bar
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(AttributeDict, self).__init__(*args, **kwargs)
+ for key, value in self.items():
+ if isinstance(value, dict):
+ self[key] = AttributeDict(value)
+
+ def __getattr__(self, name):
+ if name in self:
+ return self[name]
+ raise AttributeError(f"'AttributeDict' object has no attribute '{name}'")
+
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __delattr__(self, name):
+ if name in self:
+ del self[name]
+ else:
+ raise AttributeError(f"'AttributeDict' object has no attribute '{name}'")
+
+
+class VelocityUtilApiGateway(VelocityUtil):
+ """
+ Simple class to mimic the behavior of variable '$util' in AWS API Gateway integration
+ velocity templates.
+ See: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+
+ def base64Encode(self, s):
+ if not isinstance(s, str):
+ s = json.dumps(s)
+ encoded_str = s.encode(config.DEFAULT_ENCODING)
+ encoded_b64_str = base64.b64encode(encoded_str)
+ return encoded_b64_str.decode(config.DEFAULT_ENCODING)
+
+ def base64Decode(self, s):
+ if not isinstance(s, str):
+ s = json.dumps(s)
+ return base64.b64decode(s)
+
+ def toJson(self, obj):
+ return obj and json.dumps(obj)
+
+ def urlEncode(self, s):
+ return quote_plus(s)
+
+ def urlDecode(self, s):
+ return unquote_plus(s)
+
+ def escapeJavaScript(self, obj: Any) -> str:
+ """
+ Converts the given object to a string and escapes any regular single quotes (') into escaped ones (\').
+ JSON dumps will escape the single quotes.
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+ if obj is None:
+ return "null"
+ if isinstance(obj, str):
+ # empty string escapes to empty object
+ if len(obj.strip()) == 0:
+ return "{}"
+ return json.dumps(obj)[1:-1]
+ if obj in (True, False):
+ return str(obj).lower()
+ return str(obj)
+
+ def parseJson(self, s: str):
+ obj = json.loads(s)
+ return AttributeDict(obj) if isinstance(obj, dict) else obj
+
+
+class VelocityInput:
+ """
+ Simple class to mimic the behavior of variable '$input' in AWS API Gateway integration
+ velocity templates.
+ See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ """
+
+ def __init__(self, body, params):
+ self.parameters = params or {}
+ self.value = body
+
+ def _extract_json_path(self, path):
+ if not self.value:
+ return None
+ if isinstance(self.value, dict):
+ value = self.value
+ else:
+ try:
+ value = json.loads(self.value)
+ except json.JSONDecodeError:
+ return None
+
+ return extract_jsonpath(value, path)
+
+ def path(self, path):
+ return cast_to_vtl_json_object(self._extract_json_path(path))
+
+ def json(self, path):
+ path = path or "$"
+ matching = self._extract_json_path(path)
+ if matching is None:
+ matching = ""
+ elif isinstance(matching, (list, dict)):
+ matching = json_safe(matching)
+ return json.dumps(matching)
+
+ @property
+ def body(self):
+ if not self.value:
+ return "{}"
+
+ return self.value
+
+ def params(self, name=None):
+ if not name:
+ return self.parameters
+ for k in ["path", "querystring", "header"]:
+ if val := self.parameters.get(k).get(name):
+ return val
+ return ""
+
+ def __getattr__(self, name):
+ return self.value.get(name)
+
+ def __repr__(self):
+ return "$input"
+
+
+class ApiGatewayVtlTemplate(VtlTemplate):
+ """Util class for rendering VTL templates with API Gateway specific extensions"""
+
+ def prepare_namespace(self, variables, source: str = APIGW_SOURCE) -> dict[str, Any]:
+ namespace = super().prepare_namespace(variables, source)
+ input_var = variables.get("input") or {}
+ variables = {
+ "input": VelocityInput(input_var.get("body"), input_var.get("params")),
+ "util": VelocityUtilApiGateway(),
+ }
+ namespace.update(variables)
+ return namespace
+
+ def render_request(
+ self,
+ template: str,
+ variables: MappingTemplateVariables,
+ context_overrides: ContextVariableOverrides,
+ ) -> tuple[str, ContextVariableOverrides]:
+ variables_copy: MappingTemplateVariables = copy.deepcopy(variables)
+ variables_copy["context"].update(copy.deepcopy(context_overrides))
+ result = self.render_vtl(template=template.strip(), variables=variables_copy)
+ return result, ContextVariableOverrides(
+ requestOverride=variables_copy["context"]["requestOverride"],
+ responseOverride=variables_copy["context"]["responseOverride"],
+ )
+
+ def render_response(
+ self,
+ template: str,
+ variables: MappingTemplateVariables,
+ context_overrides: ContextVariableOverrides,
+ ) -> tuple[str, ContextVarsResponseOverride]:
+ variables_copy: MappingTemplateVariables = copy.deepcopy(variables)
+ variables_copy["context"].update(copy.deepcopy(context_overrides))
+ result = self.render_vtl(template=template.strip(), variables=variables_copy)
+ return result, variables_copy["context"]["responseOverride"]
+
+
+# patches required to allow our custom class operations in VTL templates processed by airspeed
+airspeed.operators.__additional_methods__[VTLMap] = airspeed.operators.__additional_methods__[dict]
+airspeed.operators.__additional_methods__[VTLJsonDict] = airspeed.operators.__additional_methods__[
+ dict
+]
+airspeed.operators.__additional_methods__[VTLJsonList] = airspeed.operators.__additional_methods__[
+ list
+]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
new file mode 100644
index 0000000000000..0d871077aa707
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
@@ -0,0 +1,214 @@
+import datetime
+from urllib.parse import parse_qs
+
+from rolo import Request
+from rolo.gateway.chain import HandlerChain
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import TestInvokeMethodRequest, TestInvokeMethodResponse
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.utils.strings import to_bytes, to_str
+
+from ...models import RestApiDeployment
+from . import handlers
+from .context import InvocationRequest, RestApiInvocationContext
+from .handlers.resource_router import RestAPIResourceRouter
+from .header_utils import build_multi_value_headers
+from .template_mapping import dict_to_string
+from .variables import (
+ ContextVariableOverrides,
+ ContextVarsRequestOverride,
+ ContextVarsResponseOverride,
+)
+
+# TODO: we probably need to write and populate those logs as part of the handler chain itself
+# and store it in the InvocationContext. That way, we could also retrieve it when calling TestInvoke
+
+TEST_INVOKE_TEMPLATE = """Execution log for request {request_id}
+{formatted_date} : Starting execution for request: {request_id}
+{formatted_date} : HTTP Method: {request_method}, Resource Path: {resource_path}
+{formatted_date} : Method request path: {method_request_path_parameters}
+{formatted_date} : Method request query string: {method_request_query_string}
+{formatted_date} : Method request headers: {method_request_headers}
+{formatted_date} : Method request body before transformations: {method_request_body}
+{formatted_date} : Endpoint request URI: {endpoint_uri}
+{formatted_date} : Endpoint request headers: {endpoint_request_headers}
+{formatted_date} : Endpoint request body after transformations: {endpoint_request_body}
+{formatted_date} : Sending request to {endpoint_uri}
+{formatted_date} : Received response. Status: {endpoint_response_status_code}, Integration latency: {endpoint_response_latency} ms
+{formatted_date} : Endpoint response headers: {endpoint_response_headers}
+{formatted_date} : Endpoint response body before transformations: {endpoint_response_body}
+{formatted_date} : Method response body after transformations: {method_response_body}
+{formatted_date} : Method response headers: {method_response_headers}
+{formatted_date} : Successfully completed execution
+{formatted_date} : Method completed with status: {method_response_status}
+"""
+
+
+def _dump_headers(headers: Headers) -> str:
+ if not headers:
+ return "{}"
+ multi_headers = {key: ",".join(headers.getlist(key)) for key in headers.keys()}
+ string_headers = dict_to_string(multi_headers)
+ if len(string_headers) > 998:
+ return f"{string_headers[:998]} [TRUNCATED]"
+
+ return string_headers
+
+
+def log_template(invocation_context: RestApiInvocationContext, response_headers: Headers) -> str:
+ # TODO: funny enough, in AWS for the `endpoint_response_headers` in AWS_PROXY, they log the response headers from
+ # the Lambda HTTP Invoke call, even though we use the headers from the Lambda response itself
+ formatted_date = datetime.datetime.now(tz=datetime.UTC).strftime("%a %b %d %H:%M:%S %Z %Y")
+ request = invocation_context.invocation_request
+ context_var = invocation_context.context_variables
+ integration_req = invocation_context.integration_request
+ endpoint_resp = invocation_context.endpoint_response
+ method_resp = invocation_context.invocation_response
+ # TODO: if endpoint_uri is an ARN, it means it's an AWS_PROXY integration
+ # this should be transformed to the true URL of a lambda invoke call
+ endpoint_uri = integration_req.get("uri", "")
+
+ return TEST_INVOKE_TEMPLATE.format(
+ formatted_date=formatted_date,
+ request_id=context_var["requestId"],
+ resource_path=request["path"],
+ request_method=request["http_method"],
+ method_request_path_parameters=dict_to_string(request["path_parameters"]),
+ method_request_query_string=dict_to_string(request["query_string_parameters"]),
+ method_request_headers=_dump_headers(request.get("headers")),
+ method_request_body=to_str(request.get("body", "")),
+ endpoint_uri=endpoint_uri,
+ endpoint_request_headers=_dump_headers(integration_req.get("headers")),
+ endpoint_request_body=to_str(integration_req.get("body", "")),
+ # TODO: measure integration latency
+ endpoint_response_latency=150,
+ endpoint_response_status_code=endpoint_resp.get("status_code"),
+ endpoint_response_body=to_str(endpoint_resp.get("body", "")),
+ endpoint_response_headers=_dump_headers(endpoint_resp.get("headers")),
+ method_response_status=method_resp.get("status_code"),
+ method_response_body=to_str(method_resp.get("body", "")),
+ method_response_headers=_dump_headers(response_headers),
+ )
+
+
+def create_test_chain() -> HandlerChain[RestApiInvocationContext]:
+ return HandlerChain(
+ request_handlers=[
+ handlers.method_request_handler,
+ handlers.integration_request_handler,
+ handlers.integration_handler,
+ handlers.integration_response_handler,
+ handlers.method_response_handler,
+ ],
+ exception_handlers=[
+ handlers.gateway_exception_handler,
+ ],
+ )
+
+
+def create_test_invocation_context(
+ test_request: TestInvokeMethodRequest,
+ deployment: RestApiDeployment,
+) -> RestApiInvocationContext:
+ parse_handler = handlers.parse_request
+ http_method = test_request["httpMethod"]
+
+ # we do not need a true HTTP request for the context, as we are skipping all the parsing steps and using the
+ # provider data
+ invocation_context = RestApiInvocationContext(
+ request=Request(method=http_method),
+ )
+ path_query = test_request.get("pathWithQueryString", "/").split("?")
+ path = path_query[0]
+ multi_query_args: dict[str, list[str]] = {}
+
+ if len(path_query) > 1:
+ multi_query_args = parse_qs(path_query[1])
+
+ # for the single value parameters, AWS only keeps the last value of the list
+ single_query_args = {k: v[-1] for k, v in multi_query_args.items()}
+
+ invocation_request = InvocationRequest(
+ http_method=http_method,
+ path=path,
+ raw_path=path,
+ query_string_parameters=single_query_args,
+ multi_value_query_string_parameters=multi_query_args,
+ headers=Headers(test_request.get("headers")),
+ # TODO: handle multiValueHeaders
+ body=to_bytes(test_request.get("body") or ""),
+ )
+ invocation_context.invocation_request = invocation_request
+
+ _, path_parameters = RestAPIResourceRouter(deployment).match(invocation_context)
+ invocation_request["path_parameters"] = path_parameters
+
+ invocation_context.deployment = deployment
+ invocation_context.api_id = test_request["restApiId"]
+ invocation_context.stage = None
+ invocation_context.deployment_id = ""
+ invocation_context.account_id = deployment.account_id
+ invocation_context.region = deployment.region
+ invocation_context.stage_variables = test_request.get("stageVariables", {})
+ invocation_context.context_variables = parse_handler.create_context_variables(
+ invocation_context
+ )
+ invocation_context.context_variable_overrides = ContextVariableOverrides(
+ requestOverride=ContextVarsRequestOverride(header={}, path={}, querystring={}),
+ responseOverride=ContextVarsResponseOverride(header={}, status=0),
+ )
+ invocation_context.trace_id = parse_handler.populate_trace_id({})
+ resource = deployment.rest_api.resources[test_request["resourceId"]]
+ resource_method = resource["resourceMethods"][http_method]
+ invocation_context.resource = resource
+ invocation_context.resource_method = resource_method
+ invocation_context.integration = resource_method["methodIntegration"]
+ handlers.route_request.update_context_variables_with_resource(
+ invocation_context.context_variables, resource
+ )
+
+ return invocation_context
+
+
+def run_test_invocation(
+ test_request: TestInvokeMethodRequest, deployment: RestApiDeployment
+) -> TestInvokeMethodResponse:
+ # validate resource exists in deployment
+ invocation_context = create_test_invocation_context(test_request, deployment)
+
+ test_chain = create_test_chain()
+ # header order is important
+ if invocation_context.integration["type"] == "MOCK":
+ base_headers = {"Content-Type": APPLICATION_JSON}
+ else:
+ # we manually add the trace-id, as it is normally added by handlers.response_enricher which adds too much data
+ # for the TestInvoke. It needs to be first
+ base_headers = {
+ "X-Amzn-Trace-Id": invocation_context.trace_id,
+ "Content-Type": APPLICATION_JSON,
+ }
+
+ test_response = Response(headers=base_headers)
+ start_time = datetime.datetime.now()
+ test_chain.handle(context=invocation_context, response=test_response)
+ end_time = datetime.datetime.now()
+
+ response_headers = test_response.headers.copy()
+ # AWS does not return the Content-Length for TestInvokeMethod
+ response_headers.remove("Content-Length")
+
+ log = log_template(invocation_context, response_headers)
+
+ headers = dict(response_headers)
+ multi_value_headers = build_multi_value_headers(response_headers)
+
+ return TestInvokeMethodResponse(
+ log=log,
+ status=test_response.status_code,
+ body=test_response.get_data(as_text=True),
+ headers=headers,
+ multiValueHeaders=multi_value_headers,
+ latency=int((end_time - start_time).total_seconds()),
+ )
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py
new file mode 100644
index 0000000000000..e457c61180353
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py
@@ -0,0 +1,195 @@
+from typing import Optional, TypedDict
+
+
+class ContextVarsAuthorizer(TypedDict, total=False):
+ # this is merged with the Context returned by the Authorizer, which can attach any property to this dict in string
+ # format
+
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ claims: Optional[dict[str, str]]
+ """Claims returned from the Amazon Cognito user pool after the method caller is successfully authenticated"""
+ principalId: Optional[str]
+ """The principal user identification associated with the token sent by the client and returned from an API Gateway Lambda authorizer"""
+
+
+class ContextVarsIdentityClientCertValidity(TypedDict, total=False):
+ notBefore: str
+ notAfter: str
+
+
+class ContextVarsIdentityClientCert(TypedDict, total=False):
+ """Certificate that a client presents. Present only in access logs if mutual TLS authentication fails."""
+
+ clientCertPem: str
+ subjectDN: str
+ issuerDN: str
+ serialNumber: str
+ validity: ContextVarsIdentityClientCertValidity
+
+
+class ContextVarsIdentity(TypedDict, total=False):
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+ accountId: Optional[str]
+ """The AWS account ID associated with the request."""
+ accessKey: Optional[str]
+ """The AWS access key associated with the request."""
+ apiKey: Optional[str]
+ """For API methods that require an API key, this variable is the API key associated with the method request."""
+ apiKeyId: Optional[str]
+ """The API key ID associated with an API request that requires an API key."""
+ caller: Optional[str]
+ """The principal identifier of the caller that signed the request. Supported for resources that use IAM authorization."""
+ cognitoAuthenticationProvider: Optional[str]
+ """A comma-separated list of the Amazon Cognito authentication providers used by the caller making the request"""
+ cognitoAuthenticationType: Optional[str]
+ """The Amazon Cognito authentication type of the caller making the request"""
+ cognitoIdentityId: Optional[str]
+ """The Amazon Cognito identity ID of the caller making the request"""
+ cognitoIdentityPoolId: Optional[str]
+ """The Amazon Cognito identity pool ID of the caller making the request"""
+ principalOrgId: Optional[str]
+ """The AWS organization ID."""
+ sourceIp: Optional[str]
+ """The source IP address of the immediate TCP connection making the request to the API Gateway endpoint"""
+ clientCert: ContextVarsIdentityClientCert
+ vpcId: Optional[str]
+ """The VPC ID of the VPC making the request to the API Gateway endpoint."""
+ vpceId: Optional[str]
+ """The VPC endpoint ID of the VPC endpoint making the request to the API Gateway endpoint."""
+ user: Optional[str]
+ """The principal identifier of the user that will be authorized against resource access for resources that use IAM authorization."""
+ userAgent: Optional[str]
+ """The User-Agent header of the API caller."""
+ userArn: Optional[str]
+ """The Amazon Resource Name (ARN) of the effective user identified after authentication."""
+
+
+class ContextVarsRequestOverride(TypedDict, total=False):
+ header: dict[str, str]
+ path: dict[str, str]
+ querystring: dict[str, str]
+
+
+class ContextVarsResponseOverride(TypedDict):
+ header: dict[str, str]
+ status: int
+
+
+class ContextVariableOverrides(TypedDict):
+ requestOverride: ContextVarsRequestOverride
+ responseOverride: ContextVarsResponseOverride
+
+
+class GatewayResponseContextVarsError(TypedDict, total=False):
+ # This variable can only be used for simple variable substitution in a GatewayResponse body-mapping template,
+ # which is not processed by the Velocity Template Language engine, and in access logging.
+ message: str
+ messageString: str
+ responseType: str
+ validationErrorString: str
+
+
+class ContextVariables(TypedDict, total=False):
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference
+ accountId: str
+ """The API owner's AWS account ID."""
+ apiId: str
+ """The identifier API Gateway assigns to your API."""
+ authorizer: Optional[ContextVarsAuthorizer]
+ """The principal user identification associated with the token."""
+ awsEndpointRequestId: Optional[str]
+ """The AWS endpoint's request ID."""
+ deploymentId: str
+ """The ID of the API deployment."""
+ domainName: str
+ """The full domain name used to invoke the API. This should be the same as the incoming Host header."""
+ domainPrefix: str
+ """The first label of the $context.domainName."""
+ error: GatewayResponseContextVarsError
+ """The error context variables."""
+ extendedRequestId: str
+ """The extended ID that API Gateway generates and assigns to the API request. """
+ httpMethod: str
+ """The HTTP method used"""
+ identity: Optional[ContextVarsIdentity]
+ isCanaryRequest: Optional[bool]
+ """Indicates if the request was directed to the canary"""
+ path: str
+ """The request path."""
+ protocol: str
+ """The request protocol"""
+ requestId: str
+ """An ID for the request. Clients can override this request ID. """
+ requestOverride: Optional[ContextVarsRequestOverride]
+ """Request override. Only exists for request mapping template"""
+ requestTime: str
+ """The CLF-formatted request time (dd/MMM/yyyy:HH:mm:ss +-hhmm)."""
+ requestTimeEpoch: int
+ """The Epoch-formatted request time, in milliseconds."""
+ resourceId: Optional[str]
+ """The identifier that API Gateway assigns to your resource."""
+ resourcePath: Optional[str]
+ """The path to your resource"""
+ responseOverride: Optional[ContextVarsResponseOverride]
+ """Response override. Only exists for response mapping template"""
+ stage: str
+ """The deployment stage of the API request """
+ wafResponseCode: Optional[str]
+ """The response received from AWS WAF: WAF_ALLOW or WAF_BLOCK. Will not be set if the stage is not associated with a web ACL"""
+ webaclArn: Optional[str]
+ """The complete ARN of the web ACL that is used to decide whether to allow or block the request. Will not be set if the stage is not associated with a web ACL."""
+
+
+class LoggingContextVarsAuthorize(TypedDict, total=False):
+ error: Optional[str]
+ latency: Optional[str]
+ status: Optional[str]
+
+
+class LoggingContextVarsAuthorizer(TypedDict, total=False):
+ error: Optional[str]
+ integrationLatency: Optional[str]
+ integrationStatus: Optional[str]
+ latency: Optional[str]
+ requestId: Optional[str]
+ status: Optional[str]
+
+
+class LoggingContextVarsAuthenticate(TypedDict, total=False):
+ error: Optional[str]
+ latency: Optional[str]
+ status: Optional[str]
+
+
+class LoggingContextVarsCustomDomain(TypedDict, total=False):
+ basePathMatched: Optional[str]
+
+
+class LoggingContextVarsIntegration(TypedDict, total=False):
+ error: Optional[str]
+ integrationStatus: Optional[str]
+ latency: Optional[str]
+ requestId: Optional[str]
+ status: Optional[str]
+
+
+class LoggingContextVarsWaf(TypedDict, total=False):
+ error: Optional[str]
+ latency: Optional[str]
+ status: Optional[str]
+
+
+class LoggingContextVariables(TypedDict, total=False):
+ authorize: Optional[LoggingContextVarsAuthorize]
+ authorizer: Optional[LoggingContextVarsAuthorizer]
+ authenticate: Optional[LoggingContextVarsAuthenticate]
+ customDomain: Optional[LoggingContextVarsCustomDomain]
+ endpointType: Optional[str]
+ integration: Optional[LoggingContextVarsIntegration]
+ integrationLatency: Optional[str]
+ integrationStatus: Optional[str]
+ responseLatency: Optional[str]
+ responseLength: Optional[str]
+ status: Optional[str]
+ waf: Optional[LoggingContextVarsWaf]
+ xrayTraceId: Optional[str]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/provider.py b/localstack-core/localstack/services/apigateway/next_gen/provider.py
new file mode 100644
index 0000000000000..5153463c60a4c
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/provider.py
@@ -0,0 +1,490 @@
+import copy
+import datetime
+import re
+
+from localstack.aws.api import CommonServiceException, RequestContext, handler
+from localstack.aws.api.apigateway import (
+ BadRequestException,
+ CacheClusterSize,
+ CreateStageRequest,
+ Deployment,
+ DeploymentCanarySettings,
+ GatewayResponse,
+ GatewayResponses,
+ GatewayResponseType,
+ ListOfPatchOperation,
+ MapOfStringToString,
+ NotFoundException,
+ NullableBoolean,
+ NullableInteger,
+ Stage,
+ StatusCode,
+ String,
+ TestInvokeMethodRequest,
+ TestInvokeMethodResponse,
+)
+from localstack.services.apigateway.helpers import (
+ get_apigateway_store,
+ get_moto_rest_api,
+ get_rest_api_container,
+)
+from localstack.services.apigateway.legacy.provider import (
+ STAGE_UPDATE_PATHS,
+ ApigatewayProvider,
+ patch_api_gateway_entity,
+)
+from localstack.services.apigateway.patches import apply_patches
+from localstack.services.edge import ROUTER
+from localstack.services.moto import call_moto
+
+from ..models import apigateway_stores
+from .execute_api.gateway_response import (
+ DEFAULT_GATEWAY_RESPONSES,
+ GatewayResponseCode,
+ build_gateway_response,
+ get_gateway_response_or_default,
+)
+from .execute_api.helpers import freeze_rest_api
+from .execute_api.router import ApiGatewayEndpoint, ApiGatewayRouter
+from .execute_api.test_invoke import run_test_invocation
+
+
+class ApigatewayNextGenProvider(ApigatewayProvider):
+ router: ApiGatewayRouter
+
+ def __init__(self, router: ApiGatewayRouter = None):
+ # we initialize the route handler with a global store with default account and region, because it only ever
+ # access values with CrossAccount attributes
+ if not router:
+ route_handler = ApiGatewayEndpoint(store=apigateway_stores)
+ router = ApiGatewayRouter(ROUTER, handler=route_handler)
+
+ super().__init__(router=router)
+
+ def on_after_init(self):
+ apply_patches()
+ self.router.register_routes()
+
+ @handler("DeleteRestApi")
+ def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None:
+ super().delete_rest_api(context, rest_api_id, **kwargs)
+ store = get_apigateway_store(context=context)
+ api_id_lower = rest_api_id.lower()
+ store.active_deployments.pop(api_id_lower, None)
+ store.internal_deployments.pop(api_id_lower, None)
+
+ @handler("CreateStage", expand=False)
+ def create_stage(self, context: RequestContext, request: CreateStageRequest) -> Stage:
+ # TODO: we need to internalize Stages and Deployments in LocalStack, we have a lot of split logic
+ super().create_stage(context, request)
+ rest_api_id = request["restApiId"].lower()
+ stage_name = request["stageName"]
+ moto_api = get_moto_rest_api(context, rest_api_id)
+ stage = moto_api.stages[stage_name]
+
+ if canary_settings := request.get("canarySettings"):
+ if (
+ deployment_id := canary_settings.get("deploymentId")
+ ) and deployment_id not in moto_api.deployments:
+ raise BadRequestException("Deployment id does not exist")
+
+ default_settings = {
+ "deploymentId": stage.deployment_id,
+ "percentTraffic": 0.0,
+ "useStageCache": False,
+ }
+ default_settings.update(canary_settings)
+ stage.canary_settings = default_settings
+ else:
+ stage.canary_settings = None
+
+ store = get_apigateway_store(context=context)
+
+ store.active_deployments.setdefault(rest_api_id, {})
+ store.active_deployments[rest_api_id][stage_name] = request["deploymentId"]
+ response: Stage = stage.to_json()
+ self._patch_stage_response(response)
+ return response
+
+ @handler("UpdateStage")
+ def update_stage(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ stage_name: String,
+ patch_operations: ListOfPatchOperation = None,
+ **kwargs,
+ ) -> Stage:
+ moto_rest_api = get_moto_rest_api(context, rest_api_id)
+ if not (moto_stage := moto_rest_api.stages.get(stage_name)):
+ raise NotFoundException("Invalid Stage identifier specified")
+
+ # construct list of path regexes for validation
+ path_regexes = [re.sub("{[^}]+}", ".+", path) for path in STAGE_UPDATE_PATHS]
+
+ # copy the patch operations to not mutate them, so that we're logging the correct input
+ patch_operations = copy.deepcopy(patch_operations) or []
+ # we are only passing a subset of operations to Moto as it does not handle properly all of them
+ moto_patch_operations = []
+ moto_stage_copy = copy.deepcopy(moto_stage)
+ for patch_operation in patch_operations:
+ skip_moto_apply = False
+ patch_path = patch_operation["path"]
+ patch_op = patch_operation["op"]
+
+ # special case: handle updates (op=remove) for wildcard method settings
+ patch_path_stripped = patch_path.strip("/")
+ if patch_path_stripped == "*/*" and patch_op == "remove":
+ if not moto_stage.method_settings.pop(patch_path_stripped, None):
+ raise BadRequestException(
+ "Cannot remove method setting */* because there is no method setting for this method "
+ )
+ response = moto_stage.to_json()
+ self._patch_stage_response(response)
+ return response
+
+ path_valid = patch_path in STAGE_UPDATE_PATHS or any(
+ re.match(regex, patch_path) for regex in path_regexes
+ )
+ if is_canary := patch_path.startswith("/canarySettings"):
+ skip_moto_apply = True
+ path_valid = is_canary_settings_update_patch_valid(op=patch_op, path=patch_path)
+ # it seems our JSON Patch utility does not handle replace properly if the value does not exist beforehand
+ # this may be a Stage-only quirk, so we replace the operation here
+ if patch_op == "replace":
+ patch_operation["op"] = "add"
+
+ if patch_op == "copy":
+ copy_from = patch_operation.get("from")
+ if patch_path not in ("/deploymentId", "/variables") or copy_from not in (
+ "/canarySettings/deploymentId",
+ "/canarySettings/stageVariableOverrides",
+ ):
+ raise BadRequestException(
+ "Invalid copy operation with path: /canarySettings/stageVariableOverrides and from /variables. Valid copy:path are [/deploymentId, /variables] and valid copy:from are [/canarySettings/deploymentId, /canarySettings/stageVariableOverrides]"
+ )
+
+ if copy_from.startswith("/canarySettings") and not getattr(
+ moto_stage_copy, "canary_settings", None
+ ):
+ raise BadRequestException("Promotion not available. Canary does not exist.")
+
+ if patch_path == "/variables":
+ moto_stage_copy.variables.update(
+ moto_stage_copy.canary_settings.get("stageVariableOverrides", {})
+ )
+ elif patch_path == "/deploymentId":
+ moto_stage_copy.deployment_id = moto_stage_copy.canary_settings["deploymentId"]
+
+ # we manually assign `copy` ops, no need to apply them
+ continue
+
+ if not path_valid:
+ valid_paths = f"[{', '.join(STAGE_UPDATE_PATHS)}]"
+ # note: weird formatting in AWS - required for snapshot testing
+ valid_paths = valid_paths.replace(
+ "/{resourcePath}/{httpMethod}/throttling/burstLimit, /{resourcePath}/{httpMethod}/throttling/rateLimit, /{resourcePath}/{httpMethod}/caching/ttlInSeconds",
+ "/{resourcePath}/{httpMethod}/throttling/burstLimit/{resourcePath}/{httpMethod}/throttling/rateLimit/{resourcePath}/{httpMethod}/caching/ttlInSeconds",
+ )
+ valid_paths = valid_paths.replace("/burstLimit, /", "/burstLimit /")
+ valid_paths = valid_paths.replace("/rateLimit, /", "/rateLimit /")
+ raise BadRequestException(
+ f"Invalid method setting path: {patch_operation['path']}. Must be one of: {valid_paths}"
+ )
+
+ # TODO: check if there are other boolean fields; maybe add a global step in _patch_api_gateway_entity
+ if patch_path == "/tracingEnabled" and (value := patch_operation.get("value")):
+ patch_operation["value"] = value and value.lower() == "true" or False
+
+ elif patch_path in ("/canarySettings/deploymentId", "/deploymentId"):
+ if patch_op != "copy" and not moto_rest_api.deployments.get(
+ patch_operation.get("value")
+ ):
+ raise BadRequestException("Deployment id does not exist")
+
+ if not skip_moto_apply:
+ # we need to copy the patch operation because `_patch_api_gateway_entity` is mutating it in place
+ moto_patch_operations.append(dict(patch_operation))
+
+ # we need to apply each patch operation individually to be able to validate the logic
+ # TODO: rework the patching logic
+ patch_api_gateway_entity(moto_stage_copy, [patch_operation])
+ if is_canary and (canary_settings := getattr(moto_stage_copy, "canary_settings", None)):
+ default_canary_settings = {
+ "deploymentId": moto_stage_copy.deployment_id,
+ "percentTraffic": 0.0,
+ "useStageCache": False,
+ }
+ default_canary_settings.update(canary_settings)
+ default_canary_settings["percentTraffic"] = float(
+ default_canary_settings["percentTraffic"]
+ )
+ moto_stage_copy.canary_settings = default_canary_settings
+
+ moto_rest_api.stages[stage_name] = moto_stage_copy
+ moto_stage_copy.apply_operations(moto_patch_operations)
+ if moto_stage.deployment_id != moto_stage_copy.deployment_id:
+ store = get_apigateway_store(context=context)
+ store.active_deployments.setdefault(rest_api_id.lower(), {})[stage_name] = (
+ moto_stage_copy.deployment_id
+ )
+
+ moto_stage_copy.last_updated_date = datetime.datetime.now(tz=datetime.UTC)
+
+ response = moto_stage_copy.to_json()
+ self._patch_stage_response(response)
+ return response
+
+ def delete_stage(
+ self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
+ ) -> None:
+ call_moto(context)
+ store = get_apigateway_store(context=context)
+ store.active_deployments[rest_api_id.lower()].pop(stage_name, None)
+
+ def create_deployment(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ stage_name: String = None,
+ stage_description: String = None,
+ description: String = None,
+ cache_cluster_enabled: NullableBoolean = None,
+ cache_cluster_size: CacheClusterSize = None,
+ variables: MapOfStringToString = None,
+ canary_settings: DeploymentCanarySettings = None,
+ tracing_enabled: NullableBoolean = None,
+ **kwargs,
+ ) -> Deployment:
+ moto_rest_api = get_moto_rest_api(context, rest_api_id)
+ if canary_settings:
+ # TODO: add validation to the canary settings
+ if not stage_name:
+ error_stage = stage_name if stage_name is not None else "null"
+ raise BadRequestException(
+ f"Invalid deployment content specified.Non null and non empty stageName must be provided for canary deployment. Provided value is {error_stage}"
+ )
+ if stage_name not in moto_rest_api.stages:
+ raise BadRequestException(
+ "Invalid deployment content specified.Stage non-existing must already be created before making a canary release deployment"
+ )
+
+ # FIXME: moto has an issue and is not handling canarySettings, hence overwriting the current stage with the
+ # canary deployment
+ current_stage = None
+ if stage_name:
+ current_stage = copy.deepcopy(moto_rest_api.stages.get(stage_name))
+
+ # TODO: if the REST API does not contain any method, we should raise an exception
+ deployment: Deployment = call_moto(context)
+ # https://docs.aws.amazon.com/apigateway/latest/developerguide/updating-api.html
+ # TODO: the deployment is not accessible until it is linked to a stage
+ # you can combine a stage or later update the deployment with a stage id
+ store = get_apigateway_store(context=context)
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+ frozen_deployment = freeze_rest_api(
+ account_id=context.account_id,
+ region=context.region,
+ moto_rest_api=moto_rest_api,
+ localstack_rest_api=rest_api_container,
+ )
+ router_api_id = rest_api_id.lower()
+ deployment_id = deployment["id"]
+ store.internal_deployments.setdefault(router_api_id, {})[deployment_id] = frozen_deployment
+
+ if stage_name:
+ moto_stage = moto_rest_api.stages[stage_name]
+ if canary_settings:
+ moto_stage = current_stage
+ moto_rest_api.stages[stage_name] = current_stage
+
+ default_settings = {
+ "deploymentId": deployment_id,
+ "percentTraffic": 0.0,
+ "useStageCache": False,
+ }
+ default_settings.update(canary_settings)
+ moto_stage.canary_settings = default_settings
+ else:
+ store.active_deployments.setdefault(router_api_id, {})[stage_name] = deployment_id
+ moto_stage.canary_settings = None
+
+ if variables:
+ moto_stage.variables = variables
+
+ moto_stage.description = stage_description or moto_stage.description or None
+
+ if cache_cluster_enabled is not None:
+ moto_stage.cache_cluster_enabled = cache_cluster_enabled
+
+ if cache_cluster_size is not None:
+ moto_stage.cache_cluster_size = cache_cluster_size
+
+ if tracing_enabled is not None:
+ moto_stage.tracing_enabled = tracing_enabled
+
+ return deployment
+
+ def delete_deployment(
+ self, context: RequestContext, rest_api_id: String, deployment_id: String, **kwargs
+ ) -> None:
+ call_moto(context)
+ store = get_apigateway_store(context=context)
+ store.internal_deployments.get(rest_api_id.lower(), {}).pop(deployment_id, None)
+
+ def put_gateway_response(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ response_type: GatewayResponseType,
+ status_code: StatusCode = None,
+ response_parameters: MapOfStringToString = None,
+ response_templates: MapOfStringToString = None,
+ **kwargs,
+ ) -> GatewayResponse:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if response_type not in DEFAULT_GATEWAY_RESPONSES:
+ raise CommonServiceException(
+ code="ValidationException",
+ message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
+ )
+
+ gateway_response = build_gateway_response(
+ status_code=status_code,
+ response_parameters=response_parameters,
+ response_templates=response_templates,
+ response_type=response_type,
+ default_response=False,
+ )
+
+ rest_api_container.gateway_responses[response_type] = gateway_response
+
+ # The CRUD provider has a weird behavior: for some responses (for now, INTEGRATION_FAILURE), it sets the default
+ # status code to `504`. However, in the actual invocation logic, it returns 500. To deal with the inconsistency,
+ # we need to set the value to None if not provided by the user, so that the invocation logic can properly return
+ # 500, and the CRUD layer can still return 504 even though it is technically wrong.
+ response = gateway_response.copy()
+ if response.get("statusCode") is None:
+ response["statusCode"] = GatewayResponseCode[response_type]
+
+ return response
+
+ def get_gateway_response(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ response_type: GatewayResponseType,
+ **kwargs,
+ ) -> GatewayResponse:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ if response_type not in DEFAULT_GATEWAY_RESPONSES:
+ raise CommonServiceException(
+ code="ValidationException",
+ message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]",
+ )
+
+ gateway_response = _get_gateway_response_or_default(
+ response_type, rest_api_container.gateway_responses
+ )
+ # TODO: add validation with the parameters? seems like it validated client side? how to try?
+ return gateway_response
+
+ def get_gateway_responses(
+ self,
+ context: RequestContext,
+ rest_api_id: String,
+ position: String = None,
+ limit: NullableInteger = None,
+ **kwargs,
+ ) -> GatewayResponses:
+ store = get_apigateway_store(context=context)
+ if not (rest_api_container := store.rest_apis.get(rest_api_id)):
+ raise NotFoundException(
+ f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+ )
+
+ user_gateway_resp = rest_api_container.gateway_responses
+ gateway_responses = [
+ _get_gateway_response_or_default(response_type, user_gateway_resp)
+ for response_type in DEFAULT_GATEWAY_RESPONSES
+ ]
+ return GatewayResponses(items=gateway_responses)
+
+ def test_invoke_method(
+ self, context: RequestContext, request: TestInvokeMethodRequest
+ ) -> TestInvokeMethodResponse:
+ rest_api_id = request["restApiId"]
+ moto_rest_api = get_moto_rest_api(context=context, rest_api_id=rest_api_id)
+ resource = moto_rest_api.resources.get(request["resourceId"])
+ if not resource:
+ raise NotFoundException("Invalid Resource identifier specified")
+
+ # test httpMethod
+
+ rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id)
+ frozen_deployment = freeze_rest_api(
+ account_id=context.account_id,
+ region=context.region,
+ moto_rest_api=moto_rest_api,
+ localstack_rest_api=rest_api_container,
+ )
+
+ response = run_test_invocation(
+ test_request=request,
+ deployment=frozen_deployment,
+ )
+
+ return response
+
+
+def is_canary_settings_update_patch_valid(op: str, path: str) -> bool:
+ path_regexes = (
+ r"\/canarySettings\/percentTraffic",
+ r"\/canarySettings\/deploymentId",
+ r"\/canarySettings\/stageVariableOverrides\/.+",
+ r"\/canarySettings\/useStageCache",
+ )
+ if path == "/canarySettings" and op == "remove":
+ return True
+
+ matches_path = any(re.match(regex, path) for regex in path_regexes)
+
+ if op not in ("replace", "copy"):
+ if matches_path:
+ raise BadRequestException(f"Invalid {op} operation with path: {path}")
+
+ raise BadRequestException(
+ f"Cannot {op} method setting {path.lstrip('/')} because there is no method setting for this method "
+ )
+
+ # stageVariableOverrides is a bit special as it's nested, it doesn't return the same error message
+ if not matches_path and path != "/canarySettings/stageVariableOverrides":
+ return False
+
+ return True
+
+
+def _get_gateway_response_or_default(
+ response_type: GatewayResponseType,
+ gateway_responses: dict[GatewayResponseType, GatewayResponse],
+) -> GatewayResponse:
+ """
+ Utility function that overrides the behavior of `get_gateway_response_or_default` by setting a default status code
+ from the `GatewayResponseCode` values. In reality, some default values in the invocation layer are different from
+ what the CRUD layer of API Gateway is returning.
+ """
+ response = get_gateway_response_or_default(response_type, gateway_responses)
+ if response.get("statusCode") is None and (status_code := GatewayResponseCode[response_type]):
+ response["statusCode"] = status_code
+
+ return response
diff --git a/localstack-core/localstack/services/apigateway/patches.py b/localstack-core/localstack/services/apigateway/patches.py
new file mode 100644
index 0000000000000..ca12f96284fff
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/patches.py
@@ -0,0 +1,207 @@
+import datetime
+import json
+import logging
+
+from moto.apigateway import models as apigateway_models
+from moto.apigateway.exceptions import (
+ DeploymentNotFoundException,
+ NoIntegrationDefined,
+ RestAPINotFound,
+ StageStillActive,
+)
+from moto.apigateway.responses import APIGatewayResponse
+from moto.core.utils import camelcase_to_underscores
+
+from localstack.constants import TAG_KEY_CUSTOM_ID
+from localstack.services.apigateway.helpers import apply_json_patch_safe
+from localstack.utils.common import str_to_bool
+from localstack.utils.patch import patch
+
+LOG = logging.getLogger(__name__)
+
+
+def apply_patches():
+ # TODO refactor patches in this module (e.g., use @patch decorator, simplify, ...)
+
+ def apigateway_models_Stage_init(
+ self, cacheClusterEnabled=False, cacheClusterSize=None, **kwargs
+ ):
+ apigateway_models_Stage_init_orig(
+ self,
+ cacheClusterEnabled=cacheClusterEnabled,
+ cacheClusterSize=cacheClusterSize,
+ **kwargs,
+ )
+
+ if (cacheClusterSize or cacheClusterEnabled) and not self.cache_cluster_status:
+ self.cache_cluster_status = "AVAILABLE"
+
+ now = datetime.datetime.now(tz=datetime.UTC)
+ self.created_date = now
+ self.last_updated_date = now
+
+ apigateway_models_Stage_init_orig = apigateway_models.Stage.__init__
+ apigateway_models.Stage.__init__ = apigateway_models_Stage_init
+
+ @patch(APIGatewayResponse.put_integration)
+ def apigateway_put_integration(fn, self, *args, **kwargs):
+ # TODO: verify if this patch is still necessary, this might have been fixed upstream
+ fn(self, *args, **kwargs)
+
+ url_path_parts = self.path.split("/")
+ function_id = url_path_parts[2]
+ resource_id = url_path_parts[4]
+ method_type = url_path_parts[6]
+ integration = self.backend.get_integration(function_id, resource_id, method_type)
+
+ timeout_milliseconds = self._get_param("timeoutInMillis")
+ cache_key_parameters = self._get_param("cacheKeyParameters") or []
+ content_handling = self._get_param("contentHandling")
+ integration.cache_namespace = resource_id
+ integration.timeout_in_millis = timeout_milliseconds
+ integration.cache_key_parameters = cache_key_parameters
+ integration.content_handling = content_handling
+ return 201, {}, json.dumps(integration.to_json())
+
+ # define json-patch operations for backend models
+
+ def backend_model_apply_operations(self, patch_operations):
+ # run pre-actions
+ if isinstance(self, apigateway_models.Stage) and [
+ op for op in patch_operations if "/accessLogSettings" in op.get("path", "")
+ ]:
+ self.access_log_settings = self.access_log_settings or {}
+ # apply patches
+ apply_json_patch_safe(self, patch_operations, in_place=True)
+ # run post-actions
+ if isinstance(self, apigateway_models.Stage):
+ bool_params = ["cacheClusterEnabled", "tracingEnabled"]
+ for bool_param in bool_params:
+ if getattr(self, camelcase_to_underscores(bool_param), None):
+ value = getattr(self, camelcase_to_underscores(bool_param), None)
+ setattr(self, camelcase_to_underscores(bool_param), str_to_bool(value))
+ return self
+
+ model_classes = [
+ apigateway_models.Authorizer,
+ apigateway_models.DomainName,
+ apigateway_models.MethodResponse,
+ ]
+ for model_class in model_classes:
+ model_class.apply_operations = model_class.apply_patch_operations = (
+ backend_model_apply_operations
+ )
+
+ # fix data types for some json-patch operation values
+
+ @patch(apigateway_models.Stage._get_default_method_settings)
+ def _get_default_method_settings(fn, self):
+ result = fn(self)
+ default_settings = self.method_settings.get("*/*", {})
+ result["cacheDataEncrypted"] = default_settings.get("cacheDataEncrypted", False)
+ result["throttlingRateLimit"] = default_settings.get("throttlingRateLimit", 10000.0)
+ result["throttlingBurstLimit"] = default_settings.get("throttlingBurstLimit", 5000)
+ result["metricsEnabled"] = default_settings.get("metricsEnabled", False)
+ result["dataTraceEnabled"] = default_settings.get("dataTraceEnabled", False)
+ result["unauthorizedCacheControlHeaderStrategy"] = default_settings.get(
+ "unauthorizedCacheControlHeaderStrategy", "SUCCEED_WITH_RESPONSE_HEADER"
+ )
+ result["cacheTtlInSeconds"] = default_settings.get("cacheTtlInSeconds", 300)
+ result["cachingEnabled"] = default_settings.get("cachingEnabled", False)
+ result["requireAuthorizationForCacheControl"] = default_settings.get(
+ "requireAuthorizationForCacheControl", True
+ )
+ return result
+
+ # patch integration error responses
+ @patch(apigateway_models.Resource.get_integration)
+ def apigateway_models_resource_get_integration(fn, self, method_type):
+ resource_method = self.resource_methods.get(method_type, {})
+ if not resource_method.method_integration:
+ raise NoIntegrationDefined()
+ return resource_method.method_integration
+
+ @patch(apigateway_models.RestAPI.to_dict)
+ def apigateway_models_rest_api_to_dict(fn, self):
+ resp = fn(self)
+ resp["policy"] = None
+ if self.policy:
+ # Strip whitespaces for TF compatibility (not entirely sure why we need double-dumps,
+ # but otherwise: "error normalizing policy JSON: invalid character 'V' after top-level value")
+ resp["policy"] = json.dumps(json.dumps(json.loads(self.policy), separators=(",", ":")))[
+ 1:-1
+ ]
+
+ if not self.tags:
+ resp["tags"] = None
+
+ resp["disableExecuteApiEndpoint"] = (
+ str(resp.get("disableExecuteApiEndpoint")).lower() == "true"
+ )
+
+ return resp
+
+ @patch(apigateway_models.Stage.to_json)
+ def apigateway_models_stage_to_json(fn, self):
+ result = fn(self)
+
+ if "documentationVersion" not in result:
+ result["documentationVersion"] = getattr(self, "documentation_version", None)
+
+ if "canarySettings" not in result:
+ result["canarySettings"] = getattr(self, "canary_settings", None)
+
+ if "createdDate" not in result:
+ created_date = getattr(self, "created_date", None)
+ if created_date:
+ created_date = int(created_date.timestamp())
+ result["createdDate"] = created_date
+
+ if "lastUpdatedDate" not in result:
+ last_updated_date = getattr(self, "last_updated_date", None)
+ if last_updated_date:
+ last_updated_date = int(last_updated_date.timestamp())
+ result["lastUpdatedDate"] = last_updated_date
+
+ return result
+
+ @patch(apigateway_models.Stage._str2bool, pass_target=False)
+ def apigateway_models_stage_str_to_bool(self, v: bool | str) -> bool:
+ return str_to_bool(v)
+
+ # TODO remove this patch when the behavior is implemented in moto
+ @patch(apigateway_models.APIGatewayBackend.create_rest_api)
+ def create_rest_api(fn, self, *args, tags=None, **kwargs):
+ """
+ https://github.com/localstack/localstack/pull/4413/files
+ Add ability to specify custom IDs for API GW REST APIs via tags
+ """
+ tags = tags or {}
+ result = fn(self, *args, tags=tags, **kwargs)
+ # TODO: lower the custom_id when getting it from the tags, as AWS is case insensitive
+ if custom_id := tags.get(TAG_KEY_CUSTOM_ID):
+ self.apis.pop(result.id)
+ result.id = custom_id
+ self.apis[custom_id] = result
+ return result
+
+ @patch(apigateway_models.APIGatewayBackend.get_rest_api, pass_target=False)
+ def get_rest_api(self, function_id):
+ for key in self.apis.keys():
+ if key.lower() == function_id.lower():
+ return self.apis[key]
+ raise RestAPINotFound()
+
+ @patch(apigateway_models.RestAPI.delete_deployment, pass_target=False)
+ def patch_delete_deployment(self, deployment_id: str) -> apigateway_models.Deployment:
+ if deployment_id not in self.deployments:
+ raise DeploymentNotFoundException()
+ deployment = self.deployments[deployment_id]
+ if deployment.stage_name and (
+ (stage := self.stages.get(deployment.stage_name))
+ and stage.deployment_id == deployment.id
+ ):
+ # Stage is still active
+ raise StageStillActive()
+
+ return self.deployments.pop(deployment_id)
diff --git a/localstack/utils/kinesis/__init__.py b/localstack-core/localstack/services/apigateway/resource_providers/__init__.py
similarity index 100%
rename from localstack/utils/kinesis/__init__.py
rename to localstack-core/localstack/services/apigateway/resource_providers/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py
new file mode 100644
index 0000000000000..8c78925a5a8b8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py
@@ -0,0 +1,110 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayAccountProperties(TypedDict):
+ CloudWatchRoleArn: Optional[str]
+ Id: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayAccountProvider(ResourceProvider[ApiGatewayAccountProperties]):
+ TYPE = "AWS::ApiGateway::Account" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayAccountProperties],
+ ) -> ProgressEvent[ApiGatewayAccountProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/Id
+
+
+
+
+
+ Read-only properties:
+ - /properties/Id
+
+ IAM permissions required:
+ - apigateway:PATCH
+ - iam:GetRole
+ - iam:PassRole
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ role_arn = model["CloudWatchRoleArn"]
+ apigw.update_account(
+ patchOperations=[{"op": "replace", "path": "/cloudwatchRoleArn", "value": role_arn}]
+ )
+
+ model["Id"] = util.generate_default_name(
+ stack_name=request.stack_name, logical_resource_id=request.logical_resource_id
+ )
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayAccountProperties],
+ ) -> ProgressEvent[ApiGatewayAccountProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayAccountProperties],
+ ) -> ProgressEvent[ApiGatewayAccountProperties]:
+ """
+ Delete a resource
+
+
+ """
+ model = request.desired_state
+
+ # note: deletion of accounts is currently a no-op
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayAccountProperties],
+ ) -> ProgressEvent[ApiGatewayAccountProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:PATCH
+ - iam:GetRole
+ - iam:PassRole
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json
new file mode 100644
index 0000000000000..3192ca8c3b443
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json
@@ -0,0 +1,46 @@
+{
+ "typeName": "AWS::ApiGateway::Account",
+ "description": "Resource Type definition for AWS::ApiGateway::Account",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "Id": {
+ "description": "Primary identifier which is manually generated.",
+ "type": "string"
+ },
+ "CloudWatchRoleArn": {
+ "description": "The Amazon Resource Name (ARN) of an IAM role that has write access to CloudWatch Logs in your account.",
+ "type": "string"
+ }
+ },
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:PATCH",
+ "iam:GetRole",
+ "iam:PassRole"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:PATCH",
+ "iam:GetRole",
+ "iam:PassRole"
+ ]
+ },
+ "delete": {
+ "permissions": []
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py
new file mode 100644
index 0000000000000..d7dc5c91ce0d1
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayAccountProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::Account"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_account import (
+ ApiGatewayAccountProvider,
+ )
+
+ self.factory = ApiGatewayAccountProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py
new file mode 100644
index 0000000000000..1385cd6c5d01c
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py
@@ -0,0 +1,136 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+
+
+class ApiGatewayApiKeyProperties(TypedDict):
+ APIKeyId: Optional[str]
+ CustomerId: Optional[str]
+ Description: Optional[str]
+ Enabled: Optional[bool]
+ GenerateDistinctId: Optional[bool]
+ Name: Optional[str]
+ StageKeys: Optional[list[StageKey]]
+ Tags: Optional[list[Tag]]
+ Value: Optional[str]
+
+
+class StageKey(TypedDict):
+ RestApiId: Optional[str]
+ StageName: Optional[str]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayApiKeyProvider(ResourceProvider[ApiGatewayApiKeyProperties]):
+ TYPE = "AWS::ApiGateway::ApiKey" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayApiKeyProperties],
+ ) -> ProgressEvent[ApiGatewayApiKeyProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/APIKeyId
+
+
+ Create-only properties:
+ - /properties/GenerateDistinctId
+ - /properties/Name
+ - /properties/Value
+
+ Read-only properties:
+ - /properties/APIKeyId
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ params = util.select_attributes(
+ model, ["Description", "CustomerId", "Name", "Value", "Enabled", "StageKeys"]
+ )
+ params = keys_to_lower(params.copy())
+ if "enabled" in params:
+ params["enabled"] = bool(params["enabled"])
+
+ if model.get("Tags"):
+ params["tags"] = {tag["Key"]: tag["Value"] for tag in model["Tags"]}
+
+ response = apigw.create_api_key(**params)
+ model["APIKeyId"] = response["id"]
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayApiKeyProperties],
+ ) -> ProgressEvent[ApiGatewayApiKeyProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayApiKeyProperties],
+ ) -> ProgressEvent[ApiGatewayApiKeyProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ apigw.delete_api_key(apiKey=model["APIKeyId"])
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayApiKeyProperties],
+ ) -> ProgressEvent[ApiGatewayApiKeyProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:PATCH
+ - apigateway:PUT
+ - apigateway:DELETE
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json
new file mode 100644
index 0000000000000..4d58557451ff8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json
@@ -0,0 +1,135 @@
+{
+ "typeName": "AWS::ApiGateway::ApiKey",
+ "description": "Resource Type definition for AWS::ApiGateway::ApiKey",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "APIKeyId": {
+ "description": "A Unique Key ID which identifies the API Key. Generated by the Create API and returned by the Read and List APIs ",
+ "type": "string"
+ },
+ "CustomerId": {
+ "description": "An AWS Marketplace customer identifier to use when integrating with the AWS SaaS Marketplace.",
+ "type": "string"
+ },
+ "Description": {
+ "description": "A description of the purpose of the API key.",
+ "type": "string"
+ },
+ "Enabled": {
+ "description": "Indicates whether the API key can be used by clients.",
+ "default": false,
+ "type": "boolean"
+ },
+ "GenerateDistinctId": {
+ "description": "Specifies whether the key identifier is distinct from the created API key value. This parameter is deprecated and should not be used.",
+ "type": "boolean"
+ },
+ "Name": {
+ "description": "A name for the API key. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the API key name.",
+ "type": "string"
+ },
+ "StageKeys": {
+ "description": "A list of stages to associate with this API key.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "$ref": "#/definitions/StageKey"
+ }
+ },
+ "Tags": {
+ "description": "An array of arbitrary tags (key-value pairs) to associate with the API key.",
+ "type": "array",
+ "uniqueItems": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ },
+ "Value": {
+ "description": "The value of the API key. Must be at least 20 characters long.",
+ "type": "string"
+ }
+ },
+ "definitions": {
+ "StageKey": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "RestApiId": {
+ "description": "The ID of a RestApi resource that includes the stage with which you want to associate the API key.",
+ "type": "string"
+ },
+ "StageName": {
+ "description": "The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property. ",
+ "type": "string"
+ }
+ }
+ },
+ "Tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Key": {
+ "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 128
+ },
+ "Value": {
+ "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. ",
+ "type": "string",
+ "maxLength": 256
+ }
+ },
+ "required": [
+ "Value",
+ "Key"
+ ]
+ }
+ },
+ "createOnlyProperties": [
+ "/properties/GenerateDistinctId",
+ "/properties/Name",
+ "/properties/Value"
+ ],
+ "writeOnlyProperties": [
+ "/properties/GenerateDistinctId"
+ ],
+ "primaryIdentifier": [
+ "/properties/APIKeyId"
+ ],
+ "readOnlyProperties": [
+ "/properties/APIKeyId"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:PATCH",
+ "apigateway:PUT",
+ "apigateway:DELETE"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py
new file mode 100644
index 0000000000000..352ec19eec4d3
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayApiKeyProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::ApiKey"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_apikey import (
+ ApiGatewayApiKeyProvider,
+ )
+
+ self.factory = ApiGatewayApiKeyProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py
new file mode 100644
index 0000000000000..51debd7811631
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py
@@ -0,0 +1,122 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayBasePathMappingProperties(TypedDict):
+ DomainName: Optional[str]
+ BasePath: Optional[str]
+ RestApiId: Optional[str]
+ Stage: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayBasePathMappingProvider(ResourceProvider[ApiGatewayBasePathMappingProperties]):
+ TYPE = "AWS::ApiGateway::BasePathMapping" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayBasePathMappingProperties],
+ ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/DomainName
+ - /properties/BasePath
+
+ Required properties:
+ - DomainName
+
+ Create-only properties:
+ - /properties/DomainName
+ - /properties/BasePath
+
+
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+
+ # TODO we are using restApiId for PhysicalResourceId
+ # check if we need to change it
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ params = {
+ "domainName": model.get("DomainName"),
+ "restApiId": model.get("RestApiId"),
+ **({"basePath": model.get("BasePath")} if model.get("BasePath") else {}),
+ **({"stage": model.get("Stage")} if model.get("Stage") else {}),
+ }
+ response = apigw.create_base_path_mapping(**params)
+ model["RestApiId"] = response["restApiId"]
+ # TODO: validations
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayBasePathMappingProperties],
+ ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayBasePathMappingProperties],
+ ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ apigw.delete_base_path_mapping(domainName=model["DomainName"], basePath=model["BasePath"])
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayBasePathMappingProperties],
+ ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:DELETE
+ - apigateway:PATCH
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json
new file mode 100644
index 0000000000000..ded5541adedac
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json
@@ -0,0 +1,81 @@
+{
+ "typeName": "AWS::ApiGateway::BasePathMapping",
+ "description": "Resource Type definition for AWS::ApiGateway::BasePathMapping",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "BasePath": {
+ "type": "string",
+ "description": "The base path name that callers of the API must provide in the URL after the domain name."
+ },
+ "DomainName": {
+ "type": "string",
+ "description": "The DomainName of an AWS::ApiGateway::DomainName resource."
+ },
+ "RestApiId": {
+ "type": "string",
+ "description": "The ID of the API."
+ },
+ "Stage": {
+ "type": "string",
+ "description": "The name of the API's stage."
+ }
+ },
+ "required": [
+ "DomainName"
+ ],
+ "createOnlyProperties": [
+ "/properties/DomainName",
+ "/properties/BasePath"
+ ],
+ "primaryIdentifier": [
+ "/properties/DomainName",
+ "/properties/BasePath"
+ ],
+ "tagging": {
+ "taggable": false,
+ "tagOnCreate": false,
+ "tagUpdatable": false,
+ "cloudFormationSystemTags": false
+ },
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE",
+ "apigateway:PATCH"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "DomainName": {
+ "$ref": "resource-schema.json#/properties/DomainName"
+ }
+ },
+ "required": [
+ "DomainName"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py
new file mode 100644
index 0000000000000..2dcb4b036e9ef
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayBasePathMappingProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::BasePathMapping"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_basepathmapping import (
+ ApiGatewayBasePathMappingProvider,
+ )
+
+ self.factory = ApiGatewayBasePathMappingProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py
new file mode 100644
index 0000000000000..68bae12d2af24
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py
@@ -0,0 +1,196 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayDeploymentProperties(TypedDict):
+ RestApiId: Optional[str]
+ DeploymentCanarySettings: Optional[DeploymentCanarySettings]
+ DeploymentId: Optional[str]
+ Description: Optional[str]
+ StageDescription: Optional[StageDescription]
+ StageName: Optional[str]
+
+
+class DeploymentCanarySettings(TypedDict):
+ PercentTraffic: Optional[float]
+ StageVariableOverrides: Optional[dict]
+ UseStageCache: Optional[bool]
+
+
+class AccessLogSetting(TypedDict):
+ DestinationArn: Optional[str]
+ Format: Optional[str]
+
+
+class CanarySetting(TypedDict):
+ PercentTraffic: Optional[float]
+ StageVariableOverrides: Optional[dict]
+ UseStageCache: Optional[bool]
+
+
+class MethodSetting(TypedDict):
+ CacheDataEncrypted: Optional[bool]
+ CacheTtlInSeconds: Optional[int]
+ CachingEnabled: Optional[bool]
+ DataTraceEnabled: Optional[bool]
+ HttpMethod: Optional[str]
+ LoggingLevel: Optional[str]
+ MetricsEnabled: Optional[bool]
+ ResourcePath: Optional[str]
+ ThrottlingBurstLimit: Optional[int]
+ ThrottlingRateLimit: Optional[float]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+class StageDescription(TypedDict):
+ AccessLogSetting: Optional[AccessLogSetting]
+ CacheClusterEnabled: Optional[bool]
+ CacheClusterSize: Optional[str]
+ CacheDataEncrypted: Optional[bool]
+ CacheTtlInSeconds: Optional[int]
+ CachingEnabled: Optional[bool]
+ CanarySetting: Optional[CanarySetting]
+ ClientCertificateId: Optional[str]
+ DataTraceEnabled: Optional[bool]
+ Description: Optional[str]
+ DocumentationVersion: Optional[str]
+ LoggingLevel: Optional[str]
+ MethodSettings: Optional[list[MethodSetting]]
+ MetricsEnabled: Optional[bool]
+ Tags: Optional[list[Tag]]
+ ThrottlingBurstLimit: Optional[int]
+ ThrottlingRateLimit: Optional[float]
+ TracingEnabled: Optional[bool]
+ Variables: Optional[dict]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayDeploymentProvider(ResourceProvider[ApiGatewayDeploymentProperties]):
+ TYPE = "AWS::ApiGateway::Deployment" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayDeploymentProperties],
+ ) -> ProgressEvent[ApiGatewayDeploymentProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/DeploymentId
+ - /properties/RestApiId
+
+ Required properties:
+ - RestApiId
+
+ Create-only properties:
+ - /properties/DeploymentCanarySettings
+ - /properties/RestApiId
+
+ Read-only properties:
+ - /properties/DeploymentId
+
+ IAM permissions required:
+ - apigateway:POST
+
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ params = {"restApiId": model["RestApiId"]}
+
+ if model.get("StageName"):
+ params["stageName"] = model["StageName"]
+
+ if model.get("StageDescription"):
+ params["stageDescription"] = json.dumps(model["StageDescription"])
+
+ if model.get("Description"):
+ params["description"] = model["Description"]
+
+ response = api.create_deployment(**params)
+
+ model["DeploymentId"] = response["id"]
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayDeploymentProperties],
+ ) -> ProgressEvent[ApiGatewayDeploymentProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayDeploymentProperties],
+ ) -> ProgressEvent[ApiGatewayDeploymentProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ try:
+ # TODO: verify if AWS behaves the same?
+ get_stages = api.get_stages(
+ restApiId=model["RestApiId"], deploymentId=model["DeploymentId"]
+ )
+ if stages := get_stages["item"]:
+ for stage in stages:
+ api.delete_stage(restApiId=model["RestApiId"], stageName=stage["stageName"])
+
+ api.delete_deployment(restApiId=model["RestApiId"], deploymentId=model["DeploymentId"])
+ except api.exceptions.NotFoundException:
+ pass
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayDeploymentProperties],
+ ) -> ProgressEvent[ApiGatewayDeploymentProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:PATCH
+ - apigateway:GET
+ - apigateway:PUT
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json
new file mode 100644
index 0000000000000..ab10bbf5e2a7a
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json
@@ -0,0 +1,318 @@
+{
+ "typeName": "AWS::ApiGateway::Deployment",
+ "description": "Resource Type definition for AWS::ApiGateway::Deployment",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "DeploymentId": {
+ "type": "string",
+ "description": "Primary Id for this resource"
+ },
+ "DeploymentCanarySettings": {
+ "$ref": "#/definitions/DeploymentCanarySettings",
+ "description": "Specifies settings for the canary deployment."
+ },
+ "Description": {
+ "type": "string",
+ "description": "A description of the purpose of the API Gateway deployment."
+ },
+ "RestApiId": {
+ "type": "string",
+ "description": "The ID of the RestApi resource to deploy. "
+ },
+ "StageDescription": {
+ "$ref": "#/definitions/StageDescription",
+ "description": "Configures the stage that API Gateway creates with this deployment."
+ },
+ "StageName": {
+ "type": "string",
+ "description": "A name for the stage that API Gateway creates with this deployment. Use only alphanumeric characters."
+ }
+ },
+ "definitions": {
+ "StageDescription": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "AccessLogSetting": {
+ "description": "Specifies settings for logging access in this stage.",
+ "$ref": "#/definitions/AccessLogSetting"
+ },
+ "CacheClusterEnabled": {
+ "description": "Indicates whether cache clustering is enabled for the stage.",
+ "type": "boolean"
+ },
+ "CacheClusterSize": {
+ "description": "The size of the stage's cache cluster.",
+ "type": "string"
+ },
+ "CacheDataEncrypted": {
+ "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ",
+ "type": "boolean"
+ },
+ "CacheTtlInSeconds": {
+ "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ",
+ "type": "integer"
+ },
+ "CachingEnabled": {
+ "description": "Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.",
+ "type": "boolean"
+ },
+ "CanarySetting": {
+ "description": "Specifies settings for the canary deployment in this stage.",
+ "$ref": "#/definitions/CanarySetting"
+ },
+ "ClientCertificateId": {
+ "description": "The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage. ",
+ "type": "string"
+ },
+ "DataTraceEnabled": {
+ "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. ",
+ "type": "boolean"
+ },
+ "Description": {
+ "description": "A description of the purpose of the stage.",
+ "type": "string"
+ },
+ "DocumentationVersion": {
+ "description": "The version identifier of the API documentation snapshot.",
+ "type": "string"
+ },
+ "LoggingLevel": {
+ "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. ",
+ "type": "string"
+ },
+ "MethodSettings": {
+ "description": "Configures settings for all of the stage's methods.",
+ "type": "array",
+ "uniqueItems": true,
+ "insertionOrder": false,
+ "items": {
+ "$ref": "#/definitions/MethodSetting"
+ }
+ },
+ "MetricsEnabled": {
+ "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.",
+ "type": "boolean"
+ },
+ "Tags": {
+ "description": "An array of arbitrary tags (key-value pairs) to associate with the stage.",
+ "type": "array",
+ "uniqueItems": false,
+ "insertionOrder": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ },
+ "ThrottlingBurstLimit": {
+ "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "integer"
+ },
+ "ThrottlingRateLimit": {
+ "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "number"
+ },
+ "TracingEnabled": {
+ "description": "Specifies whether active tracing with X-ray is enabled for this stage.",
+ "type": "boolean"
+ },
+ "Variables": {
+ "description": "A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+. ",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "DeploymentCanarySettings": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "PercentTraffic": {
+ "description": "The percentage (0-100) of traffic diverted to a canary deployment.",
+ "type": "number"
+ },
+ "StageVariableOverrides": {
+ "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "UseStageCache": {
+ "description": "Whether the canary deployment uses the stage cache.",
+ "type": "boolean"
+ }
+ }
+ },
+ "AccessLogSetting": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "DestinationArn": {
+ "description": "The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. ",
+ "type": "string"
+ },
+ "Format": {
+ "description": "A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId. ",
+ "type": "string"
+ }
+ }
+ },
+ "CanarySetting": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "PercentTraffic": {
+ "description": "The percent (0-100) of traffic diverted to a canary deployment.",
+ "type": "number"
+ },
+ "StageVariableOverrides": {
+ "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. ",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "UseStageCache": {
+ "description": "Whether the canary deployment uses the stage cache or not.",
+ "type": "boolean"
+ }
+ }
+ },
+ "Tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Key": {
+ "description": "The key name of the tag",
+ "type": "string"
+ },
+ "Value": {
+ "description": "The value for the tag",
+ "type": "string"
+ }
+ },
+ "required": [
+ "Value",
+ "Key"
+ ]
+ },
+ "MethodSetting": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "CacheDataEncrypted": {
+ "description": "Indicates whether the cached responses are encrypted",
+ "type": "boolean"
+ },
+ "CacheTtlInSeconds": {
+ "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ",
+ "type": "integer"
+ },
+ "CachingEnabled": {
+ "description": "Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.",
+ "type": "boolean"
+ },
+ "DataTraceEnabled": {
+ "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. ",
+ "type": "boolean"
+ },
+ "HttpMethod": {
+ "description": "The HTTP method.",
+ "type": "string"
+ },
+ "LoggingLevel": {
+ "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. ",
+ "type": "string"
+ },
+ "MetricsEnabled": {
+ "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.",
+ "type": "boolean"
+ },
+ "ResourcePath": {
+ "description": "The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. ",
+ "type": "string"
+ },
+ "ThrottlingBurstLimit": {
+ "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "integer"
+ },
+ "ThrottlingRateLimit": {
+ "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "number"
+ }
+ }
+ }
+ },
+ "taggable": true,
+ "required": [
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/DeploymentCanarySettings",
+ "/properties/RestApiId"
+ ],
+ "primaryIdentifier": [
+ "/properties/DeploymentId",
+ "/properties/RestApiId"
+ ],
+ "readOnlyProperties": [
+ "/properties/DeploymentId"
+ ],
+ "writeOnlyProperties": [
+ "/properties/StageName",
+ "/properties/StageDescription",
+ "/properties/DeploymentCanarySettings"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:PATCH",
+ "apigateway:GET",
+ "apigateway:PUT"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "RestApiId": {
+ "$ref": "resource-schema.json#/properties/RestApiId"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py
new file mode 100644
index 0000000000000..80ff9801a1ed5
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayDeploymentProviderPlugin(CloudFormationResourceProviderPlugin):
+    """Plugin that lazily exposes the AWS::ApiGateway::Deployment resource provider."""
+
+    name = "AWS::ApiGateway::Deployment"
+
+    def __init__(self):
+        # Populated by load(); kept None until then so plugin discovery stays cheap.
+        self.factory: Optional[Type[ResourceProvider]] = None
+
+    def load(self):
+        # Deferred import: the provider module is only loaded when the plugin is used.
+        from localstack.services.apigateway.resource_providers.aws_apigateway_deployment import (
+            ApiGatewayDeploymentProvider,
+        )
+
+        self.factory = ApiGatewayDeploymentProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py
new file mode 100644
index 0000000000000..778ec9da3cbf8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py
@@ -0,0 +1,164 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+
+
+# CloudFormation resource model for AWS::ApiGateway::DomainName. Field names
+# mirror the resource schema (PascalCase); all fields are optional at the type
+# level — required/read-only constraints are enforced by the JSON schema.
+class ApiGatewayDomainNameProperties(TypedDict):
+    CertificateArn: Optional[str]
+    DistributionDomainName: Optional[str]
+    DistributionHostedZoneId: Optional[str]
+    DomainName: Optional[str]
+    EndpointConfiguration: Optional[EndpointConfiguration]
+    MutualTlsAuthentication: Optional[MutualTlsAuthentication]
+    OwnershipVerificationCertificateArn: Optional[str]
+    RegionalCertificateArn: Optional[str]
+    RegionalDomainName: Optional[str]
+    RegionalHostedZoneId: Optional[str]
+    SecurityPolicy: Optional[str]
+    Tags: Optional[list[Tag]]
+
+
+# Mirrors the EndpointConfiguration property type of the schema.
+class EndpointConfiguration(TypedDict):
+    Types: Optional[list[str]]
+
+
+# Mirrors the MutualTlsAuthentication property type of the schema.
+class MutualTlsAuthentication(TypedDict):
+    TruststoreUri: Optional[str]
+    TruststoreVersion: Optional[str]
+
+
+# Key/value tag pair as used in the model's Tags list.
+class Tag(TypedDict):
+    Key: Optional[str]
+    Value: Optional[str]
+
+
+# Custom-context key conventionally used by providers to detect re-invocations;
+# scaffolded here but not referenced by this provider.
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayDomainNameProvider(ResourceProvider[ApiGatewayDomainNameProperties]):
+    """Resource provider implementing AWS::ApiGateway::DomainName."""
+
+    TYPE = "AWS::ApiGateway::DomainName"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[ApiGatewayDomainNameProperties],
+    ) -> ProgressEvent[ApiGatewayDomainNameProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+        - /properties/DomainName
+
+        Create-only properties:
+        - /properties/DomainName
+
+        Read-only properties:
+        - /properties/RegionalHostedZoneId
+        - /properties/DistributionDomainName
+        - /properties/RegionalDomainName
+        - /properties/DistributionHostedZoneId
+
+        IAM permissions required:
+        - apigateway:*
+
+        """
+        model = request.desired_state
+        apigw = request.aws_client_factory.apigateway
+
+        # Translate the PascalCase CloudFormation model into the camelCase
+        # parameter names expected by the API Gateway client, then keep only
+        # the parameters accepted by create_domain_name.
+        params = keys_to_lower(model.copy())
+        param_names = [
+            "certificateArn",
+            "domainName",
+            "endpointConfiguration",
+            "mutualTlsAuthentication",
+            "ownershipVerificationCertificateArn",
+            "regionalCertificateArn",
+            "securityPolicy",
+        ]
+        params = util.select_attributes(params, param_names)
+        if model.get("Tags"):
+            # NOTE(review): lower-case "key"/"value" access here assumes that
+            # keys_to_lower above lowered the nested tag dicts in place
+            # (model.copy() is shallow, so model["Tags"] shares those dicts) —
+            # confirm against the keys_to_lower implementation.
+            params["tags"] = {tag["key"]: tag["value"] for tag in model["Tags"]}
+
+        result = apigw.create_domain_name(**params)
+
+        hosted_zones = request.aws_client_factory.route53.list_hosted_zones()
+        # NOTE(review): the triple-quoted string below is a stray no-op statement
+        # (not a docstring); it documents that the hardcoded fallback zone id
+        # below is the value that should always be returned.
+        """
+        The hardcoded value is the only one that should be returned but due limitations it is not possible to
+        use it.
+        """
+        if hosted_zones["HostedZones"]:
+            model["DistributionHostedZoneId"] = hosted_zones["HostedZones"][0]["Id"]
+        else:
+            # Fallback: fixed hosted zone id (per the note above).
+            model["DistributionHostedZoneId"] = "Z2FDTNDATAQYW2"
+
+        # Populate the read-only attributes from the API response, falling back
+        # to the plain domain name / distribution values when absent.
+        model["DistributionDomainName"] = result.get("distributionDomainName") or result.get(
+            "domainName"
+        )
+        model["RegionalDomainName"] = (
+            result.get("regionalDomainName") or model["DistributionDomainName"]
+        )
+        model["RegionalHostedZoneId"] = (
+            result.get("regionalHostedZoneId") or model["DistributionHostedZoneId"]
+        )
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def read(
+        self,
+        request: ResourceRequest[ApiGatewayDomainNameProperties],
+    ) -> ProgressEvent[ApiGatewayDomainNameProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+        - apigateway:*
+        """
+        # TODO: read support has not been implemented in this provider yet.
+        raise NotImplementedError
+
+    def delete(
+        self,
+        request: ResourceRequest[ApiGatewayDomainNameProperties],
+    ) -> ProgressEvent[ApiGatewayDomainNameProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - apigateway:*
+        """
+        model = request.desired_state
+        apigw = request.aws_client_factory.apigateway
+
+        # DomainName is the primary identifier of this resource type.
+        apigw.delete_domain_name(domainName=model["DomainName"])
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[ApiGatewayDomainNameProperties],
+    ) -> ProgressEvent[ApiGatewayDomainNameProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - apigateway:*
+        """
+        # TODO: update support has not been implemented in this provider yet.
+        raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json
new file mode 100644
index 0000000000000..c0b50b24f2c33
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json
@@ -0,0 +1,124 @@
+{
+ "typeName": "AWS::ApiGateway::DomainName",
+ "description": "Resource Type definition for AWS::ApiGateway::DomainName.",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git",
+ "definitions": {
+ "EndpointConfiguration": {
+ "type": "object",
+ "properties": {
+ "Types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "MutualTlsAuthentication": {
+ "type": "object",
+ "properties": {
+ "TruststoreUri": {
+ "type": "string"
+ },
+ "TruststoreVersion": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ "Tag": {
+ "type": "object",
+ "properties": {
+ "Key": {
+ "type": "string"
+ },
+ "Value": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "properties": {
+ "DomainName": {
+ "type": "string"
+ },
+ "DistributionDomainName": {
+ "type": "string"
+ },
+ "DistributionHostedZoneId": {
+ "type": "string"
+ },
+ "EndpointConfiguration": {
+ "$ref": "#/definitions/EndpointConfiguration"
+ },
+ "MutualTlsAuthentication": {
+ "$ref": "#/definitions/MutualTlsAuthentication"
+ },
+ "RegionalDomainName": {
+ "type": "string"
+ },
+ "RegionalHostedZoneId": {
+ "type": "string"
+ },
+ "CertificateArn": {
+ "type": "string"
+ },
+ "RegionalCertificateArn": {
+ "type": "string"
+ },
+ "OwnershipVerificationCertificateArn": {
+ "type": "string"
+ },
+ "SecurityPolicy": {
+ "type": "string"
+ },
+ "Tags": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ }
+ },
+ "additionalProperties": false,
+ "primaryIdentifier": [
+ "/properties/DomainName"
+ ],
+ "createOnlyProperties": [
+ "/properties/DomainName"
+ ],
+ "readOnlyProperties": [
+ "/properties/RegionalHostedZoneId",
+ "/properties/DistributionDomainName",
+ "/properties/RegionalDomainName",
+ "/properties/DistributionHostedZoneId"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:*"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:*"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:*"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:*"
+ ]
+ },
+ "list": {
+ "permissions": [
+ "apigateway:*"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py
new file mode 100644
index 0000000000000..49e6db22f12d8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayDomainNameProviderPlugin(CloudFormationResourceProviderPlugin):
+    """Plugin that lazily exposes the AWS::ApiGateway::DomainName resource provider."""
+
+    name = "AWS::ApiGateway::DomainName"
+
+    def __init__(self):
+        # Populated by load(); kept None until then so plugin discovery stays cheap.
+        self.factory: Optional[Type[ResourceProvider]] = None
+
+    def load(self):
+        # Deferred import: the provider module is only loaded when the plugin is used.
+        from localstack.services.apigateway.resource_providers.aws_apigateway_domainname import (
+            ApiGatewayDomainNameProvider,
+        )
+
+        self.factory = ApiGatewayDomainNameProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py
new file mode 100644
index 0000000000000..bb52d43256e7b
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py
@@ -0,0 +1,122 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+
+
+# CloudFormation resource model for AWS::ApiGateway::GatewayResponse. Field
+# names mirror the resource schema (PascalCase); Id is the CloudFormation-side
+# primary identifier (read-only in the schema).
+class ApiGatewayGatewayResponseProperties(TypedDict):
+    ResponseType: Optional[str]
+    RestApiId: Optional[str]
+    Id: Optional[str]
+    ResponseParameters: Optional[dict]
+    ResponseTemplates: Optional[dict]
+    StatusCode: Optional[str]
+
+
+# Custom-context key conventionally used by providers to detect re-invocations;
+# scaffolded here but not referenced by this provider.
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayGatewayResponseProvider(ResourceProvider[ApiGatewayGatewayResponseProperties]):
+    """Resource provider implementing AWS::ApiGateway::GatewayResponse."""
+
+    TYPE = "AWS::ApiGateway::GatewayResponse"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[ApiGatewayGatewayResponseProperties],
+    ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+        - /properties/Id
+
+        Required properties:
+        - ResponseType
+        - RestApiId
+
+        Create-only properties:
+        - /properties/ResponseType
+        - /properties/RestApiId
+
+        Read-only properties:
+        - /properties/Id
+
+        IAM permissions required:
+        - apigateway:PUT
+        - apigateway:GET
+
+        """
+        model = request.desired_state
+        api = request.aws_client_factory.apigateway
+        # TODO: validations
+        # The Id is synthesized locally from the logical resource id — the
+        # put_gateway_response API itself is keyed on (restApiId, responseType),
+        # as the delete handler below shows.
+        model["Id"] = util.generate_default_name_without_stack(request.logical_resource_id)
+
+        # Pick the model fields accepted by put_gateway_response and convert
+        # them to the camelCase names the API Gateway client expects.
+        params = util.select_attributes(
+            model,
+            ["RestApiId", "ResponseType", "StatusCode", "ResponseParameters", "ResponseTemplates"],
+        )
+        params = keys_to_lower(params.copy())
+
+        api.put_gateway_response(**params)
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def read(
+        self,
+        request: ResourceRequest[ApiGatewayGatewayResponseProperties],
+    ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]:
+        """
+        Fetch resource information
+
+
+        """
+        # TODO: read support has not been implemented in this provider yet.
+        raise NotImplementedError
+
+    def delete(
+        self,
+        request: ResourceRequest[ApiGatewayGatewayResponseProperties],
+    ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - apigateway:GET
+        - apigateway:DELETE
+        """
+        model = request.desired_state
+        api = request.aws_client_factory.apigateway
+
+        # Gateway responses are addressed by (restApiId, responseType), not by
+        # the locally generated Id.
+        api.delete_gateway_response(
+            restApiId=model["RestApiId"], responseType=model["ResponseType"]
+        )
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[ApiGatewayGatewayResponseProperties],
+    ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - apigateway:GET
+        - apigateway:PUT
+        """
+        # TODO: update support has not been implemented in this provider yet.
+        raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json
new file mode 100644
index 0000000000000..063b2c6c91ca4
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json
@@ -0,0 +1,84 @@
+{
+ "typeName": "AWS::ApiGateway::GatewayResponse",
+ "description": "Resource Type definition for AWS::ApiGateway::GatewayResponse",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git",
+ "additionalProperties": false,
+ "properties": {
+ "Id": {
+ "description": "A Cloudformation auto generated ID.",
+ "type": "string"
+ },
+ "RestApiId": {
+ "description": "The identifier of the API.",
+ "type": "string"
+ },
+ "ResponseType": {
+ "description": "The type of the Gateway Response.",
+ "type": "string"
+ },
+ "StatusCode": {
+ "description": "The HTTP status code for the response.",
+ "type": "string"
+ },
+ "ResponseParameters": {
+ "description": "The response parameters (paths, query strings, and headers) for the response.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "ResponseTemplates": {
+ "description": "The response templates for the response.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "required": [
+ "ResponseType",
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/ResponseType",
+ "/properties/RestApiId"
+ ],
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ],
+ "taggable": false,
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:PUT",
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:PUT"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py
new file mode 100644
index 0000000000000..86f43d46cdd21
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayGatewayResponseProviderPlugin(CloudFormationResourceProviderPlugin):
+    """Plugin that lazily exposes the AWS::ApiGateway::GatewayResponse resource provider."""
+
+    name = "AWS::ApiGateway::GatewayResponse"
+
+    def __init__(self):
+        # Populated by load(); kept None until then so plugin discovery stays cheap.
+        self.factory: Optional[Type[ResourceProvider]] = None
+
+    def load(self):
+        # Deferred import: the provider module is only loaded when the plugin is used.
+        from localstack.services.apigateway.resource_providers.aws_apigateway_gatewayresponse import (
+            ApiGatewayGatewayResponseProvider,
+        )
+
+        self.factory = ApiGatewayGatewayResponseProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py
new file mode 100644
index 0000000000000..64598a4463898
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py
@@ -0,0 +1,234 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from copy import deepcopy
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+# CloudFormation resource model for AWS::ApiGateway::Method. Field names mirror
+# the resource schema (PascalCase); (RestApiId, ResourceId, HttpMethod) form the
+# primary identifier.
+class ApiGatewayMethodProperties(TypedDict):
+    HttpMethod: Optional[str]
+    ResourceId: Optional[str]
+    RestApiId: Optional[str]
+    ApiKeyRequired: Optional[bool]
+    AuthorizationScopes: Optional[list[str]]
+    AuthorizationType: Optional[str]
+    AuthorizerId: Optional[str]
+    Integration: Optional[Integration]
+    MethodResponses: Optional[list[MethodResponse]]
+    OperationName: Optional[str]
+    RequestModels: Optional[dict]
+    RequestParameters: Optional[dict]
+    RequestValidatorId: Optional[str]
+
+
+# Mirrors the IntegrationResponse property type of the schema.
+class IntegrationResponse(TypedDict):
+    StatusCode: Optional[str]
+    ContentHandling: Optional[str]
+    ResponseParameters: Optional[dict]
+    ResponseTemplates: Optional[dict]
+    SelectionPattern: Optional[str]
+
+
+# Mirrors the Integration property type of the schema.
+class Integration(TypedDict):
+    Type: Optional[str]
+    CacheKeyParameters: Optional[list[str]]
+    CacheNamespace: Optional[str]
+    ConnectionId: Optional[str]
+    ConnectionType: Optional[str]
+    ContentHandling: Optional[str]
+    Credentials: Optional[str]
+    IntegrationHttpMethod: Optional[str]
+    IntegrationResponses: Optional[list[IntegrationResponse]]
+    PassthroughBehavior: Optional[str]
+    RequestParameters: Optional[dict]
+    RequestTemplates: Optional[dict]
+    TimeoutInMillis: Optional[int]
+    Uri: Optional[str]
+
+
+# Mirrors the MethodResponse property type of the schema.
+class MethodResponse(TypedDict):
+    StatusCode: Optional[str]
+    ResponseModels: Optional[dict]
+    ResponseParameters: Optional[dict]
+
+
+# Custom-context key conventionally used by providers to detect re-invocations;
+# scaffolded here but not referenced by this provider.
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayMethodProvider(ResourceProvider[ApiGatewayMethodProperties]):
+    """Resource provider implementing AWS::ApiGateway::Method."""
+
+    TYPE = "AWS::ApiGateway::Method"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[ApiGatewayMethodProperties],
+    ) -> ProgressEvent[ApiGatewayMethodProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+        - /properties/RestApiId
+        - /properties/ResourceId
+        - /properties/HttpMethod
+
+        Required properties:
+        - RestApiId
+        - ResourceId
+        - HttpMethod
+
+        Create-only properties:
+        - /properties/RestApiId
+        - /properties/ResourceId
+        - /properties/HttpMethod
+
+
+
+        IAM permissions required:
+        - apigateway:PUT
+        - apigateway:GET
+
+        """
+        model = request.desired_state
+        apigw = request.aws_client_factory.apigateway
+        # Bound accessor for botocore operation models; convert_request_kwargs
+        # uses each operation's input shape to map the PascalCase model onto the
+        # client's expected parameters (presumably dropping keys not in the
+        # shape, e.g. IntegrationResponses for PutIntegration — confirm).
+        operation_model = apigw.meta.service_model.operation_model
+
+        apigw.put_method(
+            **util.convert_request_kwargs(model, operation_model("PutMethod").input_shape)
+        )
+
+        # setting up integrations
+        integration = model.get("Integration")
+        if integration:
+            apigw.put_integration(
+                restApiId=model.get("RestApiId"),
+                resourceId=model.get("ResourceId"),
+                httpMethod=model.get("HttpMethod"),
+                **util.convert_request_kwargs(
+                    integration, operation_model("PutIntegration").input_shape
+                ),
+            )
+
+            # NOTE(review): pop() mutates the nested Integration dict inside
+            # request.desired_state — the IntegrationResponses entry is gone
+            # from the model afterwards.
+            integration_responses = integration.pop("IntegrationResponses", [])
+            for integration_response in integration_responses:
+                apigw.put_integration_response(
+                    restApiId=model.get("RestApiId"),
+                    resourceId=model.get("ResourceId"),
+                    httpMethod=model.get("HttpMethod"),
+                    **util.convert_request_kwargs(
+                        integration_response, operation_model("PutIntegrationResponse").input_shape
+                    ),
+                )
+
+        # Register the declared method responses for this method.
+        responses = model.get("MethodResponses", [])
+        for response in responses:
+            apigw.put_method_response(
+                restApiId=model.get("RestApiId"),
+                resourceId=model.get("ResourceId"),
+                httpMethod=model.get("HttpMethod"),
+                **util.convert_request_kwargs(
+                    response, operation_model("PutMethodResponse").input_shape
+                ),
+            )
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def read(
+        self,
+        request: ResourceRequest[ApiGatewayMethodProperties],
+    ) -> ProgressEvent[ApiGatewayMethodProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+        - apigateway:GET
+        """
+        # TODO: read support has not been implemented in this provider yet.
+        raise NotImplementedError
+
+    def delete(
+        self,
+        request: ResourceRequest[ApiGatewayMethodProperties],
+    ) -> ProgressEvent[ApiGatewayMethodProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - apigateway:DELETE
+        """
+
+        # FIXME we sometimes get warnings when calling this method, probably because
+        # restAPI or resource has been already deleted
+        model = request.desired_state
+        apigw = request.aws_client_factory.apigateway
+
+        try:
+            apigw.delete_method(
+                **util.convert_request_kwargs(
+                    model, apigw.meta.service_model.operation_model("DeleteMethod").input_shape
+                )
+            )
+        except apigw.exceptions.NotFoundException:
+            # Method (or its REST API / resource) is already gone — deletion is
+            # treated as idempotent.
+            pass
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[ApiGatewayMethodProperties],
+    ) -> ProgressEvent[ApiGatewayMethodProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - apigateway:GET
+        - apigateway:DELETE
+        - apigateway:PUT
+        """
+        model = request.desired_state
+        apigw = request.aws_client_factory.apigateway
+        operation_model = apigw.meta.service_model.operation_model
+
+        # Identifier triple required by every put_* call below.
+        must_params = util.select_attributes(
+            model,
+            [
+                "RestApiId",
+                "ResourceId",
+                "HttpMethod",
+            ],
+        )
+
+        # deepcopy so merging the identifier params does not mutate the
+        # Integration dict stored in request.desired_state.
+        if integration := deepcopy(model.get("Integration")):
+            integration.update(must_params)
+            apigw.put_integration(
+                **util.convert_request_kwargs(
+                    integration, operation_model("PutIntegration").input_shape
+                )
+            )
+
+        else:
+            must_params.update({"AuthorizationType": model.get("AuthorizationType")})
+            apigw.put_method(
+                **util.convert_request_kwargs(must_params, operation_model("PutMethod").input_shape)
+            )
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json
new file mode 100644
index 0000000000000..1b64f208e9c6d
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json
@@ -0,0 +1,318 @@
+{
+ "typeName": "AWS::ApiGateway::Method",
+ "description": "Resource Type definition for AWS::ApiGateway::Method",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway.git",
+ "definitions": {
+ "Integration": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "CacheKeyParameters": {
+ "description": "A list of request parameters whose values API Gateway caches.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "type": "string"
+ }
+ },
+ "CacheNamespace": {
+ "description": "An API-specific tag group of related cached parameters.",
+ "type": "string"
+ },
+ "ConnectionId": {
+ "description": "The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.",
+ "type": "string"
+ },
+ "ConnectionType": {
+ "description": "The type of the network connection to the integration endpoint.",
+ "type": "string",
+ "enum": [
+ "INTERNET",
+ "VPC_LINK"
+ ]
+ },
+ "ContentHandling": {
+ "description": "Specifies how to handle request payload content type conversions.",
+ "type": "string",
+ "enum": [
+ "CONVERT_TO_BINARY",
+ "CONVERT_TO_TEXT"
+ ]
+ },
+ "Credentials": {
+ "description": "The credentials that are required for the integration.",
+ "type": "string"
+ },
+ "IntegrationHttpMethod": {
+ "description": "The integration's HTTP method type.",
+ "type": "string"
+ },
+ "IntegrationResponses": {
+ "description": "The response that API Gateway provides after a method's backend completes processing a request.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "$ref": "#/definitions/IntegrationResponse"
+ }
+ },
+ "PassthroughBehavior": {
+ "description": "Indicates when API Gateway passes requests to the targeted backend.",
+ "type": "string",
+ "enum": [
+ "WHEN_NO_MATCH",
+ "WHEN_NO_TEMPLATES",
+ "NEVER"
+ ]
+ },
+ "RequestParameters": {
+ "description": "The request parameters that API Gateway sends with the backend request.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "RequestTemplates": {
+ "description": "A map of Apache Velocity templates that are applied on the request payload.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "TimeoutInMillis": {
+ "description": "Custom timeout between 50 and 29,000 milliseconds.",
+ "type": "integer",
+ "minimum": 50,
+ "maximum": 29000
+ },
+ "Type": {
+ "description": "The type of backend that your method is running.",
+ "type": "string",
+ "enum": [
+ "AWS",
+ "AWS_PROXY",
+ "HTTP",
+ "HTTP_PROXY",
+ "MOCK"
+ ]
+ },
+ "Uri": {
+ "description": "The Uniform Resource Identifier (URI) for the integration.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "Type"
+ ]
+ },
+ "MethodResponse": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "ResponseModels": {
+ "description": "The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "ResponseParameters": {
+ "description": "Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "boolean"
+ }
+ }
+ },
+ "StatusCode": {
+ "description": "The method response's status code, which you map to an IntegrationResponse.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "StatusCode"
+ ]
+ },
+ "IntegrationResponse": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "ContentHandling": {
+ "description": "Specifies how to handle request payload content type conversions.",
+ "type": "string",
+ "enum": [
+ "CONVERT_TO_BINARY",
+ "CONVERT_TO_TEXT"
+ ]
+ },
+ "ResponseParameters": {
+ "description": "The response parameters from the backend response that API Gateway sends to the method response.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "ResponseTemplates": {
+ "description": "The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "SelectionPattern": {
+ "description": "A regular expression that specifies which error strings or status codes from the backend map to the integration response.",
+ "type": "string"
+ },
+ "StatusCode": {
+ "description": "The status code that API Gateway uses to map the integration response to a MethodResponse status code.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "StatusCode"
+ ]
+ }
+ },
+ "properties": {
+ "ApiKeyRequired": {
+ "description": "Indicates whether the method requires clients to submit a valid API key.",
+ "type": "boolean"
+ },
+ "AuthorizationScopes": {
+ "description": "A list of authorization scopes configured on the method.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "AuthorizationType": {
+ "description": "The method's authorization type.",
+ "type": "string",
+ "enum": [
+ "NONE",
+ "AWS_IAM",
+ "CUSTOM",
+ "COGNITO_USER_POOLS"
+ ]
+ },
+ "AuthorizerId": {
+ "description": "The identifier of the authorizer to use on this method.",
+ "type": "string"
+ },
+ "HttpMethod": {
+ "description": "The backend system that the method calls when it receives a request.",
+ "type": "string"
+ },
+ "Integration": {
+ "description": "The backend system that the method calls when it receives a request.",
+ "$ref": "#/definitions/Integration"
+ },
+ "MethodResponses": {
+ "description": "The responses that can be sent to the client who calls the method.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "$ref": "#/definitions/MethodResponse"
+ }
+ },
+ "OperationName": {
+ "description": "A friendly operation name for the method.",
+ "type": "string"
+ },
+ "RequestModels": {
+ "description": "The resources that are used for the request's content type. Specify request models as key-value pairs (string-to-string mapping), with a content type as the key and a Model resource name as the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "RequestParameters": {
+ "description": "The request parameters that API Gateway accepts. Specify request parameters as key-value pairs (string-to-Boolean mapping), with a source as the key and a Boolean as the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "boolean"
+ }
+ }
+ },
+ "RequestValidatorId": {
+ "description": "The ID of the associated request validator.",
+ "type": "string"
+ },
+ "ResourceId": {
+ "description": "The ID of an API Gateway resource.",
+ "type": "string"
+ },
+ "RestApiId": {
+ "description": "The ID of the RestApi resource in which API Gateway creates the method.",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "RestApiId",
+ "ResourceId",
+ "HttpMethod"
+ ],
+ "primaryIdentifier": [
+ "/properties/RestApiId",
+ "/properties/ResourceId",
+ "/properties/HttpMethod"
+ ],
+ "createOnlyProperties": [
+ "/properties/RestApiId",
+ "/properties/ResourceId",
+ "/properties/HttpMethod"
+ ],
+ "tagging": {
+ "taggable": false,
+ "tagOnCreate": false,
+ "tagUpdatable": false,
+ "cloudFormationSystemTags": false
+ },
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:PUT",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE",
+ "apigateway:PUT"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py
new file mode 100644
index 0000000000000..34e0cec7971a9
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayMethodProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::Method"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_method import (
+ ApiGatewayMethodProvider,
+ )
+
+ self.factory = ApiGatewayMethodProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py
new file mode 100644
index 0000000000000..07883e62983ca
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py
@@ -0,0 +1,134 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayModelProperties(TypedDict):
+ RestApiId: Optional[str]
+ ContentType: Optional[str]
+ Description: Optional[str]
+ Name: Optional[str]
+ Schema: Optional[dict | str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayModelProvider(ResourceProvider[ApiGatewayModelProperties]):
+ TYPE = "AWS::ApiGateway::Model" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayModelProperties],
+ ) -> ProgressEvent[ApiGatewayModelProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/RestApiId
+ - /properties/Name
+
+ Required properties:
+ - RestApiId
+
+ Create-only properties:
+ - /properties/ContentType
+ - /properties/Name
+ - /properties/RestApiId
+
+
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ if not model.get("Name"):
+ model["Name"] = util.generate_default_name(
+ stack_name=request.stack_name, logical_resource_id=request.logical_resource_id
+ )
+
+ if not model.get("ContentType"):
+ model["ContentType"] = "application/json"
+
+ schema = json.dumps(model.get("Schema", {}))
+
+ apigw.create_model(
+ restApiId=model["RestApiId"],
+ name=model["Name"],
+ contentType=model["ContentType"],
+ schema=schema,
+ )
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayModelProperties],
+ ) -> ProgressEvent[ApiGatewayModelProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayModelProperties],
+ ) -> ProgressEvent[ApiGatewayModelProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+ try:
+ apigw.delete_model(modelName=model["Name"], restApiId=model["RestApiId"])
+ except apigw.exceptions.NotFoundException:
+ # We are using try/except since at the moment
+ # CFN doesn't properly resolve dependency between resources
+ # so this resource could be deleted if parent resource was deleted first
+ pass
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayModelProperties],
+ ) -> ProgressEvent[ApiGatewayModelProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:PATCH
+ - apigateway:GET
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json
new file mode 100644
index 0000000000000..7196fd5cc44b0
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json
@@ -0,0 +1,83 @@
+{
+ "typeName": "AWS::ApiGateway::Model",
+ "description": "Resource Type definition for AWS::ApiGateway::Model",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "ContentType": {
+ "type": "string",
+ "description": "The content type for the model."
+ },
+ "Description": {
+ "type": "string",
+ "description": "A description that identifies this model."
+ },
+ "Name": {
+ "type": "string",
+ "description": "A name for the model. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the model name."
+ },
+ "RestApiId": {
+ "type": "string",
+ "description": "The ID of a REST API with which to associate this model."
+ },
+ "Schema": {
+ "description": "The schema to use to transform data to one or more output formats. Specify null ({}) if you don't want to specify a schema.",
+ "type": [
+ "object",
+ "string"
+ ]
+ }
+ },
+ "required": [
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/ContentType",
+ "/properties/Name",
+ "/properties/RestApiId"
+ ],
+ "primaryIdentifier": [
+ "/properties/RestApiId",
+ "/properties/Name"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:PATCH",
+ "apigateway:GET"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "RestApiId": {
+ "$ref": "resource-schema.json#/properties/RestApiId"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py
new file mode 100644
index 0000000000000..d1bd727b602e5
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayModelProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::Model"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_model import (
+ ApiGatewayModelProvider,
+ )
+
+ self.factory = ApiGatewayModelProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py
new file mode 100644
index 0000000000000..55d2a3bc4964e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py
@@ -0,0 +1,125 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayRequestValidatorProperties(TypedDict):
+ RestApiId: Optional[str]
+ Name: Optional[str]
+ RequestValidatorId: Optional[str]
+ ValidateRequestBody: Optional[bool]
+ ValidateRequestParameters: Optional[bool]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayRequestValidatorProvider(ResourceProvider[ApiGatewayRequestValidatorProperties]):
+ TYPE = "AWS::ApiGateway::RequestValidator" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayRequestValidatorProperties],
+ ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/RestApiId
+ - /properties/RequestValidatorId
+
+ Required properties:
+ - RestApiId
+
+ Create-only properties:
+ - /properties/Name
+ - /properties/RestApiId
+
+ Read-only properties:
+ - /properties/RequestValidatorId
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ if not model.get("Name"):
+ model["Name"] = util.generate_default_name(
+ request.stack_name, request.logical_resource_id
+ )
+ response = api.create_request_validator(
+ name=model["Name"],
+ restApiId=model["RestApiId"],
+ validateRequestBody=model.get("ValidateRequestBody", False),
+ validateRequestParameters=model.get("ValidateRequestParameters", False),
+ )
+ model["RequestValidatorId"] = response["id"]
+ # FIXME error happens when other resources try to reference this one
+ # "An error occurred (BadRequestException) when calling the PutMethod operation:
+ # Invalid Request Validator identifier specified"
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayRequestValidatorProperties],
+ ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayRequestValidatorProperties],
+ ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ api.delete_request_validator(
+ restApiId=model["RestApiId"], requestValidatorId=model["RequestValidatorId"]
+ )
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayRequestValidatorProperties],
+ ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:PATCH
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json
new file mode 100644
index 0000000000000..39d00e7be7d6d
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json
@@ -0,0 +1,80 @@
+{
+ "typeName": "AWS::ApiGateway::RequestValidator",
+ "description": "Resource Type definition for AWS::ApiGateway::RequestValidator",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "RequestValidatorId": {
+ "description": "ID of the request validator.",
+ "type": "string"
+ },
+ "Name": {
+ "description": "Name of the request validator.",
+ "type": "string"
+ },
+ "RestApiId": {
+ "description": "The identifier of the targeted API entity.",
+ "type": "string"
+ },
+ "ValidateRequestBody": {
+ "description": "Indicates whether to validate the request body according to the configured schema for the targeted API and method. ",
+ "type": "boolean"
+ },
+ "ValidateRequestParameters": {
+ "description": "Indicates whether to validate request parameters.",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/Name",
+ "/properties/RestApiId"
+ ],
+ "readOnlyProperties": [
+ "/properties/RequestValidatorId"
+ ],
+ "primaryIdentifier": [
+ "/properties/RestApiId",
+ "/properties/RequestValidatorId"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:PATCH"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "RestApiId": {
+ "$ref": "resource-schema.json#/properties/RestApiId"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py
new file mode 100644
index 0000000000000..41175341a69de
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayRequestValidatorProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::RequestValidator"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_requestvalidator import (
+ ApiGatewayRequestValidatorProvider,
+ )
+
+ self.factory = ApiGatewayRequestValidatorProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py
new file mode 100644
index 0000000000000..89b868306e68d
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py
@@ -0,0 +1,168 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+from botocore.exceptions import ClientError
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.aws.api.cloudcontrol import InvalidRequestException, ResourceNotFoundException
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class ApiGatewayResourceProperties(TypedDict):
+ ParentId: Optional[str]
+ PathPart: Optional[str]
+ RestApiId: Optional[str]
+ ResourceId: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayResourceProvider(ResourceProvider[ApiGatewayResourceProperties]):
+ TYPE = "AWS::ApiGateway::Resource" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayResourceProperties],
+ ) -> ProgressEvent[ApiGatewayResourceProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/RestApiId
+ - /properties/ResourceId
+
+ Required properties:
+ - ParentId
+ - PathPart
+ - RestApiId
+
+ Create-only properties:
+ - /properties/PathPart
+ - /properties/ParentId
+ - /properties/RestApiId
+
+ Read-only properties:
+ - /properties/ResourceId
+
+ IAM permissions required:
+ - apigateway:POST
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ params = {
+ "restApiId": model.get("RestApiId"),
+ "pathPart": model.get("PathPart"),
+ "parentId": model.get("ParentId"),
+ }
+ if not params.get("parentId"):
+ # get root resource id
+ resources = apigw.get_resources(restApiId=params["restApiId"])["items"]
+ root_resource = ([r for r in resources if r["path"] == "/"] or [None])[0]
+ if not root_resource:
+ raise Exception(
+ "Unable to find root resource for REST API %s" % params["restApiId"]
+ )
+ params["parentId"] = root_resource["id"]
+ response = apigw.create_resource(**params)
+
+ model["ResourceId"] = response["id"]
+ model["ParentId"] = response["parentId"]
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayResourceProperties],
+ ) -> ProgressEvent[ApiGatewayResourceProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def list(
+ self,
+ request: ResourceRequest[ApiGatewayResourceProperties],
+ ) -> ProgressEvent[ApiGatewayResourceProperties]:
+ if "RestApiId" not in request.desired_state:
+ # TODO: parity
+ raise InvalidRequestException(
+ f"Missing or invalid ResourceModel property in {self.TYPE} list handler request input: 'RestApiId'"
+ )
+
+ rest_api_id = request.desired_state["RestApiId"]
+ try:
+ resources = request.aws_client_factory.apigateway.get_resources(restApiId=rest_api_id)[
+ "items"
+ ]
+ except ClientError as exc:
+ if exc.response.get("Error", {}).get("Code", {}) == "NotFoundException":
+ raise ResourceNotFoundException(f"Invalid API identifier specified: {rest_api_id}")
+ raise
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_models=[
+ ApiGatewayResourceProperties(
+ RestApiId=rest_api_id,
+ ResourceId=resource["id"],
+ ParentId=resource.get("parentId"),
+ PathPart=resource.get("path"),
+ )
+ for resource in resources
+ ],
+ )
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayResourceProperties],
+ ) -> ProgressEvent[ApiGatewayResourceProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ try:
+ apigw.delete_resource(restApiId=model["RestApiId"], resourceId=model["ResourceId"])
+ except apigw.exceptions.NotFoundException:
+ pass
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayResourceProperties],
+ ) -> ProgressEvent[ApiGatewayResourceProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:PATCH
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json
new file mode 100644
index 0000000000000..7eaa8175b1827
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json
@@ -0,0 +1,80 @@
+{
+ "typeName": "AWS::ApiGateway::Resource",
+ "description": "Resource Type definition for AWS::ApiGateway::Resource",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "ResourceId": {
+ "description": "A unique primary identifier for a Resource",
+ "type": "string"
+ },
+ "RestApiId": {
+ "description": "The ID of the RestApi resource in which you want to create this resource..",
+ "type": "string"
+ },
+ "ParentId": {
+ "description": "The parent resource's identifier.",
+ "type": "string"
+ },
+ "PathPart": {
+ "description": "The last path segment for this resource.",
+ "type": "string"
+ }
+ },
+ "taggable": false,
+ "required": [
+ "ParentId",
+ "PathPart",
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/PathPart",
+ "/properties/ParentId",
+ "/properties/RestApiId"
+ ],
+ "primaryIdentifier": [
+ "/properties/RestApiId",
+ "/properties/ResourceId"
+ ],
+ "readOnlyProperties": [
+ "/properties/ResourceId"
+ ],
+ "handlers": {
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "create": {
+ "permissions": [
+ "apigateway:POST"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:PATCH"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "RestApiId": {
+ "$ref": "resource-schema.json#/properties/RestApiId"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py
new file mode 100644
index 0000000000000..f7ece7204435d
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayResourceProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::Resource"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_resource import (
+ ApiGatewayResourceProvider,
+ )
+
+ self.factory = ApiGatewayResourceProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py
new file mode 100644
index 0000000000000..c90e2b36f328b
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py
@@ -0,0 +1,245 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+from localstack.utils.strings import to_bytes
+
+
+class ApiGatewayRestApiProperties(TypedDict):
+ ApiKeySourceType: Optional[str]
+ BinaryMediaTypes: Optional[list[str]]
+ Body: Optional[dict | str]
+ BodyS3Location: Optional[S3Location]
+ CloneFrom: Optional[str]
+ Description: Optional[str]
+ DisableExecuteApiEndpoint: Optional[bool]
+ EndpointConfiguration: Optional[EndpointConfiguration]
+ FailOnWarnings: Optional[bool]
+ MinimumCompressionSize: Optional[int]
+ Mode: Optional[str]
+ Name: Optional[str]
+ Parameters: Optional[dict | str]
+ Policy: Optional[dict | str]
+ RestApiId: Optional[str]
+ RootResourceId: Optional[str]
+ Tags: Optional[list[Tag]]
+
+
+class S3Location(TypedDict):
+ Bucket: Optional[str]
+ ETag: Optional[str]
+ Key: Optional[str]
+ Version: Optional[str]
+
+
+class EndpointConfiguration(TypedDict):
+ Types: Optional[list[str]]
+ VpcEndpointIds: Optional[list[str]]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayRestApiProvider(ResourceProvider[ApiGatewayRestApiProperties]):
+ TYPE = "AWS::ApiGateway::RestApi" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayRestApiProperties],
+ ) -> ProgressEvent[ApiGatewayRestApiProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/RestApiId
+
+
+ Read-only properties:
+ - /properties/RestApiId
+ - /properties/RootResourceId
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:POST
+ - apigateway:UpdateRestApiPolicy
+ - s3:GetObject
+ - iam:PassRole
+
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ # FIXME: this is only when Body or BodyS3Location is set, otherwise the deployment should fail without a name
+ role_name = model.get("Name")
+ if not role_name:
+ model["Name"] = util.generate_default_name(
+ request.stack_name, request.logical_resource_id
+ )
+ params = util.select_attributes(
+ model,
+ [
+ "Name",
+ "Description",
+ "Version",
+ "CloneFrom",
+ "BinaryMediaTypes",
+ "MinimumCompressionSize",
+ "ApiKeySource",
+ "EndpointConfiguration",
+ "Policy",
+ "Tags",
+ "DisableExecuteApiEndpoint",
+ ],
+ )
+ params = keys_to_lower(params, skip_children_of=["policy"])
+ params["tags"] = {tag["key"]: tag["value"] for tag in params.get("tags", [])}
+
+ cfn_client = request.aws_client_factory.cloudformation
+ stack_id = cfn_client.describe_stacks(StackName=request.stack_name)["Stacks"][0]["StackId"]
+ params["tags"].update(
+ {
+ "aws:cloudformation:logical-id": request.logical_resource_id,
+ "aws:cloudformation:stack-name": request.stack_name,
+ "aws:cloudformation:stack-id": stack_id,
+ }
+ )
+ if isinstance(params.get("policy"), dict):
+ params["policy"] = json.dumps(params["policy"])
+
+ result = api.create_rest_api(**params)
+ model["RestApiId"] = result["id"]
+
+ body = model.get("Body")
+ s3_body_location = model.get("BodyS3Location")
+ if body or s3_body_location:
+ # the default behavior for imports via CFn is basepath=ignore (validated against AWS)
+ import_parameters = model.get("Parameters", {})
+ import_parameters.setdefault("basepath", "ignore")
+
+ if body:
+ body = json.dumps(body) if isinstance(body, dict) else body
+ else:
+ get_obj_kwargs = {}
+ if version_id := s3_body_location.get("Version"):
+ get_obj_kwargs["VersionId"] = version_id
+
+ # what is the approach when a client call fails? Do we bubble it up?
+ s3_client = request.aws_client_factory.s3
+ get_obj_req = s3_client.get_object(
+ Bucket=s3_body_location.get("Bucket"),
+ Key=s3_body_location.get("Key"),
+ **get_obj_kwargs,
+ )
+ if etag := s3_body_location.get("ETag"):
+ if etag != get_obj_req["ETag"]:
+ # TODO: validate the exception message
+ raise Exception(
+ "The ETag provided for the S3BodyLocation does not match the S3 Object"
+ )
+ body = get_obj_req["Body"].read()
+
+ put_kwargs = {}
+ if import_mode := model.get("Mode"):
+ put_kwargs["mode"] = import_mode
+ if fail_on_warnings_mode := model.get("FailOnWarnings"):
+ put_kwargs["failOnWarnings"] = fail_on_warnings_mode
+
+ api.put_rest_api(
+ restApiId=result["id"],
+ body=to_bytes(body),
+ parameters=import_parameters,
+ **put_kwargs,
+ )
+
+ resources = api.get_resources(restApiId=result["id"])["items"]
+ for res in resources:
+ if res["path"] == "/" and not res.get("parentId"):
+ model["RootResourceId"] = res["id"]
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayRestApiProperties],
+ ) -> ProgressEvent[ApiGatewayRestApiProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def list(
+ self,
+ request: ResourceRequest[ApiGatewayRestApiProperties],
+ ) -> ProgressEvent[ApiGatewayRestApiProperties]:
+ # TODO: pagination
+ resources = request.aws_client_factory.apigateway.get_rest_apis()["items"]
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_models=[
+ ApiGatewayRestApiProperties(RestApiId=resource["id"], Name=resource["name"])
+ for resource in resources
+ ],
+ )
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayRestApiProperties],
+ ) -> ProgressEvent[ApiGatewayRestApiProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ api = request.aws_client_factory.apigateway
+
+ api.delete_rest_api(restApiId=model["RestApiId"])
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayRestApiProperties],
+ ) -> ProgressEvent[ApiGatewayRestApiProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:DELETE
+ - apigateway:PATCH
+ - apigateway:PUT
+ - apigateway:UpdateRestApiPolicy
+ - s3:GetObject
+ - iam:PassRole
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json
new file mode 100644
index 0000000000000..73e6f5dda9447
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json
@@ -0,0 +1,197 @@
+{
+ "typeName": "AWS::ApiGateway::RestApi",
+ "description": "Resource Type definition for AWS::ApiGateway::RestApi.",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git",
+ "additionalProperties": false,
+ "definitions": {
+ "EndpointConfiguration": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Types": {
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "type": "string"
+ }
+ },
+ "VpcEndpointIds": {
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "Tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Key": {
+ "type": "string"
+ },
+ "Value": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "Key",
+ "Value"
+ ]
+ },
+ "S3Location": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Bucket": {
+ "type": "string"
+ },
+ "ETag": {
+ "type": "string"
+ },
+ "Version": {
+ "type": "string"
+ },
+ "Key": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "properties": {
+ "RestApiId": {
+ "type": "string"
+ },
+ "RootResourceId": {
+ "type": "string"
+ },
+ "ApiKeySourceType": {
+ "type": "string"
+ },
+ "BinaryMediaTypes": {
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "type": "string"
+ }
+ },
+ "Body": {
+ "type": [
+ "object",
+ "string"
+ ]
+ },
+ "BodyS3Location": {
+ "$ref": "#/definitions/S3Location"
+ },
+ "CloneFrom": {
+ "type": "string"
+ },
+ "EndpointConfiguration": {
+ "$ref": "#/definitions/EndpointConfiguration"
+ },
+ "Description": {
+ "type": "string"
+ },
+ "DisableExecuteApiEndpoint": {
+ "type": "boolean"
+ },
+ "FailOnWarnings": {
+ "type": "boolean"
+ },
+ "Name": {
+ "type": "string"
+ },
+ "MinimumCompressionSize": {
+ "type": "integer"
+ },
+ "Mode": {
+ "type": "string"
+ },
+ "Policy": {
+ "type": [
+ "object",
+ "string"
+ ]
+ },
+ "Parameters": {
+ "type": [
+ "object",
+ "string"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "Tags": {
+ "type": "array",
+ "uniqueItems": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ }
+ },
+ "tagging": {
+ "taggable": true,
+ "tagOnCreate": true,
+ "tagUpdatable": true,
+ "cloudFormationSystemTags": true,
+ "tagProperty": "/properties/Tags"
+ },
+ "primaryIdentifier": [
+ "/properties/RestApiId"
+ ],
+ "readOnlyProperties": [
+ "/properties/RestApiId",
+ "/properties/RootResourceId"
+ ],
+ "writeOnlyProperties": [
+ "/properties/Body",
+ "/properties/BodyS3Location",
+ "/properties/CloneFrom",
+ "/properties/FailOnWarnings",
+ "/properties/Mode",
+ "/properties/Parameters"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:POST",
+ "apigateway:UpdateRestApiPolicy",
+ "s3:GetObject",
+ "iam:PassRole"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE",
+ "apigateway:PATCH",
+ "apigateway:PUT",
+ "apigateway:UpdateRestApiPolicy",
+ "s3:GetObject",
+ "iam:PassRole"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py
new file mode 100644
index 0000000000000..e53c4a4d8205f
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayRestApiProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::RestApi"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_restapi import (
+ ApiGatewayRestApiProvider,
+ )
+
+ self.factory = ApiGatewayRestApiProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py
new file mode 100644
index 0000000000000..b2b98bc715455
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py
@@ -0,0 +1,183 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+import copy
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+
+
+class ApiGatewayStageProperties(TypedDict):
+ RestApiId: Optional[str]
+ AccessLogSetting: Optional[AccessLogSetting]
+ CacheClusterEnabled: Optional[bool]
+ CacheClusterSize: Optional[str]
+ CanarySetting: Optional[CanarySetting]
+ ClientCertificateId: Optional[str]
+ DeploymentId: Optional[str]
+ Description: Optional[str]
+ DocumentationVersion: Optional[str]
+ MethodSettings: Optional[list[MethodSetting]]
+ StageName: Optional[str]
+ Tags: Optional[list[Tag]]
+ TracingEnabled: Optional[bool]
+ Variables: Optional[dict]
+
+
+class AccessLogSetting(TypedDict):
+ DestinationArn: Optional[str]
+ Format: Optional[str]
+
+
+class CanarySetting(TypedDict):
+ DeploymentId: Optional[str]
+ PercentTraffic: Optional[float]
+ StageVariableOverrides: Optional[dict]
+ UseStageCache: Optional[bool]
+
+
+class MethodSetting(TypedDict):
+ CacheDataEncrypted: Optional[bool]
+ CacheTtlInSeconds: Optional[int]
+ CachingEnabled: Optional[bool]
+ DataTraceEnabled: Optional[bool]
+ HttpMethod: Optional[str]
+ LoggingLevel: Optional[str]
+ MetricsEnabled: Optional[bool]
+ ResourcePath: Optional[str]
+ ThrottlingBurstLimit: Optional[int]
+ ThrottlingRateLimit: Optional[float]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayStageProvider(ResourceProvider[ApiGatewayStageProperties]):
+ TYPE = "AWS::ApiGateway::Stage" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayStageProperties],
+ ) -> ProgressEvent[ApiGatewayStageProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/RestApiId
+ - /properties/StageName
+
+ Required properties:
+ - RestApiId
+
+ Create-only properties:
+ - /properties/RestApiId
+ - /properties/StageName
+
+
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:PATCH
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ stage_name = model.get("StageName", "default")
+ stage_variables = model.get("Variables")
+ # we need to deep copy as several fields are nested dicts and arrays
+ params = keys_to_lower(copy.deepcopy(model))
+ # TODO: add methodSettings
+ # TODO: add custom CfN tags
+ param_names = [
+ "restApiId",
+ "deploymentId",
+ "description",
+ "cacheClusterEnabled",
+ "cacheClusterSize",
+ "documentationVersion",
+ "canarySettings",
+ "tracingEnabled",
+ "tags",
+ ]
+ params = util.select_attributes(params, param_names)
+ params["tags"] = {t["key"]: t["value"] for t in params.get("tags", [])}
+ params["stageName"] = stage_name
+ if stage_variables:
+ params["variables"] = stage_variables
+
+ result = apigw.create_stage(**params)
+ model["StageName"] = result["stageName"]
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayStageProperties],
+ ) -> ProgressEvent[ApiGatewayStageProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayStageProperties],
+ ) -> ProgressEvent[ApiGatewayStageProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+ try:
+ # check whether the stage has already been deleted before calling delete
+ apigw.get_stage(restApiId=model["RestApiId"], stageName=model["StageName"])
+ apigw.delete_stage(restApiId=model["RestApiId"], stageName=model["StageName"])
+ except apigw.exceptions.NotFoundException:
+ pass
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayStageProperties],
+ ) -> ProgressEvent[ApiGatewayStageProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:PATCH
+ - apigateway:PUT
+ - apigateway:DELETE
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json
new file mode 100644
index 0000000000000..fe67c2c0c626f
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json
@@ -0,0 +1,260 @@
+{
+ "typeName": "AWS::ApiGateway::Stage",
+ "description": "Resource Type definition for AWS::ApiGateway::Stage",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "AccessLogSetting": {
+ "description": "Specifies settings for logging access in this stage.",
+ "$ref": "#/definitions/AccessLogSetting"
+ },
+ "CacheClusterEnabled": {
+ "description": "Indicates whether cache clustering is enabled for the stage.",
+ "type": "boolean"
+ },
+ "CacheClusterSize": {
+ "description": "The stage's cache cluster size.",
+ "type": "string"
+ },
+ "CanarySetting": {
+ "description": "Specifies settings for the canary deployment in this stage.",
+ "$ref": "#/definitions/CanarySetting"
+ },
+ "ClientCertificateId": {
+ "description": "The ID of the client certificate that API Gateway uses to call your integration endpoints in the stage. ",
+ "type": "string"
+ },
+ "DeploymentId": {
+ "description": "The ID of the deployment that the stage is associated with. This parameter is required to create a stage. ",
+ "type": "string"
+ },
+ "Description": {
+ "description": "A description of the stage.",
+ "type": "string"
+ },
+ "DocumentationVersion": {
+ "description": "The version ID of the API documentation snapshot.",
+ "type": "string"
+ },
+ "MethodSettings": {
+ "description": "Settings for all methods in the stage.",
+ "type": "array",
+ "uniqueItems": true,
+ "insertionOrder": false,
+ "items": {
+ "$ref": "#/definitions/MethodSetting"
+ }
+ },
+ "RestApiId": {
+ "description": "The ID of the RestApi resource that you're deploying with this stage.",
+ "type": "string"
+ },
+ "StageName": {
+ "description": "The name of the stage, which API Gateway uses as the first path segment in the invoked Uniform Resource Identifier (URI).",
+ "type": "string"
+ },
+ "Tags": {
+ "description": "An array of arbitrary tags (key-value pairs) to associate with the stage.",
+ "type": "array",
+ "uniqueItems": false,
+ "insertionOrder": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ },
+ "TracingEnabled": {
+ "description": "Specifies whether active X-Ray tracing is enabled for this stage.",
+ "type": "boolean"
+ },
+ "Variables": {
+ "description": "A map (string-to-string map) that defines the stage variables, where the variable name is the key and the variable value is the value.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "definitions": {
+ "CanarySetting": {
+ "description": "Specifies settings for the canary deployment in this stage.",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "DeploymentId": {
+ "description": "The identifier of the deployment that the stage points to.",
+ "type": "string"
+ },
+ "PercentTraffic": {
+ "description": "The percentage (0-100) of traffic diverted to a canary deployment.",
+ "type": "number",
+ "minimum": 0,
+ "maximum": 100
+ },
+ "StageVariableOverrides": {
+ "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "[a-zA-Z0-9]+": {
+ "type": "string"
+ }
+ }
+ },
+ "UseStageCache": {
+ "description": "Whether the canary deployment uses the stage cache or not.",
+ "type": "boolean"
+ }
+ }
+ },
+ "AccessLogSetting": {
+ "description": "Specifies settings for logging access in this stage.",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "DestinationArn": {
+ "description": "The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.",
+ "type": "string"
+ },
+ "Format": {
+ "description": "A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.",
+ "type": "string"
+ }
+ }
+ },
+ "MethodSetting": {
+ "description": "Configures settings for all methods in a stage.",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "CacheDataEncrypted": {
+ "description": "Indicates whether the cached responses are encrypted.",
+ "type": "boolean"
+ },
+ "CacheTtlInSeconds": {
+ "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.",
+ "type": "integer"
+ },
+ "CachingEnabled": {
+ "description": "Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.",
+ "type": "boolean"
+ },
+ "DataTraceEnabled": {
+ "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.",
+ "type": "boolean"
+ },
+ "HttpMethod": {
+ "description": "The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.",
+ "type": "string"
+ },
+ "LoggingLevel": {
+ "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.",
+ "type": "string"
+ },
+ "MetricsEnabled": {
+ "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.",
+ "type": "boolean"
+ },
+ "ResourcePath": {
+ "description": "The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.",
+ "type": "string"
+ },
+ "ThrottlingBurstLimit": {
+ "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "integer",
+ "minimum": 0
+ },
+ "ThrottlingRateLimit": {
+ "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.",
+ "type": "number",
+ "minimum": 0
+ }
+ }
+ },
+ "Tag": {
+ "description": "Identify and categorize resources.",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Key": {
+ "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 128
+ },
+ "Value": {
+ "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.",
+ "type": "string",
+ "minLength": 0,
+ "maxLength": 256
+ }
+ },
+ "required": [
+ "Key",
+ "Value"
+ ]
+ }
+ },
+ "required": [
+ "RestApiId"
+ ],
+ "createOnlyProperties": [
+ "/properties/RestApiId",
+ "/properties/StageName"
+ ],
+ "primaryIdentifier": [
+ "/properties/RestApiId",
+ "/properties/StageName"
+ ],
+ "tagging": {
+ "taggable": true,
+ "tagOnCreate": true,
+ "tagUpdatable": true,
+ "cloudFormationSystemTags": true,
+ "tagProperty": "/properties/Tags"
+ },
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:PATCH",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:PATCH",
+ "apigateway:PUT",
+ "apigateway:DELETE"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "RestApiId": {
+ "$ref": "resource-schema.json#/properties/RestApiId"
+ }
+ },
+ "required": [
+ "RestApiId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py
new file mode 100644
index 0000000000000..e0898bae2c695
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayStageProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::Stage"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_stage import (
+ ApiGatewayStageProvider,
+ )
+
+ self.factory = ApiGatewayStageProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py
new file mode 100644
index 0000000000000..1e10c9badfc3f
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py
@@ -0,0 +1,215 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.aws.arns import get_partition
+from localstack.utils.objects import keys_to_lower
+from localstack.utils.strings import first_char_to_lower
+
+
+class ApiGatewayUsagePlanProperties(TypedDict):
+ ApiStages: Optional[list[ApiStage]]
+ Description: Optional[str]
+ Id: Optional[str]
+ Quota: Optional[QuotaSettings]
+ Tags: Optional[list[Tag]]
+ Throttle: Optional[ThrottleSettings]
+ UsagePlanName: Optional[str]
+
+
+class ApiStage(TypedDict):
+ ApiId: Optional[str]
+ Stage: Optional[str]
+ Throttle: Optional[dict]
+
+
+class QuotaSettings(TypedDict):
+ Limit: Optional[int]
+ Offset: Optional[int]
+ Period: Optional[str]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+class ThrottleSettings(TypedDict):
+ BurstLimit: Optional[int]
+ RateLimit: Optional[float]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayUsagePlanProvider(ResourceProvider[ApiGatewayUsagePlanProperties]):
+ TYPE = "AWS::ApiGateway::UsagePlan" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/Id
+
+ Read-only properties:
+ - /properties/Id
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ plan_name = model.get("UsagePlanName")
+ if not plan_name:
+ model["UsagePlanName"] = util.generate_default_name(
+ request.stack_name, request.logical_resource_id
+ )
+
+ params = util.select_attributes(model, ["Description", "ApiStages", "Quota", "Throttle"])
+ params = keys_to_lower(params.copy())
+ params["name"] = model["UsagePlanName"]
+
+ if model.get("Tags"):
+ params["tags"] = {tag["Key"]: tag["Value"] for tag in model["Tags"]}
+
+ # set int and float types
+ if params.get("quota"):
+ params["quota"]["limit"] = int(params["quota"]["limit"])
+
+ if params.get("throttle"):
+ params["throttle"]["burstLimit"] = int(params["throttle"]["burstLimit"])
+ params["throttle"]["rateLimit"] = float(params["throttle"]["rateLimit"])
+
+ response = apigw.create_usage_plan(**params)
+
+ model["Id"] = response["id"]
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ apigw.delete_usage_plan(usagePlanId=model["Id"])
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanProperties]:
+ """
+ Update a resource
+
+ IAM permissions required:
+ - apigateway:GET
+ - apigateway:DELETE
+ - apigateway:PATCH
+ - apigateway:PUT
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ parameters_to_select = [
+ "UsagePlanName",
+ "Description",
+ "ApiStages",
+ "Quota",
+ "Throttle",
+ "Tags",
+ ]
+ update_config_props = util.select_attributes(model, parameters_to_select)
+
+ updated_tags = update_config_props.pop("Tags", [])
+
+ usage_plan_id = request.previous_state["Id"]
+
+ patch_operations = []
+
+ for parameter in update_config_props:
+ value = update_config_props[parameter]
+ if parameter == "ApiStages":
+ for stage in value:
+ patch_operations.append(
+ {
+ "op": "replace",
+ "path": f"/{first_char_to_lower(parameter)}",
+ "value": f"{stage['ApiId']}:{stage['Stage']}",
+ }
+ )
+
+ if "Throttle" in stage:
+ patch_operations.append(
+ {
+ "op": "replace",
+ "path": f"/{first_char_to_lower(parameter)}/{stage['ApiId']}:{stage['Stage']}",
+ "value": json.dumps(stage["Throttle"]),
+ }
+ )
+
+ elif isinstance(value, dict):
+ for item in value:
+ last_value = value[item]
+ path = f"/{first_char_to_lower(parameter)}/{first_char_to_lower(item)}"
+ patch_operations.append({"op": "replace", "path": path, "value": last_value})
+ else:
+ patch_operations.append(
+ {"op": "replace", "path": f"/{first_char_to_lower(parameter)}", "value": value}
+ )
+ apigw.update_usage_plan(usagePlanId=usage_plan_id, patchOperations=patch_operations)
+
+ if updated_tags:
+ tags = {tag["Key"]: tag["Value"] for tag in updated_tags}
+ usage_plan_arn = f"arn:{get_partition(request.region_name)}:apigateway:{request.region_name}::/usageplans/{usage_plan_id}"
+ apigw.tag_resource(resourceArn=usage_plan_arn, tags=tags)
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model={**request.previous_state, **request.desired_state},
+ custom_context=request.custom_context,
+ )
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json
new file mode 100644
index 0000000000000..96f6f07bb01ca
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json
@@ -0,0 +1,173 @@
+{
+ "typeName": "AWS::ApiGateway::UsagePlan",
+ "description": "Resource Type definition for AWS::ApiGateway::UsagePlan",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway.git",
+ "additionalProperties": false,
+ "properties": {
+ "Id": {
+ "type": "string",
+ "description": "The provider-assigned unique ID for this managed resource."
+ },
+ "ApiStages": {
+ "type": "array",
+ "description": "The API stages to associate with this usage plan.",
+ "uniqueItems": true,
+ "items": {
+ "$ref": "#/definitions/ApiStage"
+ }
+ },
+ "Description": {
+ "type": "string",
+ "description": "A description of the usage plan."
+ },
+ "Quota": {
+ "$ref": "#/definitions/QuotaSettings",
+ "description": "Configures the number of requests that users can make within a given interval."
+ },
+ "Tags": {
+ "type": "array",
+ "description": "An array of arbitrary tags (key-value pairs) to associate with the usage plan.",
+ "insertionOrder": false,
+ "uniqueItems": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ },
+ "Throttle": {
+ "$ref": "#/definitions/ThrottleSettings",
+ "description": "Configures the overall request rate (average requests per second) and burst capacity."
+ },
+ "UsagePlanName": {
+ "type": "string",
+ "description": "A name for the usage plan."
+ }
+ },
+ "definitions": {
+ "ApiStage": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "ApiId": {
+ "type": "string",
+ "description": "The ID of an API that is in the specified Stage property that you want to associate with the usage plan."
+ },
+ "Stage": {
+ "type": "string",
+ "description": "The name of the stage to associate with the usage plan."
+ },
+ "Throttle": {
+ "type": "object",
+ "description": "Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.",
+ "additionalProperties": false,
+ "patternProperties": {
+ ".*": {
+ "$ref": "#/definitions/ThrottleSettings"
+ }
+ }
+ }
+ }
+ },
+ "ThrottleSettings": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "BurstLimit": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity."
+ },
+ "RateLimit": {
+ "type": "number",
+ "minimum": 0,
+ "description": "The API request steady-state rate limit (average requests per second over an extended period of time)."
+ }
+ }
+ },
+ "Tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Key": {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 128,
+ "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -."
+ },
+ "Value": {
+ "type": "string",
+ "minLength": 0,
+ "maxLength": 256,
+ "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -."
+ }
+ },
+ "required": [
+ "Value",
+ "Key"
+ ]
+ },
+ "QuotaSettings": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Limit": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "The maximum number of requests that users can make within the specified time period."
+ },
+ "Offset": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period."
+ },
+ "Period": {
+ "type": "string",
+ "description": "The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference."
+ }
+ }
+ }
+ },
+ "tagging": {
+ "taggable": true,
+ "tagOnCreate": true,
+ "tagUpdatable": true,
+ "cloudFormationSystemTags": true,
+ "tagProperty": "/properties/Tags"
+ },
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "update": {
+ "permissions": [
+ "apigateway:GET",
+ "apigateway:DELETE",
+ "apigateway:PATCH",
+ "apigateway:PUT"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py
new file mode 100644
index 0000000000000..154207ac69b58
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayUsagePlanProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::UsagePlan"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_usageplan import (
+ ApiGatewayUsagePlanProvider,
+ )
+
+ self.factory = ApiGatewayUsagePlanProvider
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py
new file mode 100644
index 0000000000000..33a6e155d5c4f
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py
@@ -0,0 +1,114 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+from localstack.utils.objects import keys_to_lower
+
+
+class ApiGatewayUsagePlanKeyProperties(TypedDict):
+ KeyId: Optional[str]
+ KeyType: Optional[str]
+ UsagePlanId: Optional[str]
+ Id: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class ApiGatewayUsagePlanKeyProvider(ResourceProvider[ApiGatewayUsagePlanKeyProperties]):
+ TYPE = "AWS::ApiGateway::UsagePlanKey" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanKeyProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/Id
+
+ Required properties:
+ - KeyType
+ - UsagePlanId
+ - KeyId
+
+ Create-only properties:
+ - /properties/KeyId
+ - /properties/UsagePlanId
+ - /properties/KeyType
+
+ Read-only properties:
+ - /properties/Id
+
+ IAM permissions required:
+ - apigateway:POST
+ - apigateway:GET
+
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ params = keys_to_lower(model.copy())
+ result = apigw.create_usage_plan_key(**params)
+
+ model["Id"] = result["id"]
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanKeyProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]:
+ """
+ Fetch resource information
+
+ IAM permissions required:
+ - apigateway:GET
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanKeyProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]:
+ """
+ Delete a resource
+
+ IAM permissions required:
+ - apigateway:DELETE
+ """
+ model = request.desired_state
+ apigw = request.aws_client_factory.apigateway
+
+ apigw.delete_usage_plan_key(usagePlanId=model["UsagePlanId"], keyId=model["KeyId"])
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[ApiGatewayUsagePlanKeyProperties],
+ ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]:
+ """
+ Update a resource
+
+
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json
new file mode 100644
index 0000000000000..997f3be9a0d49
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json
@@ -0,0 +1,77 @@
+{
+ "typeName": "AWS::ApiGateway::UsagePlanKey",
+ "description": "Resource Type definition for AWS::ApiGateway::UsagePlanKey",
+ "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway",
+ "additionalProperties": false,
+ "properties": {
+ "KeyId": {
+ "description": "The ID of the usage plan key.",
+ "type": "string"
+ },
+ "KeyType": {
+ "description": "The type of usage plan key. Currently, the only valid key type is API_KEY.",
+ "type": "string",
+ "enum": [
+ "API_KEY"
+ ]
+ },
+ "UsagePlanId": {
+ "description": "The ID of the usage plan.",
+ "type": "string"
+ },
+ "Id": {
+ "description": "An autogenerated ID which is a combination of the ID of the key and ID of the usage plan combined with a : such as 123abcdef:abc123.",
+ "type": "string"
+ }
+ },
+ "taggable": false,
+ "required": [
+ "KeyType",
+ "UsagePlanId",
+ "KeyId"
+ ],
+ "createOnlyProperties": [
+ "/properties/KeyId",
+ "/properties/UsagePlanId",
+ "/properties/KeyType"
+ ],
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ],
+ "handlers": {
+ "create": {
+ "permissions": [
+ "apigateway:POST",
+ "apigateway:GET"
+ ]
+ },
+ "read": {
+ "permissions": [
+ "apigateway:GET"
+ ]
+ },
+ "delete": {
+ "permissions": [
+ "apigateway:DELETE"
+ ]
+ },
+ "list": {
+ "handlerSchema": {
+ "properties": {
+ "UsagePlanId": {
+ "$ref": "resource-schema.json#/properties/UsagePlanId"
+ }
+ },
+ "required": [
+ "UsagePlanId"
+ ]
+ },
+ "permissions": [
+ "apigateway:GET"
+ ]
+ }
+ }
+}
diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py
new file mode 100644
index 0000000000000..eb21b610bfc22
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class ApiGatewayUsagePlanKeyProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::ApiGateway::UsagePlanKey"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.apigateway.resource_providers.aws_apigateway_usageplankey import (
+ ApiGatewayUsagePlanKeyProvider,
+ )
+
+ self.factory = ApiGatewayUsagePlanKeyProvider
diff --git a/tests/integration/lambdas/__init__.py b/localstack-core/localstack/services/cdk/__init__.py
similarity index 100%
rename from tests/integration/lambdas/__init__.py
rename to localstack-core/localstack/services/cdk/__init__.py
diff --git a/localstack-core/localstack/services/cdk/resource_providers/__init__.py b/localstack-core/localstack/services/cdk/resource_providers/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py
new file mode 100644
index 0000000000000..7e5eb5ca2f988
--- /dev/null
+++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py
@@ -0,0 +1,90 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class CDKMetadataProperties(TypedDict):
+ Id: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class CDKMetadataProvider(ResourceProvider[CDKMetadataProperties]):
+ TYPE = "AWS::CDK::Metadata" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[CDKMetadataProperties],
+ ) -> ProgressEvent[CDKMetadataProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/Id
+
+
+
+ """
+ model = request.desired_state
+ model["Id"] = util.generate_default_name(
+ stack_name=request.stack_name, logical_resource_id=request.logical_resource_id
+ )
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[CDKMetadataProperties],
+ ) -> ProgressEvent[CDKMetadataProperties]:
+ """
+ Fetch resource information
+
+
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[CDKMetadataProperties],
+ ) -> ProgressEvent[CDKMetadataProperties]:
+ """
+ Delete a resource
+
+
+ """
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=request.previous_state,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[CDKMetadataProperties],
+ ) -> ProgressEvent[CDKMetadataProperties]:
+ """
+ Update a resource
+
+
+ """
+ model = request.desired_state
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ )
diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json
new file mode 100644
index 0000000000000..636fc68e2e9c0
--- /dev/null
+++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json
@@ -0,0 +1,22 @@
+{
+ "typeName": "AWS::CDK::Metadata" ,
+ "description": "Resource Type definition for AWS::CDK::Metadata",
+ "additionalProperties": false,
+ "properties": {
+ "Id": {
+ "type": "string"
+ }
+ },
+ "definitions": {
+ },
+ "required": [
+ ],
+ "createOnlyProperties": [
+ ],
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ]
+}
diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py
new file mode 100644
index 0000000000000..924ca3cb79eae
--- /dev/null
+++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py
@@ -0,0 +1,18 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class LambdaAliasProviderPlugin(CloudFormationResourceProviderPlugin):  # NOTE(review): misnamed — copy/paste from the Lambda Alias plugin; should be CDKMetadataProviderPlugin, but renaming requires regenerating the plugin entry points
+ name = "AWS::CDK::Metadata"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.cdk.resource_providers.cdk_metadata import CDKMetadataProvider
+
+ self.factory = CDKMetadataProvider
diff --git a/localstack-core/localstack/services/certificatemanager/__init__.py b/localstack-core/localstack/services/certificatemanager/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/__init__.py b/localstack-core/localstack/services/certificatemanager/resource_providers/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py
new file mode 100644
index 0000000000000..d79d62975e87f
--- /dev/null
+++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py
@@ -0,0 +1,151 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+ OperationStatus,
+ ProgressEvent,
+ ResourceProvider,
+ ResourceRequest,
+)
+
+
+class CertificateManagerCertificateProperties(TypedDict):
+ DomainName: Optional[str]
+ CertificateAuthorityArn: Optional[str]
+ CertificateTransparencyLoggingPreference: Optional[str]
+ DomainValidationOptions: Optional[list[DomainValidationOption]]
+ Id: Optional[str]
+ SubjectAlternativeNames: Optional[list[str]]
+ Tags: Optional[list[Tag]]
+ ValidationMethod: Optional[str]
+
+
+class DomainValidationOption(TypedDict):
+ DomainName: Optional[str]
+ HostedZoneId: Optional[str]
+ ValidationDomain: Optional[str]
+
+
+class Tag(TypedDict):
+ Key: Optional[str]
+ Value: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class CertificateManagerCertificateProvider(
+ ResourceProvider[CertificateManagerCertificateProperties]
+):
+ TYPE = "AWS::CertificateManager::Certificate" # Autogenerated. Don't change
+ SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
+
+ def create(
+ self,
+ request: ResourceRequest[CertificateManagerCertificateProperties],
+ ) -> ProgressEvent[CertificateManagerCertificateProperties]:
+ """
+ Create a new resource.
+
+ Primary identifier fields:
+ - /properties/Id
+
+ Required properties:
+ - DomainName
+
+ Create-only properties:
+ - /properties/SubjectAlternativeNames
+ - /properties/DomainValidationOptions
+ - /properties/ValidationMethod
+ - /properties/DomainName
+ - /properties/CertificateAuthorityArn
+
+ Read-only properties:
+ - /properties/Id
+
+
+
+ """
+ model = request.desired_state
+ acm = request.aws_client_factory.acm
+
+ params = util.select_attributes(
+ model,
+ [
+ "CertificateAuthorityArn",
+ "DomainName",
+ "DomainValidationOptions",
+ "SubjectAlternativeNames",
+ "Tags",
+ "ValidationMethod",
+ ],
+ )
+ # adjust domain validation options
+ valid_opts = params.get("DomainValidationOptions")
+ if valid_opts:
+
+ def _convert(opt):
+ res = util.select_attributes(opt, ["DomainName", "ValidationDomain"])
+ res.setdefault("ValidationDomain", res["DomainName"])
+ return res
+
+ params["DomainValidationOptions"] = [_convert(opt) for opt in valid_opts]
+
+ # adjust logging preferences
+ logging_pref = params.get("CertificateTransparencyLoggingPreference")
+ if logging_pref:
+ params["Options"] = {"CertificateTransparencyLoggingPreference": logging_pref}
+
+ response = acm.request_certificate(**params)
+ model["Id"] = response["CertificateArn"]
+
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def read(
+ self,
+ request: ResourceRequest[CertificateManagerCertificateProperties],
+ ) -> ProgressEvent[CertificateManagerCertificateProperties]:
+ """
+ Fetch resource information
+
+
+ """
+ raise NotImplementedError
+
+ def delete(
+ self,
+ request: ResourceRequest[CertificateManagerCertificateProperties],
+ ) -> ProgressEvent[CertificateManagerCertificateProperties]:
+ """
+ Delete a resource
+
+
+ """
+ model = request.desired_state
+ acm = request.aws_client_factory.acm
+
+ acm.delete_certificate(CertificateArn=model["Id"])
+ return ProgressEvent(
+ status=OperationStatus.SUCCESS,
+ resource_model=model,
+ custom_context=request.custom_context,
+ )
+
+ def update(
+ self,
+ request: ResourceRequest[CertificateManagerCertificateProperties],
+ ) -> ProgressEvent[CertificateManagerCertificateProperties]:
+ """
+ Update a resource
+
+
+ """
+ raise NotImplementedError
diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json
new file mode 100644
index 0000000000000..a4d90a42f0839
--- /dev/null
+++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json
@@ -0,0 +1,95 @@
+{
+ "typeName": "AWS::CertificateManager::Certificate",
+ "description": "Resource Type definition for AWS::CertificateManager::Certificate",
+ "additionalProperties": false,
+ "properties": {
+ "CertificateAuthorityArn": {
+ "type": "string"
+ },
+ "DomainValidationOptions": {
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "$ref": "#/definitions/DomainValidationOption"
+ }
+ },
+ "CertificateTransparencyLoggingPreference": {
+ "type": "string"
+ },
+ "DomainName": {
+ "type": "string"
+ },
+ "ValidationMethod": {
+ "type": "string"
+ },
+ "SubjectAlternativeNames": {
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "type": "string"
+ }
+ },
+ "Id": {
+ "type": "string"
+ },
+ "Tags": {
+ "type": "array",
+ "uniqueItems": false,
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ }
+ },
+ "definitions": {
+ "DomainValidationOption": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "DomainName": {
+ "type": "string"
+ },
+ "ValidationDomain": {
+ "type": "string"
+ },
+ "HostedZoneId": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "DomainName"
+ ]
+ },
+ "Tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "Value": {
+ "type": "string"
+ },
+ "Key": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "Value",
+ "Key"
+ ]
+ }
+ },
+ "required": [
+ "DomainName"
+ ],
+ "createOnlyProperties": [
+ "/properties/SubjectAlternativeNames",
+ "/properties/DomainValidationOptions",
+ "/properties/ValidationMethod",
+ "/properties/DomainName",
+ "/properties/CertificateAuthorityArn"
+ ],
+ "primaryIdentifier": [
+ "/properties/Id"
+ ],
+ "readOnlyProperties": [
+ "/properties/Id"
+ ]
+}
diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py
new file mode 100644
index 0000000000000..5aae4de01c7b3
--- /dev/null
+++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+ CloudFormationResourceProviderPlugin,
+ ResourceProvider,
+)
+
+
+class CertificateManagerCertificateProviderPlugin(CloudFormationResourceProviderPlugin):
+ name = "AWS::CertificateManager::Certificate"
+
+ def __init__(self):
+ self.factory: Optional[Type[ResourceProvider]] = None
+
+ def load(self):
+ from localstack.services.certificatemanager.resource_providers.aws_certificatemanager_certificate import (
+ CertificateManagerCertificateProvider,
+ )
+
+ self.factory = CertificateManagerCertificateProvider
diff --git a/localstack-core/localstack/services/cloudformation/__init__.py b/localstack-core/localstack/services/cloudformation/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/cloudformation/analytics.py b/localstack-core/localstack/services/cloudformation/analytics.py
new file mode 100644
index 0000000000000..80ec4d1960005
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/analytics.py
@@ -0,0 +1,67 @@
+import enum
+from typing import Self
+
+from localstack.aws.api.cloudformation import ChangeAction
+from localstack.utils.analytics.metrics import LabeledCounter
+
+COUNTER_NAMESPACE = "cloudformation"
+COUNTER_VERSION = 2
+
+
+class ActionOptions(enum.StrEnum):
+ """
+ Available actions that can be performed on a resource.
+
+ Must support both CFn and CloudControl.
+ """
+
+ CREATE = "create"
+ DELETE = "delete"
+ UPDATE = "update"
+ # for cloudcontrol
+ READ = "read"
+ LIST = "list"
+
+ @classmethod
+ def from_action(cls, action: Self | str | ChangeAction) -> Self:
+ if isinstance(action, cls):
+ return action
+
+ # only used in CFn
+ if isinstance(action, ChangeAction):
+ action = action.value
+
+ match action:
+ case "Add":
+ return cls.CREATE
+ case "Modify" | "Dynamic":
+ return cls.UPDATE
+ case "Remove":
+ return cls.DELETE
+ case "Read":
+ return cls.READ
+ case "List":
+ return cls.LIST
+ case _:
+ available_values = [every.value for every in cls]
+ raise ValueError(
+ f"Invalid action option '{action}', should be one of {available_values}"
+ )
+
+
+resources = LabeledCounter(
+ namespace=COUNTER_NAMESPACE,
+ name="resources",
+ labels=["resource_type", "missing", "action"],
+ schema_version=COUNTER_VERSION,
+)
+
+
+def track_resource_operation(
+ action: ActionOptions | str, expected_resource_type: str, *, missing: bool
+):
+ resources.labels(
+ resource_type=expected_resource_type,
+ missing=missing,
+ action=ActionOptions.from_action(action),
+ ).increment()
diff --git a/localstack-core/localstack/services/cloudformation/api_utils.py b/localstack-core/localstack/services/cloudformation/api_utils.py
new file mode 100644
index 0000000000000..c4172974cec35
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/api_utils.py
@@ -0,0 +1,162 @@
+import logging
+import re
+from urllib.parse import urlparse
+
+from localstack import config, constants
+from localstack.aws.connect import connect_to
+from localstack.services.cloudformation.engine.validations import ValidationError
+from localstack.services.s3.utils import (
+ extract_bucket_name_and_key_from_headers_and_path,
+ normalize_bucket_name,
+)
+from localstack.utils.functions import run_safe
+from localstack.utils.http import safe_requests
+from localstack.utils.strings import to_str
+from localstack.utils.urls import localstack_host
+
+LOG = logging.getLogger(__name__)
+
+
+def prepare_template_body(req_data: dict) -> str | bytes | None: # TODO: mutating and returning
+ template_url = req_data.get("TemplateURL")
+ if template_url:
+ req_data["TemplateURL"] = convert_s3_to_local_url(template_url)
+ url = req_data.get("TemplateURL", "")
+ if is_local_service_url(url):
+ modified_template_body = get_template_body(req_data)
+ if modified_template_body:
+ req_data.pop("TemplateURL", None)
+ req_data["TemplateBody"] = modified_template_body
+    modified_template_body = get_template_body(req_data)  # NOTE(review): redundant second resolution — the local-URL branch above may already have set TemplateBody
+ if modified_template_body:
+ req_data["TemplateBody"] = modified_template_body
+ return modified_template_body
+
+
+def extract_template_body(request: dict) -> str:
+ """
+ Given a request payload, fetch the body of the template either from S3 or from the payload itself
+ """
+ if template_body := request.get("TemplateBody"):
+ if request.get("TemplateURL"):
+ raise ValidationError(
+ "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+ ) # TODO: check proper message
+
+ return template_body
+
+ elif template_url := request.get("TemplateURL"):
+ template_url = convert_s3_to_local_url(template_url)
+ return get_remote_template_body(template_url)
+
+ else:
+ raise ValidationError(
+ "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+ ) # TODO: check proper message
+
+
+def get_remote_template_body(url: str) -> str:
+ response = run_safe(lambda: safe_requests.get(url, verify=False))
+ # check error codes, and code 301 - fixes https://github.com/localstack/localstack/issues/1884
+ status_code = 0 if response is None else response.status_code
+ if 200 <= status_code < 300:
+ # request was ok
+ return response.text
+ elif response is None or status_code == 301 or status_code >= 400:
+ # check if this is an S3 URL, then get the file directly from there
+ url = convert_s3_to_local_url(url)
+ if is_local_service_url(url):
+ parsed_path = urlparse(url).path.lstrip("/")
+ parts = parsed_path.partition("/")
+ client = connect_to().s3
+ LOG.debug(
+ "Download CloudFormation template content from local S3: %s - %s",
+ parts[0],
+ parts[2],
+ )
+ result = client.get_object(Bucket=parts[0], Key=parts[2])
+ body = to_str(result["Body"].read())
+ return body
+ raise RuntimeError(
+ "Unable to fetch template body (code %s) from URL %s" % (status_code, url)
+ )
+ else:
+        raise RuntimeError(  # NOTE(review): extra positional args below duplicate info already in the f-string message
+ f"Bad status code from fetching template from url '{url}' ({status_code})",
+ url,
+ status_code,
+ )
+
+
+def get_template_body(req_data: dict) -> str:
+ body = req_data.get("TemplateBody")
+ if body:
+ return body
+ url = req_data.get("TemplateURL")
+ if url:
+ response = run_safe(lambda: safe_requests.get(url, verify=False))
+ # check error codes, and code 301 - fixes https://github.com/localstack/localstack/issues/1884
+ status_code = 0 if response is None else response.status_code
+ if response is None or status_code == 301 or status_code >= 400:
+ # check if this is an S3 URL, then get the file directly from there
+ url = convert_s3_to_local_url(url)
+ if is_local_service_url(url):
+ parsed_path = urlparse(url).path.lstrip("/")
+ parts = parsed_path.partition("/")
+ client = connect_to().s3
+ LOG.debug(
+ "Download CloudFormation template content from local S3: %s - %s",
+ parts[0],
+ parts[2],
+ )
+ result = client.get_object(Bucket=parts[0], Key=parts[2])
+ body = to_str(result["Body"].read())
+ return body
+ raise Exception(
+ "Unable to fetch template body (code %s) from URL %s" % (status_code, url)
+ )
+ return to_str(response.content)
+ raise Exception("Unable to get template body from input: %s" % req_data)
+
+
+def is_local_service_url(url: str) -> bool:
+ if not url:
+ return False
+ candidates = (
+ constants.LOCALHOST,
+ constants.LOCALHOST_HOSTNAME,
+ localstack_host().host,
+ )
+ if any(re.match(r"^[^:]+://[^:/]*%s([:/]|$)" % host, url) for host in candidates):
+ return True
+ host = url.split("://")[-1].split("/")[0]
+ return "localhost" in host
+
+
+def convert_s3_to_local_url(url: str) -> str:
+ from localstack.services.cloudformation.provider import ValidationError
+
+ url_parsed = urlparse(url)
+ path = url_parsed.path
+
+ headers = {"host": url_parsed.netloc}
+ bucket_name, key_name = extract_bucket_name_and_key_from_headers_and_path(headers, path)
+
+ if url_parsed.scheme == "s3":
+ raise ValidationError(
+ f"S3 error: Domain name specified in {url_parsed.netloc} is not a valid S3 domain"
+ )
+
+ if not bucket_name or not key_name:
+ if not (url_parsed.netloc.startswith("s3.") or ".s3." in url_parsed.netloc):
+ raise ValidationError("TemplateURL must be a supported URL.")
+
+ # note: make sure to normalize the bucket name here!
+    bucket_name = normalize_bucket_name(bucket_name)  # NOTE(review): bucket_name may still be None here (S3-like netloc with no parsed bucket/key) — would raise at runtime; confirm upstream guarantees
+ local_url = f"{config.internal_service_url()}/{bucket_name}/{key_name}"
+ return local_url
+
+
+def validate_stack_name(stack_name):
+ pattern = r"[a-zA-Z][-a-zA-Z0-9]*|arn:[-a-zA-Z0-9:/._+]*"
+ return re.match(pattern, stack_name) is not None
diff --git a/localstack-core/localstack/services/cloudformation/cfn_utils.py b/localstack-core/localstack/services/cloudformation/cfn_utils.py
new file mode 100644
index 0000000000000..6fcc5d16fb573
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/cfn_utils.py
@@ -0,0 +1,84 @@
+import json
+from typing import Callable
+
+from localstack.utils.objects import recurse_object
+
+
+def rename_params(func, rename_map):
+ def do_rename(account_id, region_name, params, logical_resource_id, *args, **kwargs):
+ values = (
+ func(account_id, region_name, params, logical_resource_id, *args, **kwargs)
+ if func
+ else params
+ )
+ for old_param, new_param in rename_map.items():
+ values[new_param] = values.pop(old_param, None)
+ return values
+
+ return do_rename
+
+
+def lambda_convert_types(func, types):
+ return (
+ lambda account_id, region_name, params, logical_resource_id, *args, **kwargs: convert_types(
+ func(account_id, region_name, params, *args, **kwargs), types
+ )
+ )
+
+
+def lambda_to_json(attr):
+ return lambda account_id, region_name, params, logical_resource_id, *args, **kwargs: json.dumps(
+ params[attr]
+ )
+
+
+def lambda_rename_attributes(attrs, func=None):
+ def recurse(o, path):
+ if isinstance(o, dict):
+ for k in list(o.keys()):
+ for a in attrs.keys():
+ if k == a:
+ o[attrs[k]] = o.pop(k)
+ return o
+
+ func = func or (lambda account_id, region_name, x, logical_resource_id, *args, **kwargs: x)
+ return (
+ lambda account_id,
+ region_name,
+ params,
+ logical_resource_id,
+ *args,
+ **kwargs: recurse_object(
+ func(account_id, region_name, params, logical_resource_id, *args, **kwargs), recurse
+ )
+ )
+
+
+def convert_types(obj, types):
+ def fix_types(key, type_class):
+ def recurse(o, path):
+ if isinstance(o, dict):
+ for k, v in dict(o).items():
+ key_path = "%s%s" % (path or ".", k)
+ if key in [k, key_path]:
+ o[k] = type_class(v)
+ return o
+
+ return recurse_object(obj, recurse)
+
+ for key, type_class in types.items():
+ fix_types(key, type_class)
+ return obj
+
+
+def get_tags_param(resource_type: str) -> Callable:
+ """Return a tag parameters creation function for the given resource type"""
+
+ def _param(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ tags = params.get("Tags")
+ if not tags:
+ return None
+
+ return [{"ResourceType": resource_type, "Tags": tags}]
+
+ return _param
diff --git a/localstack-core/localstack/services/cloudformation/deploy.html b/localstack-core/localstack/services/cloudformation/deploy.html
new file mode 100644
index 0000000000000..47af619288057
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/deploy.html
@@ -0,0 +1,144 @@
+
+
+
+
+ LocalStack - CloudFormation Deployment
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/localstack-core/localstack/services/cloudformation/deploy_ui.py b/localstack-core/localstack/services/cloudformation/deploy_ui.py
new file mode 100644
index 0000000000000..deac95b408b1f
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/deploy_ui.py
@@ -0,0 +1,47 @@
+import json
+import logging
+import os
+
+import requests
+from rolo import Response
+
+from localstack import constants
+from localstack.utils.files import load_file
+from localstack.utils.json import parse_json_or_yaml
+
+LOG = logging.getLogger(__name__)
+
+
+class CloudFormationUi:
+ def on_get(self, request):
+ from localstack.utils.aws.aws_stack import get_valid_regions
+
+ deploy_html_file = os.path.join(
+ constants.MODULE_MAIN_PATH, "services", "cloudformation", "deploy.html"
+ )
+ deploy_html = load_file(deploy_html_file)
+ req_params = request.values
+ params = {
+ "stackName": "stack1",
+ "templateBody": "{}",
+ "errorMessage": "''",
+ "regions": json.dumps(sorted(get_valid_regions())),
+ }
+
+ download_url = req_params.get("templateURL")
+ if download_url:
+ try:
+ LOG.debug("Attempting to download CloudFormation template URL: %s", download_url)
+ template_body = requests.get(download_url).text
+ template_body = parse_json_or_yaml(template_body)
+ params["templateBody"] = json.dumps(template_body)
+ except Exception as e:
+ msg = f"Unable to download CloudFormation template URL: {e}"
+ LOG.info(msg)
+ params["errorMessage"] = json.dumps(msg.replace("\n", " - "))
+
+ # using simple string replacement here, for simplicity (could be replaced with, e.g., jinja)
+ for key, value in params.items():
+ deploy_html = deploy_html.replace(f"<{key}>", value)
+
+ return Response(deploy_html, mimetype="text/html")
diff --git a/localstack-core/localstack/services/cloudformation/deployment_utils.py b/localstack-core/localstack/services/cloudformation/deployment_utils.py
new file mode 100644
index 0000000000000..6355db6b5c27a
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/deployment_utils.py
@@ -0,0 +1,319 @@
+import builtins
+import json
+import logging
+import re
+from copy import deepcopy
+from typing import Callable, List
+
+from localstack import config
+from localstack.utils import common
+from localstack.utils.aws import aws_stack
+from localstack.utils.common import select_attributes, short_uid
+from localstack.utils.functions import run_safe
+from localstack.utils.json import json_safe
+from localstack.utils.objects import recurse_object
+from localstack.utils.strings import is_string
+
+# placeholders
+PLACEHOLDER_AWS_NO_VALUE = "__aws_no_value__"
+
+LOG = logging.getLogger(__name__)
+
+
+def dump_json_params(param_func=None, *param_names):
+ def replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ result = (
+ param_func(account_id, region_name, params, logical_resource_id, *args, **kwargs)
+ if param_func
+ else params
+ )
+ for name in param_names:
+ if isinstance(result.get(name), (dict, list)):
+ # Fix for https://github.com/localstack/localstack/issues/2022
+ # Convert any date instances to date strings, etc, Version: "2012-10-17"
+ param_value = common.json_safe(result[name])
+ result[name] = json.dumps(param_value)
+ return result
+
+ return replace
+
+
+# TODO: remove
+def param_defaults(param_func, defaults):
+ def replace(
+ account_id: str,
+ region_name: str,
+ properties: dict,
+ logical_resource_id: str,
+ *args,
+ **kwargs,
+ ):
+ result = param_func(
+ account_id, region_name, properties, logical_resource_id, *args, **kwargs
+ )
+ for key, value in defaults.items():
+ if result.get(key) in ["", None]:
+ result[key] = value
+ return result
+
+ return replace
+
+
+def remove_none_values(params):
+ """Remove None values and AWS::NoValue placeholders (recursively) in the given object."""
+
+ def remove_nones(o, **kwargs):
+ if isinstance(o, dict):
+ for k, v in dict(o).items():
+ if v in [None, PLACEHOLDER_AWS_NO_VALUE]:
+ o.pop(k)
+ if isinstance(o, list):
+ common.run_safe(o.remove, None)
+ common.run_safe(o.remove, PLACEHOLDER_AWS_NO_VALUE)
+ return o
+
+ result = common.recurse_object(params, remove_nones)
+ return result
+
+
+def params_list_to_dict(param_name, key_attr_name="Key", value_attr_name="Value"):
+ def do_replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ result = {}
+ for entry in params.get(param_name, []):
+ key = entry[key_attr_name]
+ value = entry[value_attr_name]
+ result[key] = value
+ return result
+
+ return do_replace
+
+
+def lambda_keys_to_lower(key=None, skip_children_of: List[str] = None):
+ return (
+ lambda account_id,
+ region_name,
+ params,
+ logical_resource_id,
+ *args,
+ **kwargs: common.keys_to_lower(
+ obj=(params.get(key) if key else params), skip_children_of=skip_children_of
+ )
+ )
+
+
+def merge_parameters(func1, func2):
+ return (
+ lambda account_id,
+ region_name,
+ properties,
+ logical_resource_id,
+ *args,
+ **kwargs: common.merge_dicts(
+ func1(account_id, region_name, properties, logical_resource_id, *args, **kwargs),
+ func2(account_id, region_name, properties, logical_resource_id, *args, **kwargs),
+ )
+ )
+
+
+def str_or_none(o):
+ return o if o is None else json.dumps(o) if isinstance(o, (dict, list)) else str(o)
+
+
+def params_dict_to_list(param_name, key_attr_name="Key", value_attr_name="Value", wrapper=None):
+ def do_replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ result = []
+ for key, value in params.get(param_name, {}).items():
+ result.append({key_attr_name: key, value_attr_name: value})
+ if wrapper:
+ result = {wrapper: result}
+ return result
+
+ return do_replace
+
+
+# TODO: remove
+def params_select_attributes(*attrs):
+ def do_select(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ result = {}
+ for attr in attrs:
+ if params.get(attr) is not None:
+ result[attr] = str_or_none(params.get(attr))
+ return result
+
+ return do_select
+
+
+def param_json_to_str(name):
+ def _convert(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs):
+ result = params.get(name)
+ if result:
+ result = json.dumps(result)
+ return result
+
+ return _convert
+
+
+def lambda_select_params(*selected):
+ # TODO: remove and merge with function below
+ return select_parameters(*selected)
+
+
+def select_parameters(*param_names):
+ return (
+ lambda account_id,
+ region_name,
+ properties,
+ logical_resource_id,
+ *args,
+ **kwargs: select_attributes(properties, param_names)
+ )
+
+
+def is_none_or_empty_value(value):
+ return not value or value == PLACEHOLDER_AWS_NO_VALUE
+
+
+def generate_default_name(stack_name: str, logical_resource_id: str):
+ random_id_part = short_uid()
+ resource_id_part = logical_resource_id[:24]
+ stack_name_part = stack_name[: 63 - 2 - (len(random_id_part) + len(resource_id_part))]
+ return f"{stack_name_part}-{resource_id_part}-{random_id_part}"
+
+
+def generate_default_name_without_stack(logical_resource_id: str):
+ random_id_part = short_uid()
+ resource_id_part = logical_resource_id[: 63 - 1 - len(random_id_part)]
+ return f"{resource_id_part}-{random_id_part}"
+
+
+# Utils for parameter conversion
+
+# TODO: handling of multiple valid types
+param_validation = re.compile(
+ r"Invalid type for parameter (?P<param>[\w.]+), value: (?P<value>\w+), type: <class '(?P<wrong_class>\w+)'>, valid types: <class '(?P<valid_class>\w+)'>"
+)
+
+
+def get_nested(obj: dict, path: str):
+ parts = path.split(".")
+ result = obj
+ for p in parts[:-1]:
+ result = result.get(p, {})
+ return result.get(parts[-1])
+
+
+def set_nested(obj: dict, path: str, value):
+ parts = path.split(".")
+ result = obj
+ for p in parts[:-1]:
+ result = result.get(p, {})
+ result[parts[-1]] = value
+
+
+def fix_boto_parameters_based_on_report(original_params: dict, report: str) -> dict:
+ """
+ Fix invalid type parameter validation errors in boto request parameters
+
+ :param original_params: original boto request parameters that lead to the parameter validation error
+ :param report: error report from botocore ParamValidator
+ :return: a copy of original_params with all values replaced by their correctly cast ones
+ """
+ params = deepcopy(original_params)
+ for found in param_validation.findall(report):
+ param_name, value, wrong_class, valid_class = found
+ cast_class = getattr(builtins, valid_class)
+ old_value = get_nested(params, param_name)
+
+ if cast_class == bool and str(old_value).lower() in ["true", "false"]:
+ new_value = str(old_value).lower() == "true"
+ else:
+ new_value = cast_class(old_value)
+ set_nested(params, param_name, new_value)
+ return params
+
+
+def fix_account_id_in_arns(params: dict, replacement_account_id: str) -> dict:
+ def fix_ids(o, **kwargs):
+ if isinstance(o, dict):
+ for k, v in o.items():
+ if is_string(v, exclude_binary=True):
+ o[k] = aws_stack.fix_account_id_in_arns(v, replacement=replacement_account_id)
+ elif is_string(o, exclude_binary=True):
+ o = aws_stack.fix_account_id_in_arns(o, replacement=replacement_account_id)
+ return o
+
+ result = recurse_object(params, fix_ids)
+ return result
+
+
+def convert_data_types(type_conversions: dict[str, Callable], params: dict) -> dict:
+ """Convert data types in the "params" object, with the type defs
+ specified in the 'types' attribute of "func_details"."""
+ attr_names = type_conversions.keys() or []
+
+ def cast(_obj, _type):
+ if _type == bool:
+ return _obj in ["True", "true", True]
+ if _type == str:
+ if isinstance(_obj, bool):
+ return str(_obj).lower()
+ return str(_obj)
+ if _type in (int, float):
+ return _type(_obj)
+ return _obj
+
+ def fix_types(o, **kwargs):
+ if isinstance(o, dict):
+ for k, v in o.items():
+ if k in attr_names:
+ o[k] = cast(v, type_conversions[k])
+ return o
+
+ result = recurse_object(params, fix_types)
+ return result
+
+
+def log_not_available_message(resource_type: str, message: str):
+ LOG.warning(
+ "%s. To find out if %s is supported in LocalStack Pro, "
+ "please check out our docs at https://docs.localstack.cloud/user-guide/aws/cloudformation/#resources-pro--enterprise-edition",
+ message,
+ resource_type,
+ )
+
+
+def dump_resource_as_json(resource: dict) -> str:
+ return str(run_safe(lambda: json.dumps(json_safe(resource))) or resource)
+
+
+def get_action_name_for_resource_change(res_change: str) -> str:
+ return {"Add": "CREATE", "Remove": "DELETE", "Modify": "UPDATE"}.get(res_change)
+
+
+def check_not_found_exception(e, resource_type, resource, resource_status=None):
+ # we expect this to be a "not found" exception
+ markers = [
+ "NoSuchBucket",
+ "ResourceNotFound",
+ "NoSuchEntity",
+ "NotFoundException",
+ "404",
+ "not found",
+ "not exist",
+ ]
+
+ markers_hit = [m for m in markers if m in str(e)]
+ if not markers_hit:
+ LOG.warning(
+ "Unexpected error processing resource type %s: Exception: %s - %s - status: %s",
+ resource_type,
+ str(e),
+ resource,
+ resource_status,
+ )
+ if config.CFN_VERBOSE_ERRORS:
+ raise e
+ else:
+ return False
+
+ return True
diff --git a/localstack-core/localstack/services/cloudformation/engine/__init__.py b/localstack-core/localstack/services/cloudformation/engine/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/cloudformation/engine/changes.py b/localstack-core/localstack/services/cloudformation/engine/changes.py
new file mode 100644
index 0000000000000..ae6ced9e5563e
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/changes.py
@@ -0,0 +1,18 @@
+from typing import Literal, Optional, TypedDict
+
+Action = str
+
+
+class ResourceChange(TypedDict):
+ Action: Action
+ LogicalResourceId: str
+ PhysicalResourceId: Optional[str]
+ ResourceType: str
+ Scope: list
+ Details: list
+ Replacement: Optional[Literal["False"]]
+
+
+class ChangeConfig(TypedDict):
+ Type: str
+ ResourceChange: ResourceChange
diff --git a/localstack-core/localstack/services/cloudformation/engine/entities.py b/localstack-core/localstack/services/cloudformation/engine/entities.py
new file mode 100644
index 0000000000000..e1498258694ee
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/entities.py
@@ -0,0 +1,442 @@
+import logging
+from typing import Optional, TypedDict
+
+from localstack.aws.api.cloudformation import Capability, ChangeSetType, Parameter
+from localstack.services.cloudformation.engine.parameters import (
+ StackParameter,
+ convert_stack_parameters_to_list,
+ mask_no_echo,
+ strip_parameter_type,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model import (
+ ChangeSetModel,
+ NodeTemplate,
+)
+from localstack.utils.aws import arns
+from localstack.utils.collections import select_attributes
+from localstack.utils.id_generator import ExistingIds, ResourceIdentifier, Tags, generate_short_uid
+from localstack.utils.json import clone_safe
+from localstack.utils.objects import recurse_object
+from localstack.utils.strings import long_uid, short_uid
+from localstack.utils.time import timestamp_millis
+
+LOG = logging.getLogger(__name__)
+
+
+class StackSet:
+ """A stack set contains multiple stack instances."""
+
+ # FIXME: confusing name. metadata is the complete incoming request object
+ def __init__(self, metadata: dict):
+ self.metadata = metadata
+ # list of stack instances
+ self.stack_instances = []
+ # maps operation ID to stack set operation details
+ self.operations = {}
+
+ @property
+ def stack_set_name(self):
+ return self.metadata.get("StackSetName")
+
+
+class StackInstance:
+ """A stack instance belongs to a stack set and is specific to a region / account ID."""
+
+ # FIXME: confusing name. metadata is the complete incoming request object
+ def __init__(self, metadata: dict):
+ self.metadata = metadata
+ # reference to the deployed stack belonging to this stack instance
+ self.stack = None
+
+
+class CreateChangeSetInput(TypedDict):
+ StackName: str
+ Capabilities: list[Capability]
+ ChangeSetName: Optional[str]
+ ChangSetType: Optional[ChangeSetType]
+ Parameters: list[Parameter]
+
+
+class StackTemplate(TypedDict):
+ StackName: str
+ ChangeSetName: Optional[str]
+ Outputs: dict
+ Resources: dict
+
+
+class StackIdentifier(ResourceIdentifier):
+ service = "cloudformation"
+ resource = "stack"
+
+ def __init__(self, account_id: str, region: str, stack_name: str):
+ super().__init__(account_id, region, stack_name)
+
+ def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str:
+ return generate_short_uid(resource_identifier=self, existing_ids=existing_ids, tags=tags)
+
+
+# TODO: remove metadata (flatten into individual fields)
+class Stack:
+ change_sets: list["StackChangeSet"]
+
+ def __init__(
+ self,
+ account_id: str,
+ region_name: str,
+ metadata: Optional[CreateChangeSetInput] = None,
+ template: Optional[StackTemplate] = None,
+ template_body: Optional[str] = None,
+ ):
+ self.account_id = account_id
+ self.region_name = region_name
+
+ if template is None:
+ template = {}
+
+ self.resolved_outputs = list() # TODO
+ self.resolved_parameters: dict[str, StackParameter] = {}
+ self.resolved_conditions: dict[str, bool] = {}
+
+ self.metadata = metadata or {}
+ self.template = template or {}
+ self.template_body = template_body
+ self._template_raw = clone_safe(self.template)
+ self.template_original = clone_safe(self.template)
+ # initialize resources
+ for resource_id, resource in self.template_resources.items():
+ # HACK: if the resource is a Fn::ForEach intrinsic call from the LanguageExtensions transform, then it is not a dictionary but a list
+ if resource_id.startswith("Fn::ForEach"):
+ # we are operating on an untransformed template, so ignore for now
+ continue
+ resource["LogicalResourceId"] = self.template_original["Resources"][resource_id][
+ "LogicalResourceId"
+ ] = resource.get("LogicalResourceId") or resource_id
+ # initialize stack template attributes
+ stack_id = self.metadata.get("StackId") or arns.cloudformation_stack_arn(
+ self.stack_name,
+ stack_id=StackIdentifier(
+ account_id=account_id, region=region_name, stack_name=metadata.get("StackName")
+ ).generate(tags=metadata.get("tags")),
+ account_id=account_id,
+ region_name=region_name,
+ )
+ self.template["StackId"] = self.metadata["StackId"] = stack_id
+ self.template["Parameters"] = self.template.get("Parameters") or {}
+ self.template["Outputs"] = self.template.get("Outputs") or {}
+ self.template["Conditions"] = self.template.get("Conditions") or {}
+ # initialize metadata
+ self.metadata["Parameters"] = self.metadata.get("Parameters") or []
+ self.metadata["StackStatus"] = "CREATE_IN_PROGRESS"
+ self.metadata["CreationTime"] = self.metadata.get("CreationTime") or timestamp_millis()
+ self.metadata["LastUpdatedTime"] = self.metadata["CreationTime"]
+ self.metadata.setdefault("Description", self.template.get("Description"))
+ self.metadata.setdefault("RollbackConfiguration", {})
+ self.metadata.setdefault("DisableRollback", False)
+ self.metadata.setdefault("EnableTerminationProtection", False)
+ # maps resource id to resource state
+ self._resource_states = {}
+ # list of stack events
+ self.events = []
+ # list of stack change sets
+ self.change_sets = []
+ # self.evaluated_conditions = {}
+
+ def set_resolved_parameters(self, resolved_parameters: dict[str, StackParameter]):
+ self.resolved_parameters = resolved_parameters
+ if resolved_parameters:
+ self.metadata["Parameters"] = list(resolved_parameters.values())
+
+ def set_resolved_stack_conditions(self, resolved_conditions: dict[str, bool]):
+ self.resolved_conditions = resolved_conditions
+
+ def describe_details(self):
+ attrs = [
+ "StackId",
+ "StackName",
+ "Description",
+ "StackStatusReason",
+ "StackStatus",
+ "Capabilities",
+ "ParentId",
+ "RootId",
+ "RoleARN",
+ "CreationTime",
+ "DeletionTime",
+ "LastUpdatedTime",
+ "ChangeSetId",
+ "RollbackConfiguration",
+ "DisableRollback",
+ "EnableTerminationProtection",
+ "DriftInformation",
+ ]
+ result = select_attributes(self.metadata, attrs)
+ result["Tags"] = self.tags
+ outputs = self.resolved_outputs
+ if outputs:
+ result["Outputs"] = outputs
+ stack_parameters = convert_stack_parameters_to_list(self.resolved_parameters)
+ if stack_parameters:
+ result["Parameters"] = [
+ mask_no_echo(strip_parameter_type(sp)) for sp in stack_parameters
+ ]
+ if not result.get("DriftInformation"):
+ result["DriftInformation"] = {"StackDriftStatus": "NOT_CHECKED"}
+ for attr in ["Tags", "NotificationARNs"]:
+ result.setdefault(attr, [])
+ return result
+
+ def set_stack_status(self, status: str, status_reason: Optional[str] = None):
+ self.metadata["StackStatus"] = status
+ if "FAILED" in status:
+ self.metadata["StackStatusReason"] = status_reason or "Deployment failed"
+ self.log_stack_errors()
+ self.add_stack_event(
+ self.stack_name, self.stack_id, status, status_reason=status_reason or ""
+ )
+
+ def log_stack_errors(self, level=logging.WARNING):
+ for event in self.events:
+ if event["ResourceStatus"].endswith("FAILED"):
+ if reason := event.get("ResourceStatusReason"):
+ reason = reason.replace("\n", "; ")
+ LOG.log(
+ level,
+ "CFn resource failed to deploy: %s (%s)",
+ event["LogicalResourceId"],
+ reason,
+ )
+ else:
+ LOG.warning("CFn resource failed to deploy: %s", event["LogicalResourceId"])
+
+ def set_time_attribute(self, attribute, new_time=None):
+ self.metadata[attribute] = new_time or timestamp_millis()
+
+ def add_stack_event(
+ self,
+ resource_id: str = None,
+ physical_res_id: str = None,
+ status: str = "",
+ status_reason: str = "",
+ ):
+ resource_id = resource_id or self.stack_name
+ physical_res_id = physical_res_id or self.stack_id
+ resource_type = (
+ self.template.get("Resources", {})
+ .get(resource_id, {})
+ .get("Type", "AWS::CloudFormation::Stack")
+ )
+
+ event = {
+ "EventId": long_uid(),
+ "Timestamp": timestamp_millis(),
+ "StackId": self.stack_id,
+ "StackName": self.stack_name,
+ "LogicalResourceId": resource_id,
+ "PhysicalResourceId": physical_res_id,
+ "ResourceStatus": status,
+ "ResourceType": resource_type,
+ }
+
+ if status_reason:
+ event["ResourceStatusReason"] = status_reason
+
+ self.events.insert(0, event)
+
+ def set_resource_status(self, resource_id: str, status: str, status_reason: str = ""):
+ """Update the deployment status of the given resource ID and publish a corresponding stack event."""
+ physical_res_id = self.resources.get(resource_id, {}).get("PhysicalResourceId")
+ self._set_resource_status_details(resource_id, physical_res_id=physical_res_id)
+ state = self.resource_states.setdefault(resource_id, {})
+ state["PreviousResourceStatus"] = state.get("ResourceStatus")
+ state["ResourceStatus"] = status
+ state["LastUpdatedTimestamp"] = timestamp_millis()
+ self.add_stack_event(resource_id, physical_res_id, status, status_reason=status_reason)
+
+ def _set_resource_status_details(self, resource_id: str, physical_res_id: str = None):
+ """Helper function to ensure that the status details for the given resource ID are up-to-date."""
+ resource = self.resources.get(resource_id)
+ if resource is None or resource.get("Type") == "Parameter":
+ # make sure we delete the states for any non-existing/deleted resources
+ self._resource_states.pop(resource_id, None)
+ return
+ state = self._resource_states.setdefault(resource_id, {})
+ attr_defaults = (
+ ("LogicalResourceId", resource_id),
+ ("PhysicalResourceId", physical_res_id),
+ )
+ for res in [resource, state]:
+ for attr, default in attr_defaults:
+ res[attr] = res.get(attr) or default
+ state["StackName"] = state.get("StackName") or self.stack_name
+ state["StackId"] = state.get("StackId") or self.stack_id
+ state["ResourceType"] = state.get("ResourceType") or self.resources[resource_id].get("Type")
+ state["Timestamp"] = timestamp_millis()
+ return state
+
+ def resource_status(self, resource_id: str):
+ result = self._lookup(self.resource_states, resource_id)
+ return result
+
+ def latest_template_raw(self):
+ if self.change_sets:
+ return self.change_sets[-1]._template_raw
+ return self._template_raw
+
+ @property
+ def resource_states(self):
+ for resource_id in list(self._resource_states.keys()):
+ self._set_resource_status_details(resource_id)
+ return self._resource_states
+
+ @property
+ def stack_name(self):
+ return self.metadata["StackName"]
+
+ @property
+ def stack_id(self):
+ return self.metadata["StackId"]
+
+ @property
+ def resources(self):
+ """Return dict of resources"""
+ return dict(self.template_resources)
+
+ @resources.setter
+ def resources(self, resources: dict):
+ self.template["Resources"] = resources
+
+ @property
+ def template_resources(self):
+ return self.template.setdefault("Resources", {})
+
+ @property
+ def tags(self):
+ return self.metadata.get("Tags", [])
+
+ @property
+ def imports(self):
+ def _collect(o, **kwargs):
+ if isinstance(o, dict):
+ import_val = o.get("Fn::ImportValue")
+ if import_val:
+ result.add(import_val)
+ return o
+
+ result = set()
+ recurse_object(self.resources, _collect)
+ return result
+
+ @property
+ def template_parameters(self):
+ return self.template["Parameters"]
+
+ @property
+ def conditions(self):
+ """Returns the (mutable) dict of stack conditions."""
+ return self.template.setdefault("Conditions", {})
+
+ @property
+ def mappings(self):
+ """Returns the (mutable) dict of stack mappings."""
+ return self.template.setdefault("Mappings", {})
+
+ @property
+ def outputs(self):
+ """Returns the (mutable) dict of stack outputs."""
+ return self.template.setdefault("Outputs", {})
+
+ @property
+ def status(self):
+ return self.metadata["StackStatus"]
+
+ @property
+ def resource_types(self):
+ return [r.get("Type") for r in self.template_resources.values()]
+
+ def resource(self, resource_id):
+ return self._lookup(self.resources, resource_id)
+
+ def _lookup(self, resource_map, resource_id):
+ resource = resource_map.get(resource_id)
+ if not resource:
+ raise Exception(
+ 'Unable to find details for resource "%s" in stack "%s"'
+ % (resource_id, self.stack_name)
+ )
+ return resource
+
+ def copy(self):
+ return Stack(
+ account_id=self.account_id,
+ region_name=self.region_name,
+ metadata=dict(self.metadata),
+ template=dict(self.template),
+ )
+
+
+# FIXME: remove inheritance
+# TODO: what functionality of the Stack object do we rely on here?
+class StackChangeSet(Stack):
+ update_graph: NodeTemplate | None
+ change_set_type: ChangeSetType | None
+
+ def __init__(
+ self,
+ account_id: str,
+ region_name: str,
+ stack: Stack,
+ params=None,
+ template=None,
+ change_set_type: ChangeSetType | None = None,
+ ):
+ if template is None:
+ template = {}
+ if params is None:
+ params = {}
+ super(StackChangeSet, self).__init__(account_id, region_name, params, template)
+
+ name = self.metadata["ChangeSetName"]
+ if not self.metadata.get("ChangeSetId"):
+ self.metadata["ChangeSetId"] = arns.cloudformation_change_set_arn(
+ name, change_set_id=short_uid(), account_id=account_id, region_name=region_name
+ )
+
+ self.account_id = account_id
+ self.region_name = region_name
+ self.stack = stack
+ self.metadata["StackId"] = stack.stack_id
+ self.metadata["Status"] = "CREATE_PENDING"
+ self.change_set_type = change_set_type
+
+ @property
+ def change_set_id(self):
+ return self.metadata["ChangeSetId"]
+
+ @property
+ def change_set_name(self):
+ return self.metadata["ChangeSetName"]
+
+ @property
+ def resources(self):
+ return dict(self.stack.resources)
+
+ @property
+ def changes(self):
+ result = self.metadata["Changes"] = self.metadata.get("Changes", [])
+ return result
+
+ # V2 only
+ def populate_update_graph(
+ self,
+ before_template: Optional[dict],
+ after_template: Optional[dict],
+ before_parameters: Optional[dict],
+ after_parameters: Optional[dict],
+ ) -> None:
+ change_set_model = ChangeSetModel(
+ before_template=before_template,
+ after_template=after_template,
+ before_parameters=before_parameters,
+ after_parameters=after_parameters,
+ )
+ self.update_graph = change_set_model.get_update_model()
diff --git a/localstack-core/localstack/services/cloudformation/engine/errors.py b/localstack-core/localstack/services/cloudformation/engine/errors.py
new file mode 100644
index 0000000000000..0ee44f3530e58
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/errors.py
@@ -0,0 +1,4 @@
+class TemplateError(RuntimeError):
+ """
+ Error thrown on a programming error from the user
+ """
diff --git a/localstack-core/localstack/services/cloudformation/engine/parameters.py b/localstack-core/localstack/services/cloudformation/engine/parameters.py
new file mode 100644
index 0000000000000..ba39fafc40db2
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/parameters.py
@@ -0,0 +1,209 @@
+"""
+TODO: ordering & grouping of parameters
+TODO: design proper structure for parameters to facilitate validation etc.
+TODO: clearer language around both parameters and "resolving"
+
+Documentation extracted from AWS docs (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html):
+ The following requirements apply when using parameters:
+
+ You can have a maximum of 200 parameters in an AWS CloudFormation template.
+ Each parameter must be given a logical name (also called logical ID), which must be alphanumeric and unique among all logical names within the template.
+ Each parameter must be assigned a parameter type that is supported by AWS CloudFormation. For more information, see Type.
+ Each parameter must be assigned a value at runtime for AWS CloudFormation to successfully provision the stack. You can optionally specify a default value for AWS CloudFormation to use unless another value is provided.
+ Parameters must be declared and referenced from within the same template. You can reference parameters from the Resources and Outputs sections of the template.
+
+ When you create or update stacks and create change sets, AWS CloudFormation uses whatever values exist in Parameter Store at the time the operation is run. If a specified parameter doesn't exist in Parameter Store under the caller's AWS account, AWS CloudFormation returns a validation error.
+
+ For stack updates, the Use existing value option in the console and the UsePreviousValue attribute for update-stack tell AWS CloudFormation to use the existing Systems Manager parameter key—not its value. AWS CloudFormation always fetches the latest values from Parameter Store when it updates stacks.
+
+"""
+
+import logging
+from typing import Literal, Optional, TypedDict
+
+from botocore.exceptions import ClientError
+
+from localstack.aws.api.cloudformation import Parameter, ParameterDeclaration
+from localstack.aws.connect import connect_to
+
+LOG = logging.getLogger(__name__)
+
+
+def extract_stack_parameter_declarations(template: dict) -> dict[str, ParameterDeclaration]:
+ """
+ Extract and build a dict of stack parameter declarations from a CloudFormation stack template.
+
+ Only the "Parameters" section of the template is inspected; each entry is
+ converted into a ``ParameterDeclaration`` carrying its key, default, type
+ and NoEcho flag.
+
+ :param template: the parsed CloudFormation stack template
+ :return: a dictionary of declared parameters, mapping logical IDs to the corresponding parameter declaration
+ """
+ result = {}
+ for param_key, param in template.get("Parameters", {}).items():
+ result[param_key] = ParameterDeclaration(
+ ParameterKey=param_key,
+ DefaultValue=param.get("Default"),
+ ParameterType=param.get("Type"),
+ # NoEcho defaults to False when not declared in the template
+ NoEcho=param.get("NoEcho", False),
+ # TODO: test & implement rest here
+ # ParameterConstraints=?,
+ # Description=?
+ )
+ return result
+
+
+class StackParameter(Parameter):
+ """A resolved stack parameter that also carries its declared type."""
+
+ # we need the type information downstream when actually using the resolved value
+ # e.g. in case of lists so that we know that we should interpret the string as a comma-separated list.
+ ParameterType: str
+
+
+def resolve_parameters(
+ account_id: str,
+ region_name: str,
+ parameter_declarations: dict[str, ParameterDeclaration],
+ new_parameters: dict[str, Parameter],
+ old_parameters: dict[str, Parameter],
+) -> dict[str, StackParameter]:
+ """
+ Resolves stack parameters or raises an exception if any parameter can not be resolved.
+
+ Assumptions:
+ - There are no extra undeclared parameters given (validate before calling this method)
+
+ TODO: is UsePreviousValue=False equivalent to not specifying it, in all situations?
+
+ :param parameter_declarations: The parameter declaration from the (potentially new) template, i.e. the "Parameters" section
+ :param new_parameters: The parameters to resolve
+ :param old_parameters: The old parameters from the previous stack deployment, if available
+ :return: a copy of new_parameters with resolved values
+ """
+ resolved_parameters = dict()
+
+ # populate values for every parameter declared in the template
+ for pm in parameter_declarations.values():
+ pm_key = pm["ParameterKey"]
+ resolved_param = StackParameter(ParameterKey=pm_key, ParameterType=pm["ParameterType"])
+ new_parameter = new_parameters.get(pm_key)
+ old_parameter = old_parameters.get(pm_key)
+
+ if new_parameter is None:
+ # since no value has been specified for the deployment, we need to be able to resolve the default or fail
+ default_value = pm["DefaultValue"]
+ if default_value is None:
+ LOG.error("New parameter without a default value: %s", pm_key)
+ raise Exception(
+ f"Invalid. Parameter '{pm_key}' needs to have either param specified or Default."
+ ) # TODO: test and verify
+
+ resolved_param["ParameterValue"] = default_value
+ else:
+ if (
+ new_parameter.get("UsePreviousValue", False)
+ and new_parameter.get("ParameterValue") is not None
+ ):
+ raise Exception(
+ f"Can't set both 'UsePreviousValue' and a concrete value for parameter '{pm_key}'."
+ ) # TODO: test and verify
+
+ if new_parameter.get("UsePreviousValue", False):
+ if old_parameter is None:
+ raise Exception(
+ f"Set 'UsePreviousValue' but stack has no previous value for parameter '{pm_key}'."
+ ) # TODO: test and verify
+
+ resolved_param["ParameterValue"] = old_parameter["ParameterValue"]
+ else:
+ resolved_param["ParameterValue"] = new_parameter["ParameterValue"]
+
+ resolved_param["NoEcho"] = pm.get("NoEcho", False)
+ resolved_parameters[pm_key] = resolved_param
+
+ # Note that SSM parameters always need to be resolved anew here
+ # TODO: support more parameter types
+ if pm["ParameterType"].startswith("AWS::SSM"):
+ if pm["ParameterType"] in [
+ "AWS::SSM::Parameter::Value",
+ "AWS::SSM::Parameter::Value