Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
92 changes: 63 additions & 29 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -234,19 +234,29 @@ jobs:
aws-access-key-id: ${{ secrets.DEPLOYER_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.DEPLOYER_AWS_SECRET_ACCESS_KEY }}

# Staging deployment (mason) - triggered on staging branch
build-and-deploy-staging:
# Staging Docker builds - parallel via matrix
build-staging-images:
if: github.ref == 'refs/heads/staging'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- service: admin
dockerfile: docker/admin/Dockerfile
repository: mark-admin
image_tag: mark-admin-${{ github.sha }}
- service: handler
dockerfile: docker/handler/Dockerfile
repository: mark-handler
image_tag: mark-handler-${{ github.sha }}
- service: poller
dockerfile: docker/poller/Dockerfile
repository: mark-poller
image_tag: mark-poller-${{ github.sha }}
env:
AWS_REGION: sa-east-1
REGISTRY: 679752396206.dkr.ecr.sa-east-1.amazonaws.com
POLLER_REPOSITORY: mark-poller
POLLER_IMAGE_TAG: mark-poller-${{ github.sha }}
HANDLER_REPOSITORY: mark-handler
HANDLER_IMAGE_TAG: mark-handler-${{ github.sha }}
ADMIN_REPOSITORY: mark-admin
ADMIN_IMAGE_TAG: mark-admin-${{ github.sha }}
permissions:
contents: read
packages: write
Expand All @@ -268,37 +278,61 @@ jobs:
with:
mask-password: 'true'

- name: Ensure ECR repositories exist
- name: Ensure ECR repository exists
run: |
# Create repositories if they don't exist
aws ecr describe-repositories --repository-names $ADMIN_REPOSITORY --region $AWS_REGION || \
aws ecr create-repository --repository-name $ADMIN_REPOSITORY --region $AWS_REGION --image-scanning-configuration scanOnPush=true --image-tag-mutability MUTABLE
aws ecr describe-repositories --repository-names $HANDLER_REPOSITORY --region $AWS_REGION || \
aws ecr create-repository --repository-name $HANDLER_REPOSITORY --region $AWS_REGION --image-scanning-configuration scanOnPush=true --image-tag-mutability MUTABLE
aws ecr describe-repositories --repository-names $POLLER_REPOSITORY --region $AWS_REGION || \
aws ecr create-repository --repository-name $POLLER_REPOSITORY --region $AWS_REGION --image-scanning-configuration scanOnPush=true --image-tag-mutability MUTABLE
aws ecr describe-repositories --repository-names ${{ matrix.repository }} --region $AWS_REGION || \
aws ecr create-repository --repository-name ${{ matrix.repository }} --region $AWS_REGION --image-scanning-configuration scanOnPush=true --image-tag-mutability MUTABLE

- name: Build and push Admin Docker image
run: |
docker build --provenance=false --sbom=false -f docker/admin/Dockerfile -t $REGISTRY/$ADMIN_REPOSITORY:$ADMIN_IMAGE_TAG .
docker push $REGISTRY/$ADMIN_REPOSITORY:$ADMIN_IMAGE_TAG
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Build and push Invoice Handler Docker image
run: |
docker build -f docker/handler/Dockerfile -t $REGISTRY/$HANDLER_REPOSITORY:$HANDLER_IMAGE_TAG .
docker push $REGISTRY/$HANDLER_REPOSITORY:$HANDLER_IMAGE_TAG
- name: Build and push ${{ matrix.service }} Docker image
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.dockerfile }}
push: true
tags: ${{ env.REGISTRY }}/${{ matrix.repository }}:${{ matrix.image_tag }}
cache-from: type=gha,scope=staging-${{ matrix.service }}
cache-to: type=gha,scope=staging-${{ matrix.service }},mode=max
provenance: false
sbom: false

# Staging deployment - runs after all images are built
deploy-staging:
if: github.ref == 'refs/heads/staging'
needs: build-staging-images
runs-on: ubuntu-latest
env:
AWS_REGION: sa-east-1
REGISTRY: 679752396206.dkr.ecr.sa-east-1.amazonaws.com
POLLER_REPOSITORY: mark-poller
POLLER_IMAGE_TAG: mark-poller-${{ github.sha }}
HANDLER_REPOSITORY: mark-handler
HANDLER_IMAGE_TAG: mark-handler-${{ github.sha }}
ADMIN_REPOSITORY: mark-admin
ADMIN_IMAGE_TAG: mark-admin-${{ github.sha }}
permissions:
contents: read
packages: write

- name: Build and push Poller Docker image
run: |
docker build --provenance=false --sbom=false -f docker/poller/Dockerfile -t $REGISTRY/$POLLER_REPOSITORY:$POLLER_IMAGE_TAG .
docker push $REGISTRY/$POLLER_REPOSITORY:$POLLER_IMAGE_TAG
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-region: sa-east-1
aws-access-key-id: ${{ secrets.DEPLOYER_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.DEPLOYER_AWS_SECRET_ACCESS_KEY }}

# ============================================================================
# POLLER REMOVAL - TEMPORARY
# ============================================================================
# Remove only the main poller Lambda function (mark_poller) before deploying the invoice
# handler to prevent duplicate intent creation. Other poller Lambdas remain active.
#
# TODO: Remove this step once poller migration is complete
# ============================================================================
- name: Remove Main Poller Lambda Function
Expand Down
10 changes: 5 additions & 5 deletions config/shard-manifest-mason.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@
"secretId": "mason-fastfill-signer-pk-share2"
},
"method": "shamir",
"required": true,
"_comment": "Fill Service signer private key for fast fills"
"required": false,
"_comment": "Fill Service signer private key for fast fills (optional - only needed when fill service is enabled)"
},
{
"path": "ton.mnemonic",
Expand All @@ -40,8 +40,8 @@
"secretId": "mason-ton-mnemonic-share2"
},
"method": "shamir",
"required": true,
"_comment": "TON wallet mnemonic for TAC bridge operations"
"required": false,
"_comment": "TON wallet mnemonic for TAC bridge operations (optional - only needed when TAC rebalance is enabled)"
},
{
"path": "solana.privateKey",
Expand All @@ -52,7 +52,7 @@
},
"method": "shamir",
"required": true,
"_comment": "Solana wallet private key (base58 encoded)"
"_comment": "Solana wallet private key (required for the dedicated Solana USDC poller)"
},
{
"path": "binance.apiSecret",
Expand Down
25 changes: 10 additions & 15 deletions docker/admin/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1
FROM public.ecr.aws/lambda/nodejs:20 AS node

# ----------------------------------------
Expand Down Expand Up @@ -53,9 +54,10 @@ COPY yarn.lock /tmp/build/
# Install dependencies including devDependencies
# Note: --mode=skip-build skips preinstall/postinstall scripts during install
# This avoids the "npx only-allow pnpm" check in @eth-optimism/core-utils
# Clear yarn cache before install to avoid corrupted package downloads
# Retry install on failure to handle transient npm registry issues
RUN yarn cache clean --all && \
# Cache mount speeds up yarn install when Docker layer cache misses
# Clear cache and retry on failure to handle corrupted downloads or transient registry issues
ENV YARN_CACHE_FOLDER=/tmp/yarn-cache
RUN --mount=type=cache,target=/tmp/yarn-cache \
yarn install --immutable --mode=skip-build || \
(yarn cache clean --all && sleep 2 && yarn install --immutable --mode=skip-build) && \
yarn workspaces foreach -A run rebuild
Expand All @@ -76,18 +78,11 @@ COPY packages/adapters/database /tmp/build/packages/adapters/database
COPY tsconfig.json /tmp/build/

# Build packages
# Build only the workspaces needed for admin (not all workspaces)
# Build dependencies first, then admin
RUN yarn workspace @mark/core build && \
yarn workspace @mark/logger build && \
yarn workspace @mark/cache build && \
yarn workspace @mark/chainservice build && \
yarn workspace @mark/database build && \
yarn workspace @mark/everclear build && \
yarn workspace @mark/prometheus build && \
yarn workspace @mark/web3signer build && \
yarn workspace @mark/rebalance build && \
yarn workspace @mark/admin build
# Build packages in topological order (respects declared deps)
# Exclude root workspace (its build script would re-run foreach without our excludes)
# Exclude packages not needed by admin
RUN yarn workspaces foreach -Av --topological-dev \
--exclude mark --exclude @mark/handler --exclude @mark/poller --exclude @mark/agent --exclude @mark/webhooks run build

# ----------------------------------------
# Runtime stage
Expand Down
14 changes: 9 additions & 5 deletions docker/handler/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1
FROM node:20 AS base

# ----------------------------------------
Expand Down Expand Up @@ -53,7 +54,9 @@ COPY yarn.lock /tmp/build/
# Note: --mode=skip-build skips preinstall/postinstall scripts during install
# This avoids the "npx only-allow pnpm" check in @eth-optimism/core-utils
# Then we run rebuild to build native modules with our build tools
RUN yarn install --immutable --mode=skip-build && \
ENV YARN_CACHE_FOLDER=/tmp/yarn-cache
RUN --mount=type=cache,target=/tmp/yarn-cache \
yarn install --immutable --mode=skip-build && \
yarn workspaces foreach -A run rebuild

# Copy source files
Expand All @@ -73,10 +76,11 @@ COPY packages/adapters/database /tmp/build/packages/adapters/database
COPY packages/adapters/webhooks /tmp/build/packages/adapters/webhooks
COPY tsconfig.json /tmp/build/

# Build packages
# Build core first to ensure declaration files are available
RUN yarn workspace @mark/core build && \
yarn build
# Build packages in topological order (respects declared deps)
# Exclude root workspace (its build script would re-run foreach without our excludes)
# Exclude admin — not needed by handler
RUN yarn workspaces foreach -Av --topological-dev \
--exclude mark --exclude @mark/admin run build

# ----------------------------------------
# Runtime stage
Expand Down
35 changes: 18 additions & 17 deletions docker/poller/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1
FROM public.ecr.aws/lambda/nodejs:20 AS node

# ----------------------------------------
Expand Down Expand Up @@ -53,9 +54,10 @@ COPY yarn.lock /tmp/build/
# Install dependencies including devDependencies
# Note: --mode=skip-build skips preinstall/postinstall scripts during install
# This avoids the "npx only-allow pnpm" check in @eth-optimism/core-utils
# Clear yarn cache before install to avoid corrupted package downloads
# Retry install on failure to handle transient npm registry issues
RUN yarn cache clean --all && \
# Cache mount speeds up yarn install when Docker layer cache misses
# Clear cache and retry on failure to handle corrupted downloads or transient registry issues
ENV YARN_CACHE_FOLDER=/tmp/yarn-cache
RUN --mount=type=cache,target=/tmp/yarn-cache \
yarn install --immutable --mode=skip-build || \
(yarn cache clean --all && sleep 2 && yarn install --immutable --mode=skip-build) && \
yarn workspaces foreach -A run rebuild
Expand All @@ -76,19 +78,11 @@ COPY packages/adapters/database /tmp/build/packages/adapters/database
COPY tsconfig.json /tmp/build/

# Build packages
# Build only the workspaces needed for poller (not all workspaces)
# Build dependencies first, then poller
RUN yarn workspace @mark/core build && \
yarn workspace @mark/logger build && \
yarn workspace @mark/cache build && \
yarn workspace @mark/chainservice build && \
yarn workspace @mark/database build && \
yarn workspace @mark/everclear build && \
yarn workspace @mark/prometheus build && \
yarn workspace @mark/web3signer build && \
yarn workspace @mark/rebalance build && \
yarn workspace @mark/agent build && \
yarn workspace @mark/poller build
# Build packages in topological order (respects declared deps)
# Exclude root workspace (its build script would re-run foreach without our excludes)
# Exclude packages not needed by poller
RUN yarn workspaces foreach -Av --topological-dev \
--exclude mark --exclude @mark/admin --exclude @mark/handler --exclude @mark/webhooks run build

# ----------------------------------------
# Runtime stage
Expand All @@ -97,7 +91,14 @@ RUN yarn workspace @mark/core build && \
FROM node AS runtime

# Install dbmate for database migrations
RUN curl -fsSL -o /usr/local/bin/dbmate https://github.com/amacneil/dbmate/releases/latest/download/dbmate-linux-amd64 && \
# Pin to specific version and verify SHA256 checksum for supply chain security
ARG DBMATE_VERSION=v2.29.3
ARG DBMATE_SHA256=2bb1554a32d9c0bd544841d3523eae64fd60a58d7720c5d82900043dc5e87a6c
RUN set -eux; \
curl -fsSL -o /tmp/dbmate \
https://github.com/amacneil/dbmate/releases/download/${DBMATE_VERSION}/dbmate-linux-amd64; \
echo "${DBMATE_SHA256} /tmp/dbmate" | sha256sum -c -; \
mv /tmp/dbmate /usr/local/bin/dbmate; \
chmod +x /usr/local/bin/dbmate

ENV NODE_ENV=production \
Expand Down
30 changes: 27 additions & 3 deletions ops/mainnet/mark/config.tf
Original file line number Diff line number Diff line change
Expand Up @@ -56,12 +56,14 @@ locals {
}
]

# NOTE: TAC/METH rebalance config is loaded from SSM at runtime (not as env vars)
# NOTE: TAC/METH/aManUSDe/aMansyrupUSDT rebalance config is loaded from SSM at runtime (not as env vars)
# to stay under AWS Lambda's 4KB env var limit.
#
# SSM-loaded config (via MARK_CONFIG_SSM_PARAMETER):
# - tacRebalance.* (all TAC_REBALANCE_* values)
# - methRebalance.* (all METH_REBALANCE_* values)
# - aManUsdeRebalance.* (all AMANUSDE_REBALANCE_* values)
# - aMansyrupUsdtRebalance.* (all AMANSYRUPUSDT_REBALANCE_* values)
# - ton.mnemonic, tonSignerAddress
#
# See packages/core/src/config.ts for the fallback logic.
Expand Down Expand Up @@ -141,6 +143,28 @@ locals {
}
)

# aManUSDe rebalancing poller configuration
# Flow: USDC (ETH) → Stargate Bridge → USDC (Mantle) → DEX Swap → USDe → Aave Supply → aManUSDe
amanusde_poller_env_vars = merge(
local.poller_env_vars,
{
RUN_MODE = "aManUsdeOnly"
AMANUSDE_AAVE_POOL_ADDRESS = "0x458F293454fE0d67EC0655f3672301301DD51422"
AMANUSDE_DEX_SWAP_SLIPPAGE_BPS = "100"
}
)

# aMansyrupUSDT rebalancing poller configuration
# Flow: USDC (ETH) → Stargate Bridge → USDC (Mantle) → DEX Swap → syrupUSDT → Aave Supply → aMansyrupUSDT
amansyrupusdt_poller_env_vars = merge(
local.poller_env_vars,
{
RUN_MODE = "aMansyrupUsdtOnly"
AMANSYRUPUSDT_AAVE_POOL_ADDRESS = "0x458F293454fE0d67EC0655f3672301301DD51422"
AMANSYRUPUSDT_DEX_SWAP_SLIPPAGE_BPS = "100"
}
)

web3signer_env_vars = [
{
name = "WEB3_SIGNER_PRIVATE_KEY"
Expand Down
Loading
Loading