diff --git a/.github/workflows/pr-containerd-tests.yml b/.github/workflows/pr-containerd-tests.yml new file mode 100644 index 000000000..0ae03134e --- /dev/null +++ b/.github/workflows/pr-containerd-tests.yml @@ -0,0 +1,62 @@ +name: Containerd Engine Tests + +on: + pull_request: + +jobs: + leia-tests: + runs-on: ${{ matrix.os }} + env: + TERM: xterm + strategy: + fail-fast: false + matrix: + leia-test: + - containerd + node-version: + - "20" + os: + - ubuntu-24.04 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install node ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + registry-url: https://registry.npmjs.org + cache: npm + - name: Bundle Deps + uses: lando/prepare-release-action@v3 + with: + lando-plugin: true + version: dev + sync: false + - name: Install pkg dependencies + run: npm clean-install --prefer-offline --frozen-lockfile --production + - name: Package into node binary + uses: lando/pkg-action@v6 + id: pkg-action + with: + entrypoint: bin/lando + filename: lando + node-version: ${{ matrix.node-version }} + options: --options dns-result-order=ipv4first + upload: false + pkg: "@yao-pkg/pkg@5.16.1" + - name: Install full deps + run: npm clean-install --prefer-offline --frozen-lockfile + - name: Setup lando ${{ steps.pkg-action.outputs.file }} + uses: lando/setup-lando@v3 + with: + auto-setup: false + lando-version: ${{ steps.pkg-action.outputs.file }} + telemetry: false + - name: Run Leia Tests + uses: lando/run-leia-action@v2 + with: + leia-test: "./examples/${{ matrix.leia-test }}/README.md" + cleanup-header: "Destroy tests" + shell: bash + stdin: true diff --git a/.github/workflows/pr-core-tests.yml b/.github/workflows/pr-core-tests.yml index 0bd6950d8..3bfad5d00 100644 --- a/.github/workflows/pr-core-tests.yml +++ b/.github/workflows/pr-core-tests.yml @@ -5,6 +5,7 @@ on: jobs: leia-tests: + name: ${{ matrix.leia-test }} (${{ matrix.engine }}) runs-on: ${{ matrix.os }} env: TERM: xterm @@ -62,6 +63,8 @@ jobs: - update - version - yaml + engine: + - docker node-version: - "20" os: @@ -106,13 +109,14 @@ jobs: pkg: "@yao-pkg/pkg@5.16.1" - name: Install full deps run: npm clean-install --prefer-offline --frozen-lockfile - - name: Setup lando ${{ steps.pkg-action.outputs.file }} + - name: Setup lando ${{ steps.pkg-action.outputs.file }} (${{ matrix.engine }}) uses: lando/setup-lando@v3 with: lando-version: ${{ steps.pkg-action.outputs.file }} telemetry: false config: | setup.skipCommonPlugins=true + engine=${{ matrix.engine }} - name: Run Leia Tests uses: lando/run-leia-action@v2 env: diff --git a/BRIEF.md b/BRIEF.md new file mode 100644 index 000000000..407009d4e --- /dev/null +++ b/BRIEF.md @@ -0,0 +1,228 @@ +> **For any agent working on this feature:** Update this file before you finish your session. +> Add gotchas, move status items, record anything the next agent needs to know. + +# Moby-based Lando: Project Goals & Guidelines + +## The Problem + +Lando's dependency on Docker Desktop is its biggest operational headache. Docker is a moving target — users manage their own Docker versions, Docker Inc. changes licensing and behavior across releases, and version compatibility is a constant support burden. Users who use Docker for other things end up in version conflicts with Lando. + +**We want users to never know or care about the containerization tool.** They should be able to use Docker, Podman, whatever they want for their own work without affecting Lando. + +## The Solution + +Replace Docker Desktop with Lando's **own isolated containerd stack** — bundled, versioned, and managed entirely by Lando. The user's Docker installation is untouched. Lando pins its own runtime version. Version compatibility becomes an internal CI problem, not a user support problem. + +This follows the **Finch model** (AWS's approach): bundle containerd + nerdctl + BuildKit + finch-daemon into Lando's own isolated environment with its own sockets, its own data directories, its own everything. + +Docker remains available as a fallback engine for users who prefer it. + +## Architecture + +``` +lando setup (one-time, needs root) + │ + ├── Installs binaries to /usr/local/lib/lando/bin/ + ├── Creates lando-containerd.service (systemd) + ├── Creates 'lando' group + └── Starts the service + +lando start/stop/build/destroy (daily use, NO root needed) + │ + ▼ +docker-compose ──► finch-daemon ──► containerd + buildkitd +(DOCKER_HOST) (Docker API) (/run/lando/ sockets) +``` + +### The Stack + +- **containerd** — Container runtime. Manages images, containers, snapshots. +- **buildkitd** — Image builder. Dockerfile → image via BuildKit. +- **finch-daemon** — Docker API compatibility layer. Translates Docker API calls → containerd operations. This is what lets docker-compose and Dockerode work unchanged. +- **docker-compose** — Same binary used by the Docker engine path. Talks to finch-daemon via `DOCKER_HOST`. +- **runc** — Low-level OCI runtime. +- **nerdctl** — containerd CLI. Used internally by OCI runtime hooks only — NOT by user-facing Lando code. + +### Socket Isolation + +Everything lives under `/run/lando/`: +- `/run/lando/containerd.sock` — containerd gRPC +- `/run/lando/buildkitd.sock` — buildkitd gRPC +- `/run/lando/finch.sock` — finch-daemon Docker API + +This means Lando's containerd coexists peacefully with Docker Desktop, system containerd, Podman, or anything else. No conflicts. + +### Platform Strategy + +- **Linux/WSL**: Native containerd via systemd service +- **macOS**: Lima VM with containerd (similar to Docker Desktop's VM, but Lando-managed) +- **Windows (non-WSL)**: Not yet implemented + +## The Prime Directive: No Sudo After Setup + +**`lando setup` is the ONLY command that needs root.** After that, a normal user in the `lando` group does everything — start, stop, build, destroy, rebuild — without ever elevating privileges. + +This is the single most important design constraint. Every implementation decision flows from it: + +- The systemd service owns all root operations (starting daemons, creating sockets, managing permissions) +- User code talks to sockets (group-accessible, `660` permissions, `lando` group) +- No `sudo`, no `getSudoCommand`, no `run-elevated` in any runtime code path + +## Guidelines for All Tasks + +### 0. JSDoc type annotations on all touched code + +We're planning a TypeScript migration. Any code you write or modify should include **JSDoc type annotations** — the kind that work as real type definitions for VS Code's IntelliSense (`@param`, `@returns`, `@type`, `@typedef`). This means: + +- All function parameters and return types documented with `@param` and `@returns` +- Complex objects described with `@typedef` where appropriate +- Class properties annotated with `@type` +- Use `/** */` doc comments, not `//` — VS Code only picks up JSDoc-style + +This isn't busywork — it's laying the groundwork so the eventual TS migration is a rename + tighten, not a rewrite. + +### 1. Never use sudo in runtime code + +If you're writing code that runs during `lando start/stop/build/destroy/rebuild`: +- **No `sudo`**, no `getSudoCommand()`, no `run-elevated()` +- Talk to sockets instead. finch-daemon at `/run/lando/finch.sock` provides the Docker API. buildkitd at `/run/lando/buildkitd.sock` handles builds. +- `sudo` and `run-elevated` are for **setup hooks only** (installing binaries, creating the systemd service) + +### 2. Never shell out to nerdctl from user-facing code + +nerdctl has a hardcoded rootless check: if you're not root, it fails. There is no workaround from user-land. This was a hard-won lesson. + +Instead: +- **Compose operations** → `docker-compose` with `DOCKER_HOST=unix:///run/lando/finch.sock` +- **Container operations** (inspect, list, stop, remove) → Dockerode pointed at finch-daemon +- **Image builds** → `buildctl` directly (talks to buildkitd socket, no rootless check) +- **Image loading** → Dockerode's `loadImage()` via finch-daemon + +nerdctl IS used internally by containerd's OCI runtime hooks (invoked as root by the systemd service). That's fine. But Lando's JavaScript code must never invoke it. + +### 3. Don't conflict with system-wide containerd + +Our sockets, data, and state all live in Lando-specific directories: +- Sockets: `/run/lando/` (not `/run/containerd/`) +- Data: `~/.lando/data/containerd/` +- State: `/run/lando/containerd/` (ephemeral, under RuntimeDirectory) +- Config: `~/.lando/config/` + +**Never create symlinks from `/run/containerd/` to our sockets.** That conflicts with system containerd or Docker Desktop. Instead, set `CONTAINERD_ADDRESS=/run/lando/containerd.sock` in the systemd service environment so child processes (including OCI hooks) find our containerd. + +### 4. Use finch-daemon as the Docker compatibility bridge + +finch-daemon translates Docker API → containerd. This is what makes docker-compose and Dockerode work without modification. Everything that used to talk to Docker's socket now talks to finch's socket. + +**Known gap**: finch-daemon creates networks at the Docker API level but doesn't write CNI config files. The nerdctl OCI hook needs CNI configs for container networking. Bridge this gap by pre-creating CNI conflist files before docker-compose creates networks. + +### 5. Guard containerd code paths from Docker-era assumptions + +Lando's codebase was built for Docker. Many hooks assume Docker is the engine. When the containerd backend is active, these must be skipped: + +```js +if (lando.engine?.engineBackend === 'containerd' || lando.config.engine === 'containerd') return; +``` + +Key files that need guards: +- `lando-autostart-engine.js` — skips Docker autostart +- `lando-reset-orchestrator.js` — skips Docker engine recreation +- `app-reset-orchestrator.js` — skips Docker engine recreation + +### 6. Same compose interface, different socket + +Both Docker and containerd engines use `lib/compose.js` for generating compose command arrays. The only difference is execution environment: + +- **Docker**: `shell.sh([orchestratorBin, ...cmd], opts)` +- **Containerd**: `shell.sh([orchestratorBin, ...cmd], {...opts, env: {DOCKER_HOST: finchSocket}})` + +Don't create separate compose command builders. Use the same one with different env vars. + +### 7. The systemd service is the single source of root operations + +`lando-containerd.service` handles: +- Starting containerd, buildkitd, and finch-daemon +- Creating and permissioning sockets +- Setting environment variables (`CONTAINERD_ADDRESS`, `PATH`) +- Auto-restart on failure + +Any new root-level requirement goes into the service unit (via setup hooks), never into runtime code. + +### 8. The daemon verifies — it doesn't start + +`ContainerdDaemon.up()`: +1. Checks `systemctl is-active --quiet lando-containerd.service` +2. Verifies sockets exist +3. Pings finch-daemon via Dockerode + +If the service isn't active → throw an error telling the user to run `lando setup`. Never start processes or spawn daemons from user code. + +`ContainerdDaemon.down()` is a no-op on Linux/WSL. The service keeps running for fast restart. Only macOS (Lima VM) actually stops something. + +## Current Status + +### Working ✅ +- Engine detection and backend selection (`containerd` / `docker` / `auto`) +- Systemd service creation and management via `lando setup` +- Image building via buildctl (no sudo) +- Image loading via Dockerode/finch-daemon (no sudo) +- Container inspection via Dockerode/finch-daemon (no sudo) +- Compose operations via docker-compose + `DOCKER_HOST` (no sudo) +- Container creation and network creation (no sudo) +- Container creation with `CONTAINERD_ADDRESS` env var for OCI hooks (no sudo) +- **Container start** via runc + nerdctl OCI hooks (no sudo) — the former "shim deadlock" blocker is resolved +- `lando destroy` (no sudo) +- CNI network config bridging — all compose-defined networks get CNI conflist files pre-created before docker-compose up (covers `_default`, custom named networks, proxy networks, etc.) +- Systemd service sets `NERDCTL_TOML` and `CNI_PATH` env vars so OCI hooks use Lando's isolated CNI paths +- **Outbound internet from containers** — Fixed via corrected CNI plugin chain (removed `tc-redirect-tap`, added `portmap`/`tuning`), systemd service now enables `net.ipv4.ip_forward=1` and creates iptables `LANDO-FORWARD` chain for container subnet traffic +- **CNI conflist migration** — Old conflist files (with `tc-redirect-tap`) are automatically detected and rewritten in-place with the correct plugin chain while preserving subnet/bridge/nerdctlID +- **Multi-container orchestration** — Full `lando start` → running container end-to-end flow verified for multi-service apps. `docker-compose up` via finch-daemon starts all services simultaneously. CNI conflist files pre-created for all networks (default + custom). Inter-container DNS via `/etc/hosts` injection (the `app-add-2-landonet.js` containerd path scans all containers, collects IPs, and injects all aliases into all containers). Verified: multiple services start, each gets an IP from the CNI bridge subnet (`10.4.x.0/24`), cross-container name resolution works via injected `/etc/hosts` entries, services survive stop/restart cycles. + +### In Progress 🔧 +- (None currently) + +### Not Started 📋 +- macOS support (Lima VM integration exists but untested with new architecture) +- Windows non-WSL support +- Plugin compatibility verification +- Installer/packaging updates to bundle containerd stack + +### Gotchas for Next Agent +- **NERDCTL_TOML env var is CRITICAL for OCI hooks**: finch-daemon injects nerdctl `createRuntime` OCI hooks into every container's OCI spec. These hooks run as root within the systemd service context. Without `NERDCTL_TOML` pointing to Lando's `nerdctl.toml`, the hooks look for `/etc/nerdctl/nerdctl.toml` (doesn't exist), fall back to `/etc/cni/net.d/` for CNI configs, and **self-deadlock** on `/etc/cni/net.d/.nerdctl.lock` (flock acquired on FD N, then re-acquired on FD N+1 — flock is not re-entrant across different file descriptors). The fix: `Environment=NERDCTL_TOML=` in the systemd service. This propagates through finch-daemon's process env into the OCI hook env list. +- **Shim socket directory is hardcoded**: containerd v2's `pkg/shim/util_unix.go` uses `defaults.DefaultStateDir = "/run/containerd"` as a compile-time constant for `SocketAddress()`. ALL containerd instances share `/run/containerd/s/` for shim sockets. Hashes are unique per instance (sha256 of `containerdAddress + namespace + id`), so the sockets don't conflict — but the directory must exist and be writable. The systemd service's `ExecStartPre` creates it with `mkdir -p /run/containerd/s`. +- **State directory is now ephemeral**: Moved from `~/.lando/state/containerd` (persistent) to `/run/lando/containerd` (tmpfs, under `RuntimeDirectory=lando`). This means shim bundles are cleaned on reboot, which is correct — containerd state is transient. Persistent data (images, snapshots) remains in `~/.lando/data/containerd` (the `root` directory). +- **Re-running `lando setup` is required** after this change: The `hasRun` check now verifies the service file contains `/run/containerd/s`, `NERDCTL_TOML=`, and the containerd config has `state = "/run/lando/containerd"`. Existing installs will re-run the setup-containerd-service task automatically. +- `NerdctlCompose` (`lib/backends/containerd/nerdctl-compose.js`) and `setup-engine-containerd.js` are **deprecated dead code**. Production uses `docker-compose + DOCKER_HOST` via `BackendManager._createContainerdEngine()`. The files are kept for reference but removed from the public index exports. +- `FinchDaemonManager.start()` uses destructured `const {spawn} = require('child_process')` — cannot be stubbed with sinon alone; needs `proxyquire` or `rewire` for full spawn-level testing. The lifecycle tests cover `_isProcessRunning`, `stop`, `isRunning`, and `_cleanup` but not the actual `spawn` call. +- `LimaManager._run()` lazily requires `utils/run-command` inside the method body, so the `runCommand` dependency cannot be stubbed without `proxyquire`. Tests stub `_run` on the instance instead, which covers all public method behavior but not the actual CLI invocation. +- The smoke test script (`scripts/test-containerd-engine.sh`) now tests the **production path** (`docker-compose + DOCKER_HOST + finch-daemon`) instead of the deprecated `nerdctl compose` path. It requires `finch-daemon` and `docker-compose` binaries. +- `events.emit` stubs for engine.start() tests **must return Bluebird promises** (not native Promises). `router.eventWrapper` chains `.tap()` which is Bluebird-only. Use `require('../lib/promise').resolve()` in test stubs. +- `datum.opts.env` is **NOT forwarded** through the compose closure. `compose.js`'s `buildShell()` returns `{mode, cstdio, silent}` — no `env` property. The only env vars in the shell opts come from `process.env` and the containerd overrides (`DOCKER_HOST`, `DOCKER_BUILDKIT`, `BUILDKIT_HOST`). +- CNI conflist files are written to `/etc/lando/cni/finch/` with the naming pattern `nerdctl-.conflist`. Tests using mock-fs must mock that path. +- The Leia containerd test (`examples/containerd/README.md`) uses `LANDO_ENGINE=containerd` prefix on every `lando` command. This is needed because Leia runs commands in isolation — each line is a fresh shell, so env vars don't persist. The CI workflow uses `auto-setup: false` so `lando setup` runs inside the test itself (same pattern as `setup-linux`). +- **CNI plugin chain is `bridge → portmap → firewall → tuning`**: The old chain included `tc-redirect-tap` which is NOT in the standard `containernetworking/plugins` release (it's from `github.com/awslabs/tc-redirect-tap` and is only needed for Kata/Firecracker VMs). `ensure-cni-network.js` now auto-migrates old conflist files in-place, preserving subnet/bridge/nerdctlID. Tests using mock-fs that create conflist files must use the correct 4-plugin chain or the migration will overwrite them. +- **LANDO-FORWARD iptables chain**: The systemd service creates a `LANDO-FORWARD` chain in ExecStartPre that ACCEPTs traffic from/to `10.4.0.0/16` (the Lando subnet range). This is a belt-and-suspenders safety net — the CNI `firewall` plugin also manages per-container FORWARD rules via its `CNI-FORWARD` chain, but the host's default FORWARD policy may be DROP. The `LANDO-FORWARD` chain is flushed and re-created on each service start. +- **`net.ipv4.ip_forward=1` is set by the systemd service**: ExecStartPre runs `sysctl -w net.ipv4.ip_forward=1`. The CNI bridge plugin with `isGateway: true` also enables this per-container, but setting it in the service ensures it's enabled before any container starts and survives across container restarts. +- **`hasRun` check now verifies `ip_forward` and `LANDO-FORWARD`**: The setup-containerd-service task's `hasRun` checks for `net.ipv4.ip_forward=1` and `LANDO-FORWARD` in the service file content. Old service files without these will trigger automatic re-setup on the next `lando setup`. +- **System containerd can coexist**: Lando's containerd shares `/run/containerd/s/` (shim sockets) and `/run/containerd/runc/` (runc state) with system containerd. Hashes/namespaces are unique, so there's no conflict. The `NERDCTL_TOML` env var ensures OCI hooks use Lando's CNI paths, not the system's. +- **Sinon `.withArgs().returns()` chaining pitfall**: Do NOT chain `sinon.stub().withArgs('a').returns(x).withArgs('b').returns(y)` — the second `.withArgs()` operates on the behavior object returned by `.returns()`, not the original stub. Both args will return the LAST value. Instead, use separate lines: `const s = sinon.stub(); s.withArgs('a').returns(x); s.withArgs('b').returns(y);` +- **Inter-container DNS on containerd uses `/etc/hosts`, not Docker DNS**: The containerd path in `app-add-2-landonet.js` does NOT use Docker's built-in DNS (containers are not reconnected to `lando_bridge_network` via Docker API). Instead, it scans each container's IP from the `nerdctl/networks` label + `unknown-ethN` mapping and injects all `..internal` aliases into every container's `/etc/hosts` via Dockerode exec. This means: (1) aliases only update on `lando start`, not dynamically; (2) if a container's IP changes (e.g., after restart), the hosts file is re-injected on the next `post-start`; (3) the `getContainerdNetworkIP()` function prefers IPs from `lando_bridge_network` > `${project}_default` > `proxyNet`, but in practice only `${project}_default` is in the `nerdctl/networks` label. +- **finch-daemon doesn't persist Docker API network labels across restarts**: When the `lando-containerd.service` restarts, all networks lose their `com.docker.compose.*` labels. docker-compose v2 validates these labels and refuses to start if they're missing/wrong. Fix: `removeStaleComposeNetworks()` removes unlabeled project networks before compose up. Additionally, finch-daemon auto-reports Docker API networks (without labels) for any CNI conflist file it discovers, so `removeComposeCniConflists()` removes conflist files before network cleanup to prevent ghost networks. +- **CNI portmap plugin rejects HostPort:0**: The standard `portmap` CNI plugin (v1.6.2) validates `hostPort > 0`. Docker handles random port allocation (`-p 0:80`) BEFORE container start, but nerdctl's OCI hook passes `HostPort:0` directly to portmap. Fix: portmap is removed from the CNI conflist plugin chain. Lando uses Traefik proxy for port routing instead. The compose start is split into two phases (`up --no-start` + conflist overwrite + `up --detach`) so we can overwrite finch-daemon's conflist (which includes portmap) with our version (without portmap) between network creation and container start. +- **Two-phase compose start for containerd**: The compose closure in `backend-manager.js` splits `docker-compose up` into three steps: (1) `removeComposeCniConflists + removeStaleComposeNetworks` clean up, (2) `docker-compose up --no-start` creates networks and containers, (3) `ensureComposeCniNetworks` overwrites finch-daemon's conflist, (4) `docker-compose up --detach --no-recreate` starts containers. This is necessary because finch-daemon writes conflist files with portmap during network creation, and we can't modify them between creation and start in a single compose up. + +### Recently Completed +- **Task 41: Multi-container orchestration verification + finch-daemon fixes** — Verified and fixed docker-compose multi-container flows on the containerd backend. **Two finch-daemon issues fixed**: (1) finch-daemon doesn't persist Docker API network labels across restarts, causing docker-compose v2 to reject existing networks ("not created by compose"). Fix: `removeStaleComposeNetworks()` removes unlabeled project networks before compose up, and `removeComposeCniConflists()` removes CNI conflist files that cause finch-daemon to auto-report ghost networks. (2) finch-daemon writes CNI conflist files that include the `portmap` plugin, which fails on `HostPort:0` (random port) — Docker handles this via port allocation before container start, but nerdctl's OCI hook passes it directly to portmap. Fix: two-phase compose start — Phase 1 runs `docker-compose up --no-start` (creates networks/containers), Phase 2 overwrites conflist files to remove portmap (Lando proxy handles port publishing), Phase 3 runs `docker-compose up --detach --no-recreate` (starts containers). **Architecture confirmation**: `app-add-2-landonet.js` containerd path scans all containers post-start, collects IPs from the `nerdctl/networks` label + `unknown-ethN` interface mapping, and injects all aliases into all containers' `/etc/hosts` via Dockerode exec. **Test coverage added**: (1) 11 new unit tests in `test/app-add-2-landonet.spec.js`. (2) 4 new integration tests in `test/containerd-compose-start.spec.js`. (3) Enhanced Leia E2E test with multi-container verification. **Verified live**: both web (nginx:1.22.1) and web2 (nginx-unprivileged:1.26.1) containers start and serve content via docker-compose exec. All 737 tests pass (0 failing). +- **Task 40: Fix outbound internet from containers** — Root-caused and fixed the outbound connectivity blocker. **Root cause**: The CNI conflist plugin chain included `tc-redirect-tap`, which is from a separate AWS Labs repository (`github.com/awslabs/tc-redirect-tap`) and is NOT included in the `containernetworking/plugins` v1.6.2 release that `lando setup` installs. This plugin is only needed for VM-based runtimes (Kata, Firecracker), not standard runc containers. Its presence in the chain caused the CNI ADD operation to fail or produce incomplete networking (bridge created but iptables FORWARD/MASQUERADE rules not properly applied). **Three-part fix**: (1) Replaced the CNI plugin chain from `[bridge, firewall, tc-redirect-tap]` to `[bridge, portmap, firewall, tuning]` — all plugins that are actually installed and appropriate for runc. Added `portmap` for port publishing support and `tuning` for sysctl/veth tuning. (2) Added `sysctl -w net.ipv4.ip_forward=1` and a `LANDO-FORWARD` iptables chain to the systemd service's `ExecStartPre` — ensures IP forwarding is enabled and FORWARD chain accepts Lando subnet traffic regardless of host firewall policy. (3) Added conflist migration logic: existing conflist files with the old plugin chain are automatically detected and rewritten in-place while preserving subnet, bridge name, and nerdctlID. Updated `hasRun` check to detect old service files. All 722 tests pass (net +9 new tests: 7 migration tests, 3 plugin chain tests, -1 replaced test). +- **Task 39: OCI hook deadlock fix — containers now start** — Root-caused and fixed the "get state: context deadline exceeded" blocker that prevented all container starts. **Root cause**: finch-daemon injects `nerdctl internal oci-hook createRuntime` hooks into OCI specs. These hooks run as root and look for nerdctl config at `/etc/nerdctl/nerdctl.toml` (the root default). Since this file didn't exist, nerdctl fell back to `/etc/cni/net.d/` for CNI config and locked `/etc/cni/net.d/.nerdctl.lock`. A bug in nerdctl's lock handling causes a self-deadlock: it acquires flock on one FD, then tries to acquire it again on a different FD to the same file (flock is not re-entrant across FDs). This blocked `runc create` → shim → containerd indefinitely. **Fix**: Added `Environment=NERDCTL_TOML=` and `Environment=CNI_PATH=` to the systemd service unit. These env vars propagate through finch-daemon into the OCI hook env, directing nerdctl to use `/etc/lando/cni/` for CNI configs instead of the system directory. The `hasRun` check now verifies `NERDCTL_TOML=` is present in the service file, forcing re-setup on existing installs. **Verified**: containers start, tasks reach RUNNING status, eth0 gets IP from CNI bridge (10.4.0.0/24), gateway ping works. Added 6 new tests to `get-nerdctl-config.spec.js` (713 total, 0 failing). +- **Task 38: State directory fix and shim investigation** — Investigated the "get state: context deadline exceeded" blocker. Key findings: (1) containerd v2's shim socket path is hardcoded to `/run/containerd/s/` via compile-time constant `DefaultStateDir` in `pkg/shim/util_unix.go` — no config can change it. Hashes include the containerd address so sockets are unique per instance. (2) **The failure is NOT caused by system containerd coexistence** — tested with system containerd stopped, same result. The shim creates its socket and containerd connects to it, but runc never starts the container (no `init.pid`). The actual root cause is in runc/shim/OCI-hook interaction, not socket conflicts. Fixes applied: moved containerd `state` from `~/.lando/state/containerd` (persistent) to `/run/lando/containerd` (tmpfs) — prevents stale-bundle issues after reboots. Added `mkdir -p /run/containerd/s` to `ExecStartPre`. Fixed `_ensureDirectories()` to not attempt mkdir on root-owned `/run/lando/containerd`. Updated hasRun checks to detect old configs. All 707 tests pass. +- **Task 37: End-to-end `lando start` integration tests for containerd backend** — Two layers of test coverage: + - **Mocha (stub-based):** 44 tests in `test/containerd-compose-start.spec.js` covering the production compose closure in `BackendManager._createContainerdEngine()`. Tests cover 8 areas: env injection, shell.sh() invocation, CNI network bridging (mock-fs verified), all compose commands, Bluebird Proxy wrapping, full engine.start() → router.eventWrapper → compose flow, Docker/containerd parity, binary path resolution. + - **Leia (real containers):** `examples/containerd/README.md` — full end-to-end test exercising `LANDO_ENGINE=containerd lando setup -y` → binary installation → systemd service → socket availability → `lando start` → container lifecycle (list, exec, stop, restart) → `lando destroy`. CI workflow at `.github/workflows/pr-containerd-tests.yml` (mirrors `pr-setup-linux-tests.yml` pattern with `auto-setup: false`). +- **Task 36: LimaManager + WslHelper unit tests and smoke test update** — Added 60 tests for `LimaManager` covering all 10 methods (constructor, `vmExists`, `createVM`, `startVM`, `stopVM`, `isRunning`, `getSocketPath`, `exec`, `nerdctl`, `_parseListOutput`). Added 19 tests for `WslHelper` covering all 3 methods (`isWsl`, `isDockerDesktopRunning`, `ensureSocketPermissions`). Rewrote smoke test script to exercise the production `docker-compose + finch-daemon` path instead of deprecated `nerdctl compose`. +- **Task 35: Bug fix, test coverage, and dead code cleanup** — Fixed binary path bug in `lando-setup-containerd-engine-check.js` (was checking `~/.lando/bin/` instead of `/usr/local/lib/lando/bin/` for system binaries). Added 23 new tests for `ensure-cni-network.js` covering conflist creation, subnet allocation, error handling. Extended `finch-daemon-manager.spec.js` from 18 to 34 tests covering `_isProcessRunning`, `start`, `stop`, `isRunning`, `_cleanup`. Deprecated unused `NerdctlCompose` and `setup-engine-containerd.js`; removed `NerdctlCompose` from public exports. +- **Task 34: Comprehensive CNI network config bridging** — Created `utils/ensure-compose-cni-networks.js` to parse compose YAML files and pre-create CNI conflist files for ALL non-external networks before docker-compose up. Updated `lib/backend-manager.js` compose wrapper to use this instead of single-network `ensureCniNetwork()`. Previously only `${project}_default` got a CNI config; now custom networks (e.g. `frontend`, `backend`, proxy `edge`) are covered. 17 new tests in `test/ensure-compose-cni-networks.spec.js`. This resolves the "compose-created networks need CNI conflist files" item from the In Progress list. +- **Task 33: CNI directory permissions** — Fixed the EACCES blocker: `lando setup` now sets `chgrp lando` + `chmod g+w` on `/etc/cni/net.d/finch` so `ensureCniNetwork()` can write conflist files from user-land without sudo. Permissions are also enforced on every systemd service start via `ExecStartPre`. The `hasRun` check detects missing permissions so re-running `lando setup` will fix existing installs. Added CNI directory permission check to `lando doctor`. Fixed pre-existing test failure in `containerd-proxy-adapter.spec.js` (missing mock-fs for CNI directory). +- **Task 30: Troubleshooting documentation** — Created `docs/troubleshooting/containerd.md` covering all 10 error scenarios. Updated 7 message modules to link to specific troubleshooting sections instead of the generic engine config page. +- **Task 28: Proxy (Traefik) compatibility** — Traefik proxy now works with containerd backend via finch-daemon's Docker API. Created `proxy-adapter.js` for CNI pre-creation and compatibility checks. Fixed `app-add-proxy-2-landonet.js` to no longer skip containerd (uses Dockerode-compatible getNetwork). Updated `app-start-proxy.js` to ensure proxy CNI networks. finch-daemon verified compatible: ping, events API, and label format all pass. See `docs/dev/containerd-proxy-design.md`. **Known caveat:** end-to-end test blocked by Docker Desktop's WSL proxy binding ports 80/443. +- **Task 32: BRIEF violation cleanup** — Removed all nerdctl shellouts from user-facing code. Renamed misleading `nerdctl-*` message files. Fixed `app-check-containerd-compat.js` to use docker-compose + DOCKER_HOST instead of `nerdctl compose`. Updated all related tests. (See `todo.md` for full file list.) diff --git a/app.js b/app.js index 920f230b3..a54c609d2 100644 --- a/app.js +++ b/app.js @@ -210,6 +210,9 @@ module.exports = async (app, lando) => { // Check for docker compat warnings and surface them nicely as well app.events.on('post-start', async () => await require('./hooks/app-check-docker-compat')(app, lando)); + // Check for containerd compat warnings and surface them nicely as well + app.events.on('post-start', async () => await require('./hooks/app-check-containerd-compat')(app, lando)); + // throw service not start errors app.events.on('post-start', 1, async () => await require('./hooks/app-check-v4-service-running')(app, lando)); diff --git a/builders/_proxy.js b/builders/_proxy.js index 1f31be61c..a9d0decec 100644 --- a/builders/_proxy.js +++ b/builders/_proxy.js @@ -6,7 +6,7 @@ const _ = require('lodash'); /* * Helper to get core proxy service */ -const getProxy = ({proxyCommand, proxyPassThru, proxyDomain, userConfRoot, version = 'unknown'} = {}) => { +const getProxy = ({proxyCommand, proxyPassThru, proxyDomain, userConfRoot, dockerSocket, version = 'unknown'} = {}) => { return { services: { proxy: { @@ -21,7 +21,7 @@ const getProxy = ({proxyCommand, proxyPassThru, proxyDomain, userConfRoot, versi }, networks: ['edge'], volumes: [ - '/var/run/docker.sock:/var/run/docker.sock', + `${dockerSocket || '/var/run/docker.sock'}:/var/run/docker.sock`, `${userConfRoot}/scripts/proxy-certs.sh:/scripts/100-proxy-certs`, 'proxy_config:/proxy_config', ], diff --git a/builders/lando-v4.js b/builders/lando-v4.js index e8108a56b..5966b23bb 100644 --- a/builders/lando-v4.js +++ b/builders/lando-v4.js @@ -434,12 +434,16 @@ module.exports = { }, config.labels); // add it all 2getha + const networks = lando.engine?.engineBackend === 'containerd' + ? {} + : {[this.network]: {aliases: this.hostnames}}; + this.addLandoServiceData({ environment, extra_hosts: ['host.lando.internal:host-gateway'], labels, logging: {driver: 'json-file', options: {'max-file': '3', 'max-size': '10m'}}, - networks: {[this.network]: {aliases: this.hostnames}}, + networks, user: this.user.name, volumes: this.volumes, }); diff --git a/components/docker-engine.js b/components/docker-engine.js index 617a4a6ea..64325ce8b 100644 --- a/components/docker-engine.js +++ b/components/docker-engine.js @@ -3,6 +3,7 @@ const fs = require('fs-extra'); const path = require('path'); + const merge = require('lodash/merge'); const slugify = require('slugify'); @@ -35,7 +36,21 @@ class DockerEngine extends Dockerode { orchestrator = DockerEngine.orchestrator, } = {}) { super(config); - this.builder = builder; + const userConfRoot = config.userConfRoot || path.join(require('os').homedir(), '.lando'); + const systemBinDir = config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + + this.containerdMode = config.containerdMode === true + || config.engine === 'containerd' + || process.env.LANDO_ENGINE === 'containerd'; + this.containerdNamespace = config.containerdNamespace || 'default'; + this.containerdSocket = config.containerdSocket || '/run/lando/containerd.sock'; + this.buildkitHost = config.buildkitHost || 'unix:///run/lando/buildkitd.sock'; + this.buildctl = config.buildctlBin + || (fs.existsSync(path.join(userConfRoot, 'bin', 'buildctl')) ? path.join(userConfRoot, 'bin', 'buildctl') : path.join(systemBinDir, 'buildctl')); + this.nerdctlConfig = config.nerdctlConfig || path.join(userConfRoot, 'config', 'nerdctl.toml'); + this.authConfig = config.authConfig || {env: {}}; + this.builder = this.containerdMode ? path.join(userConfRoot, 'bin', 'nerdctl') : builder; + if (this.containerdMode) this.modem.socketPath = config.socketPath || '/run/lando/finch.sock'; this.debug = debug; this.orchestrator = orchestrator; } @@ -56,6 +71,10 @@ class DockerEngine extends Dockerode { id = tag, sources = [], } = {}) { + if (this.containerdMode) { + return this.buildx(dockerfile, {attach, buildArgs, context, id, sources, tag}); + } + // handles the promisification of the merged return const awaitHandler = async () => { return new Promise((resolve, reject) => { @@ -244,21 +263,27 @@ class DockerEngine extends Dockerode { fs.copySync(dockerfile, path.join(context, 'Dockerfile')); dockerfile = path.join(context, 'Dockerfile'); - // build initial buildx command - const args = { - command: this.builder, - args: [ - 'buildx', - 'build', - `--file=${dockerfile}`, - '--progress=plain', - `--tag=${tag}`, - context, - ], - }; + const outputPath = this.containerdMode ? path.join(context, 'image.tar') : null; + + // build initial build command + const args = this.containerdMode + ? this._getContainerdBuildctlCommand({buildArgs, context, dockerfile, outputPath, tag}) + : { + command: this.builder, + args: [ + 'buildx', + 'build', + `--file=${dockerfile}`, + '--progress=plain', + `--tag=${tag}`, + context, + ], + }; // add any needed build args into the command - for (const [key, value] of Object.entries(buildArgs)) args.args.push(`--build-arg=${key}=${value}`); + if (!this.containerdMode) { + for (const [key, value] of Object.entries(buildArgs)) args.args.push(`--build-arg=${key}=${value}`); + } // if we have sshKeys then lets pass those in if (sshKeys.length > 0) { @@ -274,7 +299,7 @@ class DockerEngine extends Dockerode { // if we have an sshAuth socket then add that as well if (sshSocket && fs.existsSync(sshSocket)) { - args.args.push(`--ssh=agent=${sshSocket}`); + args.args.push(`--ssh=${this.containerdMode ? 'default' : 'agent'}=${sshSocket}`); debug('passing in ssh agent socket %o', sshSocket); } @@ -282,7 +307,8 @@ class DockerEngine extends Dockerode { // @TODO: consider other opts? https://docs.docker.com/reference/cli/docker/buildx/build/ args? // secrets? // gha cache-from/to? - const buildxer = require('../utils/run-command')(args.command, args.args, {debug}); + const env = {...process.env, ...(this.authConfig.env || {})}; + const buildxer = require('../utils/run-command')(args.command, args.args, {debug, env}); // augment buildxer with more events so it has the same interface as build buildxer.stdout.on('data', data => { @@ -297,12 +323,24 @@ class DockerEngine extends Dockerode { for (const line of data.toString().trim().split('\n')) debug(line); stderr += data; }); - buildxer.on('close', code => { + buildxer.on('close', async code => { // if code is non-zero and we arent ignoring then reject here if (code !== 0 && !ignoreReturnCode) { buildxer.emit('error', require('../utils/get-buildx-error')({code, stdout, stderr})); // otherwise return done } else { + try { + if (this.containerdMode && outputPath) { + const loadOutput = await this._loadContainerdImage(outputPath, tag, debug); + stdout += loadOutput; + } + } catch (error) { + buildxer.emit('error', error); + return; + } finally { + if (outputPath && fs.existsSync(outputPath)) fs.removeSync(outputPath); + } + buildxer.emit('done', {code, stdout, stderr}); buildxer.emit('finished', {code, stdout, stderr}); buildxer.emit('success', {code, stdout, stderr}); @@ -310,12 +348,63 @@ class DockerEngine extends Dockerode { }); // debug - debug('buildxing image %o from %o with build-args', tag, context, buildArgs); + debug('%s image %o from %o with build-args %o', this.containerdMode ? 'building with buildctl' : 'buildxing', tag, context, buildArgs); // return merger return mergePromise(buildxer, awaitHandler); } + _getContainerdBuildctlCommand({buildArgs = {}, context, dockerfile, outputPath, tag}) { + const filename = path.basename(dockerfile); + const args = [ + '--addr', this.buildkitHost, + 'build', + '--frontend', 'dockerfile.v0', + '--local', `context=${context}`, + '--local', `dockerfile=${path.dirname(dockerfile)}`, + '--opt', `filename=${filename}`, + '--opt', `platform=${process.arch === 'arm64' ? 'linux/arm64' : 'linux/amd64'}`, + '--output', `type=docker,name=${tag},dest=${outputPath}`, + '--progress=plain', + ]; + + for (const [key, value] of Object.entries(buildArgs)) args.push('--opt', `build-arg:${key}=${value}`); + + return { + command: this.buildctl, + args, + }; + } + + async _loadContainerdImage(imageTarball, tag, debug = this.debug) { + // Load via finch-daemon's Docker-compatible API (Dockerode). + // finch-daemon proxies to containerd, so this loads into both. + return this._loadContainerdImageIntoFinch(imageTarball, tag, debug); + } + + async _loadContainerdImageIntoFinch(imageTarball, tag, debug = this.debug) { + return new Promise((resolve, reject) => { + const stream = fs.createReadStream(imageTarball); + + this.loadImage(stream, (error, responseStream) => { + if (error) return reject(error); + if (!responseStream) return resolve(''); + + this.modem.followProgress(responseStream, (followError, output = []) => { + if (followError) return reject(followError); + + const messages = output + .map(event => event.stream || event.status || '') + .filter(Boolean); + + for (const message of messages) debug(message.trim()); + debug('loaded image %o into finch-daemon from %o', tag, imageTarball); + resolve(messages.join('')); + }); + }); + }); + } + /* * A helper method that automatically will build the image needed for the run command * NOTE: this is only available as async/await so you cannot return directly and access events diff --git a/docs/config/engine.md b/docs/config/engine.md new file mode 100644 index 000000000..155298445 --- /dev/null +++ b/docs/config/engine.md @@ -0,0 +1,89 @@ +--- +title: Engine +description: How to configure the Lando container engine backend (Docker or containerd) +--- + +# Engine + +Lando supports multiple container engine backends. By default it uses [Docker](https://www.docker.com/) but can also use [containerd](https://containerd.io/) with [nerdctl](https://github.com/containerd/nerdctl) as an alternative runtime. + +The engine backend can be configured via the `engine` key in the [global config](global.md) or per-project in your `.lando.yml`. + +## Available Values + +| Value | Description | +|---|---| +| `auto` | **(default)** Auto-detects the best available backend. Prefers containerd if all binaries are found, otherwise falls back to Docker. | +| `docker` | Always use the Docker daemon and Docker Compose. This is the traditional Lando behavior. | +| `containerd` | Use Lando's own isolated containerd + buildkitd + nerdctl stack. | + +## Configuration + +**Global config (~/.lando/config.yml)** + +```yaml +# use auto-detection (default) +engine: auto + +# force Docker +engine: docker + +# force containerd +engine: containerd +``` + +**Per-project (.lando.yml)** + +```yaml +name: my-app +engine: containerd +services: + web: + type: php:8.2 + via: nginx +``` + +## Auto-Detection + +When `engine` is set to `auto` (the default), Lando checks for the presence of three binaries inside `~/.lando/bin/`: + +1. `containerd` — the container runtime daemon +2. `nerdctl` — the Docker-compatible CLI for containerd +3. `buildkitd` — the image build daemon + +If **all three** binaries exist, Lando uses the containerd backend. If any are missing, it falls back to Docker. + +::: tip +The containerd binaries are installed automatically by `lando setup` when containerd support is enabled. You don't need to install them manually. +::: + +## Overriding Binary Paths + +If your containerd stack binaries are installed in a non-standard location, you can override each path individually in the [global config](global.md): + +```yaml +# Override individual binary paths +containerdBin: /usr/local/bin/containerd +nerdctlBin: /usr/local/bin/nerdctl +buildkitdBin: /usr/local/bin/buildkitd + +# Override the containerd socket path +containerdSocket: /run/lando/containerd.sock +``` + +By default, Lando looks for binaries in `~/.lando/bin/` and manages its own isolated containerd socket at `/run/lando/containerd.sock`. + +## How It Works + +When using the containerd backend, Lando: + +1. Starts its **own isolated** containerd and buildkitd daemons (separate from any system containerd) +2. Uses `nerdctl compose` instead of `docker compose` for service orchestration +3. Uses `nerdctl` instead of `docker` for container inspection, listing, and management +4. Manages all state in `~/.lando/` to avoid interfering with system containers + +The containerd backend is fully compatible with existing Lando apps and compose files — no changes to your `.lando.yml` services are required. + +::: warning EXPERIMENTAL +The containerd engine backend is experimental. While it is designed to be a drop-in replacement for the Docker backend, some edge cases may behave differently. Please report any issues you encounter. +::: diff --git a/docs/dev/containerd-performance.md b/docs/dev/containerd-performance.md new file mode 100644 index 000000000..359e89139 --- /dev/null +++ b/docs/dev/containerd-performance.md @@ -0,0 +1,129 @@ +# containerd Engine Performance + +This document covers performance characteristics of the containerd backend compared to the Docker backend, and how to benchmark them. + +## Benchmark Script + +The benchmark script at `scripts/benchmark-engines.sh` measures three core operations: + +| Operation | What It Measures | +|-----------|-----------------| +| **Image pull** | Downloads `alpine:latest` from a registry. Measures registry I/O and image unpacking speed. The image is removed before each pull to ensure a fresh download. | +| **Container run** | Runs `echo hello` in a fresh container and removes it (`--rm`). Measures container creation, execution, and teardown overhead. | +| **Container list** | Runs `ps` to list containers. Measures daemon response time for metadata queries. | + +### Usage + +```bash +# Compare both engines (3 runs each, default) +./scripts/benchmark-engines.sh + +# Benchmark only containerd with 5 runs +./scripts/benchmark-engines.sh --engine containerd --runs 5 + +# Benchmark only Docker, output to a specific file +./scripts/benchmark-engines.sh --engine docker --runs 3 --output ./results.md +``` + +Results are written as a markdown table to `/tmp/lando-benchmark-.md` by default. + +### Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `DOCKER_BIN` | `docker` | Path to the Docker CLI binary | +| `NERDCTL_BIN` | `~/.lando/bin/nerdctl` | Path to the nerdctl binary | +| `CONTAINERD_SOCK` | `/run/lando/containerd.sock` | Path to the containerd socket | +| `LANDO_DIR` | `~/.lando` | Lando configuration root | + +## Known Performance Characteristics + +### Linux: containerd avoids Docker Desktop overhead + +On Linux, Lando's containerd backend runs natively — there is no Docker Desktop layer, no VM, and no Docker daemon multiplexing. This eliminates several sources of overhead: + +- **No Docker daemon**: containerd is a minimal container runtime. Docker adds an additional daemon layer (dockerd) on top of containerd that handles API translation, logging drivers, networking plugins, and more. Bypassing this layer reduces per-operation latency. +- **No Docker Desktop VM**: On macOS and Windows, Docker Desktop runs containers inside a Linux VM (HyperKit / WSL2). On Linux with containerd, containers run directly on the host kernel. +- **Direct socket communication**: nerdctl talks to containerd's gRPC API directly, without the Docker API translation layer. + +### Container startup + +Container startup time is primarily bounded by: + +1. Image layer unpacking (first run only — cached thereafter) +2. Namespace and cgroup setup (kernel overhead, similar for both engines) +3. Network namespace creation (Lando uses CNI with containerd vs. Docker's libnetwork) + +In practice, the difference for container startup is small (tens of milliseconds) because both engines ultimately call the same Linux kernel primitives. + +### Image operations + +Image pull performance is dominated by network I/O and registry latency. The containerd backend uses the same OCI registries and the same content-addressable storage model. Differences are typically negligible. + +### BuildKit Cache Optimization + +The containerd backend uses BuildKit directly (not via Docker's BuildKit integration). The BuildKit configuration (see Task 24) includes GC policies that manage the build cache: + +```toml +[worker.containerd] + gc = true + gckeepstorage = 10000 # ~10 GB + + [[worker.containerd.gcpolicy]] + keepBytes = 1073741824 # 1 GB reserved + keepDuration = 604800 # 7 days + all = true + + [[worker.containerd.gcpolicy]] + keepBytes = 5368709120 # 5 GB reserved + all = false +``` + +These GC policies ensure the build cache doesn't grow unbounded while retaining frequently-used layers. This is particularly beneficial for iterative development where the same base images and dependency layers are rebuilt frequently. + +### Performance Logging + +The containerd daemon includes built-in performance timers (via `utils/perf-timer.js`) that log elapsed time for key operations when debug mode is enabled: + +- `up()` — total engine startup time (systemd service check + socket verification) + +Enable debug logging with `DEBUG=@lando/*` or by setting `debug: true` in your Lando config to see these timings. + +## Benchmark Results + + + +_No benchmark results recorded yet. Run `./scripts/benchmark-engines.sh` and paste the output here._ + +### Example Output + +```markdown +# Lando Engine Benchmark Results + +- **Date**: 2026-03-14 00:00:00 UTC +- **Host**: Linux 6.x.x x86_64 +- **Runs per operation**: 3 + +## Docker + +| Operation | Mean (ms) | Median (ms) | Raw (ms) | +|-----------|-----------|-------------|----------| +| Image pull (`alpine:latest`) | — | — | — | +| Container run (`echo hello`) | — | — | — | +| Container list (`ps`) | — | — | — | + +## containerd (nerdctl) + +| Operation | Mean (ms) | Median (ms) | Raw (ms) | +|-----------|-----------|-------------|----------| +| Image pull (`alpine:latest`) | — | — | — | +| Container run (`echo hello`) | — | — | — | +| Container list (`ps`) | — | — | — | +``` + +## Future Work + +- **CI integration**: Run benchmarks automatically on tagged releases to track regressions. +- **Application-level benchmarks**: Measure `lando start` / `lando rebuild` end-to-end with a sample app. +- **Memory profiling**: Compare RSS of containerd + buildkitd vs. dockerd + containerd + buildkitd. +- **macOS Lima benchmarks**: Compare containerd-in-Lima vs. Docker Desktop performance on macOS. diff --git a/docs/dev/containerd-proxy-design.md b/docs/dev/containerd-proxy-design.md new file mode 100644 index 000000000..a95d8ee16 --- /dev/null +++ b/docs/dev/containerd-proxy-design.md @@ -0,0 +1,118 @@ +# Containerd Proxy (Traefik) Design + +How Lando's Traefik proxy works with the containerd backend. + +## Overview + +Lando uses Traefik as a reverse proxy to route `*.lndo.site` hostnames to the correct container port. Traefik uses the **Docker provider** (`--providers.docker=true`) to discover containers by watching the Docker socket and reading container labels. + +When Lando uses the containerd backend, **finch-daemon** provides a Docker API v1.43 compatibility layer on a Unix socket. Traefik talks to finch-daemon as if it were Docker — no Traefik configuration changes are needed. + +## Architecture + +``` +Browser → http://myapp.lndo.site + │ + ▼ +Traefik (landoproxyhyperion5000gandalfedition-proxy-1) + │ reads labels from containers + │ via Docker provider + │ + ▼ +/var/run/docker.sock (inside container) + │ mounted from host + │ + ▼ (volume mount differs by backend) +Docker backend: /var/run/docker.sock ──► dockerd +Containerd backend: /run/lando/finch.sock ──► finch-daemon ──► containerd +``` + +## How It Works + +### 1. Socket Mapping + +The `lando-set-proxy-config.js` hook detects the containerd backend and sets `lando.config.dockerSocket` to the finch-daemon socket path: + +```js +// hooks/lando-set-proxy-config.js +if (backend === 'containerd') { + lando.config.dockerSocket = getContainerdPaths(lando.config).finchSocket; + // → /run/lando/finch.sock +} +``` + +The `_proxy` builder uses this to mount the correct host socket into the Traefik container: + +```js +// builders/_proxy.js +volumes: [ + `${dockerSocket || '/var/run/docker.sock'}:/var/run/docker.sock`, +] +``` + +**Result:** Inside the Traefik container, `/var/run/docker.sock` always points to the active Docker-compatible API — whether that's Docker's real socket or finch-daemon's. + +### 2. CNI Network Bridging + +**The gap:** docker-compose via finch-daemon creates networks at the Docker API level but NOT at the CNI level. The nerdctl OCI runtime hook needs CNI conflist files for container networking. + +**The fix:** `ContainerdProxyAdapter.ensureProxyNetworks()` pre-creates CNI configs for the proxy's `_edge` network before the proxy container starts: + +``` +/etc/cni/net.d/finch/nerdctl-landoproxyhyperion5000gandalfedition_edge.conflist +``` + +This is called from `app-start-proxy.js` when the containerd backend is detected. + +### 3. Bridge Network DNS Aliases + +The `app-add-proxy-2-landonet.js` hook connects the proxy container to the Lando bridge network with DNS aliases for each proxied hostname. This enables container-to-container routing (e.g., one service calling another by its proxy hostname). + +This hook works identically for both backends because: +- `lando.engine.getNetwork()` returns a Dockerode-compatible handle for both Docker and containerd +- For containerd, `ContainerdContainer.getNetwork()` provides `connect()` and `disconnect()` methods backed by finch-daemon's Docker API + +### 4. Container Discovery + +Traefik discovers containers using the Docker events API (`GET /events`). finch-daemon implements this endpoint, so Traefik dynamically picks up new containers as they start. + +Each proxied service gets Traefik labels added by `app-start-proxy.js`: + +``` +traefik.enable=true +traefik.docker.network=landoproxyhyperion5000gandalfedition_edge +traefik.http.routers..rule=HostRegexp(`myapp.lndo.site`) +traefik.http.routers..entrypoints=http +traefik.http.services.-service.loadbalancer.server.port=80 +``` + +These labels are set on the container via docker-compose, which goes through finch-daemon. Traefik reads them from finch-daemon's container inspect API — same format as Docker. + +## Files + +| File | Role | +|------|------| +| `lib/backends/containerd/proxy-adapter.js` | CNI network pre-creation for proxy networks | +| `hooks/lando-set-proxy-config.js` | Sets `dockerSocket` to finch-daemon path for containerd | +| `hooks/app-start-proxy.js` | Starts Traefik, adds labels; calls proxy adapter for containerd CNI | +| `hooks/app-add-proxy-2-landonet.js` | Connects proxy to bridge network (works for both backends) | +| `builders/_proxy.js` | Generates Traefik compose service with socket mount | + +## Containerd-Specific Concerns + +### finch-daemon Events API + +Traefik's Docker provider uses a long-lived connection to `/events` to watch for container start/stop. If finch-daemon's events implementation has gaps, Traefik may miss containers that start after the proxy. + +**Mitigation:** If events don't work, restarting the proxy (`lando restart` or stopping/starting the app) forces Traefik to re-scan all containers. + +### CNI Network Timing + +CNI configs must exist BEFORE docker-compose creates containers on a network. The proxy adapter creates them proactively in `app-start-proxy.js`. The `app-add-proxy-2-landonet.js` hook also ensures the bridge network has a CNI config. + +### No nerdctl, No sudo + +Per the BRIEF's prime directives: +- No nerdctl shellouts from user-facing code +- No sudo in runtime code paths +- All operations go through finch-daemon's Docker API (Dockerode) or docker-compose with `DOCKER_HOST` diff --git a/docs/troubleshooting/containerd.md b/docs/troubleshooting/containerd.md new file mode 100644 index 000000000..6dd19f984 --- /dev/null +++ b/docs/troubleshooting/containerd.md @@ -0,0 +1,322 @@ +--- +title: Troubleshooting the Containerd Engine +description: How to diagnose and fix common issues with Lando's containerd engine backend. +--- + +# Troubleshooting the Containerd Engine + +Lando's containerd backend runs its own isolated stack — containerd, buildkitd, and finch-daemon — managed by a systemd service. This page covers common issues and how to resolve them. + +::: tip +Run `lando doctor` first. It checks for missing binaries, dead sockets, and unreachable daemons and will flag most problems automatically. +::: + +## Quick Diagnostics + +Before diving into specific errors, gather info: + +```bash +# Check if the systemd service is running +systemctl is-active lando-containerd.service + +# Check service logs +journalctl -u lando-containerd.service --no-pager -n 50 + +# Verify your user is in the lando group +groups | grep lando + +# Check that sockets exist +ls -la /run/lando/ + +# Run lando's built-in diagnostics +lando doctor +``` + +## Containerd Is Not Running + +**Error:** `containerd is not running` + +The containerd daemon is not active. This usually means the systemd service has stopped or was never started. + +**Fix:** + +```bash +# Re-run setup to install and start the service +lando setup + +# Or start the service directly if already installed +sudo systemctl start lando-containerd.service + +# Check why it stopped +journalctl -u lando-containerd.service --no-pager -n 100 +``` + +Common causes: +- The system was rebooted and the service wasn't enabled at boot. Run `sudo systemctl enable lando-containerd.service`. +- A configuration error is preventing startup. Check journalctl output for specifics. + +## BuildKit Daemon Is Not Running + +**Error:** `BuildKit daemon is not running` + +buildkitd handles image builds. It runs as part of the `lando-containerd.service` — if containerd is running but buildkitd is not, the service may have partially failed. + +**Fix:** + +```bash +# Restart the entire service (it manages all three daemons) +sudo systemctl restart lando-containerd.service + +# Check buildkitd-specific logs +journalctl -u lando-containerd.service --no-pager | grep buildkitd + +# Verify the socket exists +ls -la /run/lando/buildkitd.sock +``` + +If the socket exists but buildkitd isn't responding, the process may have crashed. A service restart should recover it. + +## finch-daemon Is Not Running + +**Error:** `finch-daemon is not running` + +finch-daemon provides Docker API compatibility — it's what lets docker-compose and Traefik talk to containerd. Without it, compose operations and the proxy will fail. + +**Fix:** + +```bash +# Restart the service +sudo systemctl restart lando-containerd.service + +# Check finch-daemon logs +journalctl -u lando-containerd.service --no-pager | grep finch + +# Verify the socket +ls -la /run/lando/finch.sock + +# Test connectivity manually +curl --unix-socket /run/lando/finch.sock http://localhost/_ping +``` + +A successful ping returns `OK`. If it returns nothing or errors, finch-daemon has crashed or failed to bind to the socket. + +## Binaries Not Found + +**Error:** `containerd backend binaries not found` + +One or more required binaries are missing. The containerd backend needs: `containerd`, `buildkitd`, `finch-daemon`, and `docker-compose`. + +**Fix:** + +```bash +# Re-run setup to install all binaries +lando setup + +# Verify binaries exist +ls -la ~/.lando/bin/ +``` + +`lando setup` installs binaries to `~/.lando/bin/` (user-level) and `/usr/local/lib/lando/bin/` (system-level). If you've moved or deleted them, setup will reinstall them. + +::: tip +You can override binary paths in `~/.lando/config.yml` if your binaries are in a non-standard location. See the [engine configuration docs](../config/engine.md) for details. +::: + +## Permission Denied + +**Error:** `containerd requires elevated permissions` + +Your user cannot access the containerd sockets. After `lando setup`, all runtime operations should work without sudo. + +**Fix:** + +```bash +# Check if your user is in the lando group +groups + +# If 'lando' is not listed, add yourself +sudo usermod -aG lando $USER + +# IMPORTANT: log out and log back in for the group change to take effect +# Or use newgrp as a quick test: +newgrp lando + +# Verify socket permissions +ls -la /run/lando/ +# Sockets should show group 'lando' with 660 permissions: +# srw-rw---- 1 root lando 0 ... containerd.sock +``` + +::: warning +You must log out and log back in (or reboot) after adding yourself to the `lando` group. Running `newgrp lando` in a single terminal is a quick test, but only a full re-login applies the change system-wide. +::: + +If the sockets exist but have wrong permissions, re-run `lando setup` to fix them. + +## Socket Conflict + +**Error:** `containerd socket conflict detected` + +Another containerd instance is using the same socket path, or stale socket files remain from a previous run. + +**Fix:** + +```bash +# Check what's using the socket +sudo fuser /run/lando/containerd.sock + +# If it's a stale file, restart the service +sudo systemctl restart lando-containerd.service + +# If another containerd is genuinely running on the same path, stop it +# Lando's sockets should be in /run/lando/ — not /run/containerd/ +``` + +Lando uses `/run/lando/` specifically to avoid conflicts with system containerd (`/run/containerd/`) or Docker (`/var/run/docker.sock`). If something else is binding to `/run/lando/`, it's likely a leftover from a previous Lando installation. + +## docker-compose Failed + +**Error:** `docker-compose failed (containerd backend)` + +docker-compose commands are failing when talking to finch-daemon. This is usually a connectivity or compatibility issue. + +**Fix:** + +```bash +# Verify finch-daemon is reachable +curl --unix-socket /run/lando/finch.sock http://localhost/_ping + +# Test docker-compose directly +DOCKER_HOST=unix:///run/lando/finch.sock docker-compose version + +# Run with verbose output to see the actual error +DOCKER_HOST=unix:///run/lando/finch.sock docker-compose -f .lando/compose//docker-compose.yml config +``` + +Common causes: +- finch-daemon is not running (see above) +- A service in your Landofile uses a Docker-only feature not yet supported by finch-daemon +- The compose file references images that haven't been built or pulled yet + +## Component Update Warning + +**Warning:** `Recommend updating ` + +A containerd stack component is outdated. This doesn't prevent Lando from working but may cause stability issues. + +**Fix:** + +```bash +# Update all containerd components +lando setup --skip-common-plugins +``` + +This re-runs the setup hooks that install containerd, buildkitd, and finch-daemon, bringing them to the versions bundled with your Lando release. + +## macOS: Lima VM Not Running + +**Error:** `Lando Lima VM is not running` + +On macOS, containerd runs inside a Lima VM (because containerd requires a Linux kernel). The VM has stopped or wasn't created. + +**Fix:** + +```bash +# Re-run setup to create/start the VM +lando setup + +# Or start it manually +limactl start lando + +# Check VM status +limactl list +``` + +If the VM exists but won't start, check Lima logs: + +```bash +limactl shell lando -- journalctl --no-pager -n 50 +# or +cat ~/.lima/lando/serial.log +``` + +## macOS: Lima Not Installed + +**Error:** `Lima is required for containerd on macOS` + +Lima is not installed. It's required for the containerd backend on macOS. + +**Fix:** + +```bash +# Install via lando setup (recommended) +lando setup + +# Or install manually +brew install lima +``` + +After installing Lima, run `lando setup` again to create and configure the Lando VM. + +## CNI Networking Issues + +If containers start but can't communicate with each other or the host, the issue is likely CNI network configuration. finch-daemon creates networks at the Docker API level but doesn't automatically write the CNI config files that containerd's OCI hooks need. + +**Symptoms:** +- Containers start but can't reach each other by service name +- Proxy (Traefik) can't route to app containers +- `lando start` succeeds but services timeout when connecting + +**Fix:** + +```bash +# Check if CNI configs exist +ls -la /etc/cni/net.d/finch/ + +# Check CNI directory permissions +stat /etc/cni/net.d/finch/ + +# If the directory is not group-writable for 'lando', fix permissions +sudo chgrp -R lando /etc/cni/net.d/finch/ +sudo chmod -R g+w /etc/cni/net.d/finch/ + +# Re-run setup to fix permissions permanently +lando setup +``` + +::: warning +CNI directory permissions (`/etc/cni/net.d/finch/`) must allow the `lando` group to write. If `lando setup` hasn't set this up yet, you may need to fix permissions manually as shown above. +::: + +## Logs Reference + +All containerd backend logs are available through journald and Lando's own log directory: + +| Log | How to access | +|---|---| +| systemd service | `journalctl -u lando-containerd.service` | +| containerd | `journalctl -u lando-containerd.service \| grep containerd` | +| buildkitd | `journalctl -u lando-containerd.service \| grep buildkitd` | +| finch-daemon | `journalctl -u lando-containerd.service \| grep finch` | +| Lando runtime | `~/.lando/logs/lando.log` | +| Lando errors | `~/.lando/logs/lando-error.log` | +| App-specific | `~/.lando/logs/.log` | + +::: tip +For more verbose output, run your Lando command with `-vvvv`: + +```bash +lando start -vvvv +``` + +This sets maximum log verbosity and often reveals the specific error behind a generic failure message. +::: + +## Still Stuck? + +If none of the above resolves your issue: + +1. Run `lando doctor` and note any warnings or errors +2. Collect logs: `journalctl -u lando-containerd.service --no-pager > /tmp/lando-containerd.log` +3. Run the failing command with max verbosity: `lando start -vvvv 2>&1 | tee /tmp/lando-debug.log` +4. Report the issue with both log files at [github.com/lando/core/issues](https://github.com/lando/core/issues) diff --git a/examples/containerd/.lando.yml b/examples/containerd/.lando.yml new file mode 100644 index 000000000..d6bf59164 --- /dev/null +++ b/examples/containerd/.lando.yml @@ -0,0 +1,23 @@ +name: lando-containerd +services: + web: + api: 3 + type: lando + services: + image: nginx:1.22.1 + command: /docker-entrypoint.sh nginx -g "daemon off;" + ports: + - 0:80 + volumes: + - ./:/usr/share/nginx/html + web2: + api: 4 + type: lando + image: nginxinc/nginx-unprivileged:1.26.1 + ports: + - 8080/http + app-mount: + destination: /usr/share/nginx/html + +plugins: + "@lando/core": ../.. diff --git a/examples/containerd/README.md b/examples/containerd/README.md new file mode 100644 index 000000000..71503fa9e --- /dev/null +++ b/examples/containerd/README.md @@ -0,0 +1,116 @@ +# Containerd Example + +This example exists primarily to test the following documentation: + +* [Containerd Backend](https://docs.lando.dev/getting-started/containerd.html) + +See the [Landofiles](https://docs.lando.dev/config/lando.html) in this directory for the exact magicks. + +## Start up tests + +```bash +# Should start successfully +lando poweroff +LANDO_ENGINE=containerd lando setup -y --skip-common-plugins +LANDO_ENGINE=containerd lando start +``` + +## Verification commands + +Run the following commands to verify things work as expected + +```bash +# Should have installed containerd binary +stat /usr/local/lib/lando/bin/containerd + +# Should have installed buildkitd binary +stat /usr/local/lib/lando/bin/buildkitd + +# Should have installed runc binary +stat /usr/local/lib/lando/bin/runc + +# Should have installed nerdctl binary +stat ~/.lando/bin/nerdctl + +# Should have installed finch-daemon binary +stat /usr/local/lib/lando/bin/finch-daemon + +# Should have installed docker-compose binary +find ~/.lando/bin -type f -name 'docker-compose-v2*' | grep docker-compose + +# Should have the lando-containerd systemd service running +systemctl is-active --quiet lando-containerd.service + +# Should have the containerd socket available +stat /run/lando/containerd.sock + +# Should have the buildkitd socket available +stat /run/lando/buildkitd.sock + +# Should have the finch-daemon socket available +stat /run/lando/finch.sock + +# Should have created the Lando Development CA +stat ~/.lando/certs/LandoCA.crt + +# Should report containerd as the engine backend +LANDO_ENGINE=containerd lando config | grep "engine" | grep containerd + +# Should have running containers +DOCKER_HOST=unix:///run/lando/finch.sock $(find ~/.lando/bin -type f -name 'docker-compose-v2*' | head -1) -p landocontainerd ps | grep -i "up\|running" + +# Should be able to list containers via lando +LANDO_ENGINE=containerd lando list | grep landocontainerd + +# Should serve content from the web service +curl -s "$(LANDO_ENGINE=containerd lando info -s web --format json | grep -o 'http://[^"]*' | head -1)" | grep "CONTAINERD WORKS" + +# Should serve content from the web2 service (multi-container verification) +curl -s "$(LANDO_ENGINE=containerd lando info -s web2 --format json | grep -o 'http://[^"]*' | head -1)" | grep "CONTAINERD WORKS" + +# Should list both web and web2 services +LANDO_ENGINE=containerd lando list | grep web2 + +# Should be able to run commands inside web2 container +LANDO_ENGINE=containerd lando exec web2 -- cat /usr/share/nginx/html/index.html | grep "CONTAINERD WORKS" + +# Should have inter-container DNS aliases in web hosts file +LANDO_ENGINE=containerd lando exec web -- cat /etc/hosts | grep "web2.landocontainerd.internal" + +# Should have inter-container DNS aliases in web2 hosts file +LANDO_ENGINE=containerd lando exec web2 -- cat /etc/hosts | grep "web.landocontainerd.internal" + +# Should have container IPs on the same CNI subnet +LANDO_ENGINE=containerd lando exec web -- cat /etc/hosts | grep "10\.4\." +LANDO_ENGINE=containerd lando exec web2 -- cat /etc/hosts | grep "10\.4\." + +# Should be able to stop and restart cleanly +LANDO_ENGINE=containerd lando stop +LANDO_ENGINE=containerd lando start +LANDO_ENGINE=containerd lando list | grep landocontainerd + +# Should retain inter-container DNS aliases after restart +LANDO_ENGINE=containerd lando exec web -- cat /etc/hosts | grep "web2.landocontainerd.internal" +LANDO_ENGINE=containerd lando exec web2 -- cat /etc/hosts | grep "web.landocontainerd.internal" + +# Should serve content from both services after restart +curl -s "$(LANDO_ENGINE=containerd lando info -s web --format json | grep -o 'http://[^"]*' | head -1)" | grep "CONTAINERD WORKS" +curl -s "$(LANDO_ENGINE=containerd lando info -s web2 --format json | grep -o 'http://[^"]*' | head -1)" | grep "CONTAINERD WORKS" + +# Should be able to run commands inside containers +LANDO_ENGINE=containerd lando exec web -- cat /usr/share/nginx/html/index.html | grep "CONTAINERD WORKS" + +# Should have the containerd service still running after lando operations +systemctl is-active --quiet lando-containerd.service + +# Should NOT have interfered with system docker +docker info +``` + +## Destroy tests + +```bash +# Should destroy successfully +LANDO_ENGINE=containerd lando destroy -y +LANDO_ENGINE=containerd lando poweroff +``` diff --git a/examples/containerd/index.html b/examples/containerd/index.html new file mode 100644 index 000000000..24dc69bed --- /dev/null +++ b/examples/containerd/index.html @@ -0,0 +1,5 @@ + + +Lando Containerd Test +

CONTAINERD WORKS

+ diff --git a/hooks/app-add-2-landonet.js b/hooks/app-add-2-landonet.js index af8edd685..611d09989 100644 --- a/hooks/app-add-2-landonet.js +++ b/hooks/app-add-2-landonet.js @@ -2,7 +2,157 @@ const _ = require('lodash'); + +const isNotConnectedError = error => _.includes(error.message, 'is not connected to network') + || _.includes(error.message, 'network or container is not found'); + +/** + * Resolve the container's IP on the preferred Lando network. + * + * @param {Object} lando - Lando instance. + * @param {Object} app - Lando app instance. + * @param {Object} [data={}] - Container inspect data. + * @returns {string|undefined} IP address if found. + * @private + */ +const getContainerdNetworkIP = (lando, app, data = {}) => { + const configuredNetworks = JSON.parse(_.get(data, 'Config.Labels.nerdctl/networks', '[]')); + const networks = _.get(data, 'NetworkSettings.Networks', {}); + const preferred = [lando.config.networkBridge, `${app.project}_default`, lando.config.proxyNet]; + + for (const name of preferred) { + const index = configuredNetworks.indexOf(name); + if (index === -1) continue; + const ip = _.get(networks, `unknown-eth${index}.IPAddress`); + if (ip) return ip; + } + return undefined; +}; + +/** + * Retrieve the Dockerode instance from the Lando engine. + * + * Uses the existing Dockerode instance on the containerd container backend + * (already pointed at finch-daemon) rather than creating a new one. This makes + * the function testable and avoids duplicate socket connections. + * + * @param {Object} lando - Lando instance. + * @returns {import('dockerode')} Dockerode instance. + * @private + */ +const getDockerode = lando => { + // Prefer the Dockerode instance already wired to finch-daemon + if (_.get(lando, 'engine.docker.dockerode')) return lando.engine.docker.dockerode; + // Fallback: create one (shouldn't normally happen) + const Docker = require('dockerode'); + const finchSocket = lando.config.finchSocket || '/run/lando/finch.sock'; + return new Docker({socketPath: finchSocket}); +}; + +/** + * Update /etc/hosts inside a container using Dockerode exec via finch-daemon. + * + * Per BRIEF: "Never shell out to nerdctl from user-facing code." This uses + * the Docker API exec endpoint through finch-daemon instead. + * + * @param {Object} lando - Lando instance. + * @param {string} target - Container name. + * @param {Array<{ip: string, alias: string}>} entries - Host entries to add. + * @returns {Promise} + * @private + */ +const updateHosts = async (lando, target, entries) => { + const dockerode = getDockerode(lando); + const container = dockerode.getContainer(target); + + const echoLines = entries + .map(({ip, alias}) => { + // Allowlist sanitization: IPs may only contain digits, dots, colons; aliases only alphanum, dots, hyphens + const safeIp = ip.replace(/[^0-9.:]/g, ''); + const safeAlias = alias.replace(/[^a-zA-Z0-9.\-_]/g, ''); + return `echo '${safeIp} ${safeAlias} # lando-internal-aliases' >> "$tmp"`; + }) + .join(' && '); + const script = [ + 'tmp=$(mktemp)', + "grep -v 'lando-internal-aliases' /etc/hosts > \"$tmp\" || true", + echoLines, + 'cat "$tmp" > /etc/hosts', + 'rm -f "$tmp"', + ].join(' && '); + + const exec = await container.exec({ + Cmd: ['sh', '-lc', script], + User: 'root', + AttachStdout: true, + AttachStderr: true, + }); + const stream = await exec.start({hijack: true, stdin: false}); + + return new Promise((resolve, reject) => { + let stderr = ''; + stream.on('data', chunk => { + // Docker multiplexed stream: 8-byte header [type, 0, 0, 0, size(4)] + // type 1 = stdout, type 2 = stderr + if (chunk.length > 8 && chunk[0] === 2) { + stderr += chunk.slice(8).toString(); + } + }); + stream.on('error', reject); + stream.on('end', async () => { + try { + const info = await exec.inspect(); + if (info.ExitCode !== 0) { + reject(new Error(`updateHosts exec failed on ${target} (exit ${info.ExitCode}): ${stderr}`)); + } else { + resolve(); + } + } catch (err) { + reject(err); + } + }); + }); +}; + module.exports = async (app, lando) => { + if (lando.engine?.engineBackend === 'containerd') { + // Derive containers from app metadata (populated during init from compose files) + // instead of finch-daemon listContainers which may not report running containers + const aliases = []; + const targets = []; + + for (const service of (app.services || [])) { + const containerName = _.get(app, `containers.${service}`, `${app.project}-${service}-1`); + try { + const data = await lando.engine.scan({id: containerName}); + const ip = getContainerdNetworkIP(lando, app, data); + const name = _.get(data, 'Name', containerName).replace(/^\//, ''); + targets.push(name); + if (ip) { + aliases.push({ip, alias: `${service}.${app.project}.internal`}); + } + } catch (err) { + app.log.debug('containerd landonet: could not scan %s: %s', containerName, err.message); + } + } + + app.log.debug('containerd landonet hook found containers %j', targets); + app.log.debug('containerd landonet aliases %j', aliases); + if (_.isEmpty(aliases)) return; + + if (lando.config.proxy === 'ON' && await lando.engine.exists({id: lando.config.proxyContainer})) { + try { + const proxyData = await lando.engine.scan({id: lando.config.proxyContainer}); + targets.push(_.get(proxyData, 'Name', lando.config.proxyContainer).replace(/^\//, '')); + } catch (err) { + app.log.debug('containerd landonet: could not scan proxy: %s', err.message); + } + } + + app.log.debug('containerd landonet targets %j', _.uniq(targets)); + return lando.Promise.each(_.uniq(targets), target => updateHosts(lando, target, aliases)); + } + // We assume the lando net exists at this point const landonet = lando.engine.getNetwork(lando.config.networkBridge); // List all our app containers @@ -15,11 +165,11 @@ module.exports = async (app, lando) => { return landonet.disconnect({Container: container.id, Force: true}) // Only throw non not connected errors .catch(error => { - if (!_.includes(error.message, 'is not connected to network')) throw error; + if (!isNotConnectedError(error)) throw error; }) // Connect + .then(() => landonet.connect({Container: container.id, EndpointConfig: {Aliases: [internalAlias]}})) .then(() => { - landonet.connect({Container: container.id, EndpointConfig: {Aliases: [internalAlias]}}); app.log.debug('connected %s to the landonet', container.name); }); }); diff --git a/hooks/app-add-proxy-2-landonet.js b/hooks/app-add-proxy-2-landonet.js index 831b251d9..18e5a4cfb 100644 --- a/hooks/app-add-proxy-2-landonet.js +++ b/hooks/app-add-proxy-2-landonet.js @@ -2,10 +2,37 @@ const _ = require('lodash'); +const isNotConnectedError = error => _.includes(error.message, 'is not connected to network') + || _.includes(error.message, 'network or container is not found'); + +/** + * Connects the proxy container to the lando bridge network with DNS aliases + * for all proxied hostnames in the app. + * + * Works with both Docker and containerd backends: + * - Docker: uses Dockerode's native Network handle via lando.engine.getNetwork() + * - Containerd: uses ContainerdContainer.getNetwork() which provides a + * Dockerode-compatible handle backed by finch-daemon's Docker API + * + * For containerd, also ensures the bridge network has a CNI conflist so that + * the nerdctl OCI hook can configure container networking. + * + * @param {Object} app - The Lando app instance. + * @param {Object} lando - The Lando instance. + * @return {Promise} + */ module.exports = async (app, lando) => { // If the proxy isnt on then just bail if (lando.config.proxy !== 'ON') return; + // For containerd backend, ensure the bridge network has a CNI config + if (lando.engine?.engineBackend === 'containerd') { + const ensureCniNetwork = require('../utils/ensure-cni-network'); + ensureCniNetwork(lando.config.networkBridge, { + debug: lando.log.debug.bind(lando.log), + }); + } + // Get the needed ids const bridgeNet = lando.engine.getNetwork(lando.config.networkBridge); const proxyContainer = lando.config.proxyContainer; @@ -17,6 +44,8 @@ module.exports = async (app, lando) => { // Otherwise scan and add as needed return lando.engine.scan({id: proxyContainer}).then(data => { + const containerId = _.get(data, 'Id', proxyContainer); + // Get existing aliases and merge them into our new ones // @NOTE: Do we need to handle wildcards and paths? const aliasPath = `NetworkSettings.Networks.${lando.config.networkBridge}.Aliases`; @@ -31,14 +60,14 @@ module.exports = async (app, lando) => { .value(); // Disconnect so we can reconnect - return bridgeNet.disconnect({Container: proxyContainer, Force: true}) + return bridgeNet.disconnect({Container: containerId, Force: true}) // Only throw non not connected errors .catch(error => { - if (!_.includes(error.message, 'is not connected to network')) throw error; + if (!isNotConnectedError(error)) throw error; }) // Connect + .then(() => bridgeNet.connect({Container: containerId, EndpointConfig: {Aliases: aliases}})) .then(() => { - bridgeNet.connect({Container: proxyContainer, EndpointConfig: {Aliases: aliases}}); app.log.debug('aliased %j to the proxynet', aliases); }); }); diff --git a/hooks/app-add-v4-services.js b/hooks/app-add-v4-services.js index 54b0f505a..9f2d2b36e 100644 --- a/hooks/app-add-v4-services.js +++ b/hooks/app-add-v4-services.js @@ -3,6 +3,9 @@ const _ = require('lodash'); const path = require('path'); +const getContainerdPaths = require('../utils/get-containerd-paths'); +const {getContainerdAuthConfig} = require('../utils/setup-containerd-auth'); + module.exports = async (app, lando) => { // add parsed services to app object so we can use them downstream app.v4.parsedConfig = _(require('../utils/parse-v4-services')(_.get(app, 'config.services', {}))) @@ -41,8 +44,31 @@ module.exports = async (app, lando) => { // retrieve the correct class and mimic-ish v4 patterns to ensure faster loads const Service = lando.factory.get(config.builder, config.api); - Service.bengineConfig = lando.config.engineConfig; - Service.builder = lando.config.dockerBin; + const isContainerd = _.get(lando, 'engine.engineBackend') === 'containerd' + || lando.config.engine === 'containerd'; + const containerdPaths = getContainerdPaths(lando.config); + const userConfRoot = lando.config.userConfRoot; + const nerdctlBin = _.get(lando, 'engine.daemon.nerdctlBin') || path.join(userConfRoot, 'bin', 'nerdctl'); + const buildkitSocket = _.get(lando, 'engine.daemon.buildkitSocket') || containerdPaths.buildkitSocket; + const containerdSocket = _.get(lando, 'engine.daemon.socketPath') || containerdPaths.containerdSocket; + const finchSocket = _.get(lando, 'engine.daemon.finchDaemon.getSocketPath') + ? lando.engine.daemon.finchDaemon.getSocketPath() + : containerdPaths.finchSocket; + + Service.bengineConfig = isContainerd + ? { + ...lando.config.engineConfig, + authConfig: getContainerdAuthConfig({configPath: lando.config.registryAuth}), + buildkitHost: `unix://${buildkitSocket}`, + containerdMode: true, + containerdNamespace: 'default', + containerdSocket, + engine: 'containerd', + nerdctlConfig: path.join(userConfRoot, 'config', 'nerdctl.toml'), + socketPath: finchSocket, + } + : lando.config.engineConfig; + Service.builder = isContainerd ? nerdctlBin : lando.config.dockerBin; Service.orchestrator = lando.config.orchestratorBin; // instantiate diff --git a/hooks/app-check-containerd-compat.js b/hooks/app-check-containerd-compat.js new file mode 100644 index 000000000..b7818e2d5 --- /dev/null +++ b/hooks/app-check-containerd-compat.js @@ -0,0 +1,103 @@ +'use strict'; + +const _ = require('lodash'); +const fs = require('fs'); + +/** + * App-level containerd backend compatibility checks. + * + * Runs when the containerd backend is active to verify: + * - Component version recommendations + * - docker-compose availability (via finch-daemon Docker API) + * - buildkitd socket availability + * + * Per BRIEF: never shell out to nerdctl from user-facing code. All checks + * use Dockerode against finch-daemon or check socket/binary existence directly. + * + * @param {Object} app - The Lando app instance. + * @param {Object} lando - The Lando instance. + * @returns {Promise} + */ +module.exports = async (app, lando) => { + // Skip if not using the containerd backend + const backend = _.get(lando, 'engine.engineBackend', _.get(lando, 'config.engine', 'auto')); + if (backend !== 'containerd') return; + + _.forEach(_(lando.versions) + .filter(version => version && version.name && !version.dockerVersion) + .value(), thing => { + // handle generic unsupported or untested notices + if (!thing.satisfied) app.addMessage(require('../messages/unsupported-version-warning')({ + ...thing, + name: thing.name, + })); + if (thing.untested) app.addMessage(require('../messages/untested-version-notice')(thing)); + + // handle containerd backend component update recommendations + if (thing.rupdate) { + app.addMessage(require('../messages/update-containerd-warning')(thing)); + } + }); + + // Run live containerd-specific health checks + try { + const daemon = lando.engine.daemon; + + // Verify containerd daemon is running via Dockerode ping against finch-daemon + // Per BRIEF: finch-daemon provides Docker API compatibility — use it. + const isUp = await daemon.isUp(); + if (!isUp) { + app.addMessage({ + type: 'warning', + title: 'Containerd daemon is not running', + detail: [ + 'The containerd daemon does not appear to be running.', + 'Lando needs containerd to manage containers. Try running "lando start"', + 'which will attempt to start the daemon automatically.', + ], + }); + } + + // Verify docker-compose is functional with finch-daemon + // Per BRIEF: compose operations use docker-compose with DOCKER_HOST, NOT nerdctl compose + if (isUp) { + try { + const {execSync} = require('child_process'); + const finchSocket = _.get(daemon, 'finchDaemon.socketPath', '/run/lando/finch.sock'); + const composeBin = lando.config.orchestratorBin || 'docker-compose'; + execSync(`${composeBin} version`, { + stdio: 'ignore', + env: {...process.env, DOCKER_HOST: `unix://${finchSocket}`}, + }); + } catch (err) { + app.addMessage({ + type: 'warning', + title: 'docker-compose is not functional', + detail: [ + 'Could not run "docker-compose version" successfully.', + 'docker-compose is required for service orchestration with the containerd backend.', + 'It communicates with finch-daemon via the DOCKER_HOST environment variable.', + `Error: ${err.message}`, + ], + url: 'https://docs.lando.dev/config/engine.html', + }); + } + + // Verify buildkitd socket exists (systemd service manages the process) + if (!fs.existsSync(daemon.buildkitSocket)) { + app.addMessage({ + type: 'warning', + title: 'BuildKit daemon is not running', + detail: [ + 'The BuildKit daemon (buildkitd) does not appear to be running.', + 'BuildKit is required for building container images with the containerd backend.', + 'Run "lando setup" to install and start the containerd engine service.', + ], + url: 'https://docs.lando.dev/config/engine.html', + }); + } + } + } catch (err) { + lando.log.debug('containerd health check encountered an error: %s', err.message); + } +}; diff --git a/hooks/app-check-containerd-mounts.js b/hooks/app-check-containerd-mounts.js new file mode 100644 index 000000000..eb87e3aeb --- /dev/null +++ b/hooks/app-check-containerd-mounts.js @@ -0,0 +1,50 @@ +'use strict'; + +const _ = require('lodash'); + +module.exports = async (app, lando) => { + // Only check on macOS with containerd engine + if (_.get(lando, 'config.os.landoPlatform', process.platform) !== 'darwin') return; + + const backend = _.get(lando, 'engine.engineBackend', _.get(lando, 'config.engine', 'auto')); + if (backend !== 'containerd') return; + + const {resolveContainerdMount} = require('../utils/resolve-containerd-mount'); + + // Collect all host-side volume mount paths from compose data + const inaccessible = []; + + _.forEach(app.composeData, service => { + _.forEach(service.data, datum => { + _.forEach(_.get(datum, 'services', {}), (props, serviceName) => { + _.forEach(props.volumes, volume => { + let hostPath; + + // Volumes can be strings ("./src:/app") or objects ({type: "bind", source: "...", target: "..."}) + if (_.isString(volume)) { + const parts = volume.split(':'); + hostPath = parts[0]; + } else if (_.get(volume, 'type') === 'bind' && _.get(volume, 'source')) { + hostPath = volume.source; + } + + if (hostPath) { + const result = resolveContainerdMount(hostPath, {platform: 'darwin'}); + if (!result.accessible) { + inaccessible.push({serviceName, hostPath, warning: result.warning}); + } + } + }); + }); + }); + }); + + if (!_.isEmpty(inaccessible)) { + const paths = inaccessible.map(m => ` - ${m.serviceName}: ${m.hostPath}`).join('\n'); + app.log.warn( + 'Some volume mounts are not accessible in the Lima VM:\n%s\n%s', + paths, + inaccessible[0].warning, + ); + } +}; diff --git a/hooks/app-check-docker-compat.js b/hooks/app-check-docker-compat.js index ed21cc9a6..b9be41df7 100644 --- a/hooks/app-check-docker-compat.js +++ b/hooks/app-check-docker-compat.js @@ -3,6 +3,8 @@ const _ = require('lodash'); module.exports = async (app, lando) => { + if (_.get(lando, 'engine.engineBackend') === 'containerd') return; + _.forEach(_(lando.versions).filter(version => version && version.dockerVersion).value(), thing => { // handle generic unsupported or untested notices if (!thing.satisfied) app.addMessage(require('../messages/unsupported-version-warning')(thing)); diff --git a/hooks/app-reset-orchestrator.js b/hooks/app-reset-orchestrator.js index 0591c1361..b9177db69 100644 --- a/hooks/app-reset-orchestrator.js +++ b/hooks/app-reset-orchestrator.js @@ -5,8 +5,8 @@ module.exports = async (app, lando) => { if (!lando.config.orchestratorBin) lando.config.orchestratorBin = require('../utils/get-compose-x')(lando.config); // because the entire lando 3 runtime was made in a bygone era when we never dreamed of doing stuff like this - // we need this workaround - if (lando._bootstrapLevel >= 3 && !app.engine.composeInstalled) { + // we need this workaround — but skip for containerd engine which manages its own compose + if (lando._bootstrapLevel >= 3 && !app.engine.composeInstalled && (lando.config.engine !== 'containerd')) { app.engine = require('../utils/setup-engine')( lando.config, lando.cache, diff --git a/hooks/app-start-proxy.js b/hooks/app-start-proxy.js index 3db065cfb..dc26632ce 100644 --- a/hooks/app-start-proxy.js +++ b/hooks/app-start-proxy.js @@ -245,6 +245,19 @@ module.exports = async (app, lando) => { 'proxy._lando_.internal', ]}); + // For containerd backend: ensure CNI network configs exist for proxy networks. + // docker-compose via finch-daemon creates networks at the Docker API level but + // NOT at the CNI level. The nerdctl OCI hook needs CNI configs for container + // networking to work. This must happen BEFORE the proxy container starts. + if (lando.engine?.engineBackend === 'containerd') { + const {ContainerdProxyAdapter} = require('../lib/backends/containerd'); + const proxyAdapter = new ContainerdProxyAdapter({ + config: lando.config, + debug: lando.log.debug.bind(lando.log), + }); + proxyAdapter.ensureProxyNetworks(lando.config.proxyName); + } + // Determine what ports we need to discover const protocolStatus = needsProtocolScan(lando.config.proxyCurrentPorts, lando.config.proxyLastPorts); // And then discover! @@ -311,6 +324,20 @@ module.exports = async (app, lando) => { return parseConfig(app.config.proxy, _.compact(_.flatten([sslReady, servedBy, sslReadyV4]))); }) + // For containerd backend: ensure the external proxy edge network has a + // CNI config before app services try to join it. The proxy start above + // already ensures this, but this is a safety net in case the proxy was + // already running from a previous app start. + .then(services => { + if (lando.engine?.engineBackend === 'containerd') { + const ensureCniNetwork = require('../utils/ensure-cni-network'); + ensureCniNetwork(lando.config.proxyNet, { + debug: lando.log.debug.bind(lando.log), + }); + } + return services; + }) + // Map to docker compose things .map(service => { // Throw error but proceed if we don't have the service diff --git a/hooks/lando-add-containerd-version-info.js b/hooks/lando-add-containerd-version-info.js new file mode 100644 index 000000000..622700c14 --- /dev/null +++ b/hooks/lando-add-containerd-version-info.js @@ -0,0 +1,26 @@ +"use strict"; + +module.exports = async lando => { + // Only run for containerd backend + if (!lando.engine || lando.engine.engineBackend !== "containerd") return; + + try { + const versions = await lando.engine.daemon.getVersions(); + lando.log.debug("containerd versions: %o", versions); + + // Add to lando.versions alongside any Docker version info + if (!lando.versions) lando.versions = []; + + for (const [name, version] of Object.entries(versions)) { + if (!version) continue; + lando.versions.push({ + name, + version, + dockerVersion: false, + satisfied: true, + }); + } + } catch (err) { + lando.log.warn("could not retrieve containerd version info: %s", err.message); + } +}; diff --git a/hooks/lando-autostart-engine.js b/hooks/lando-autostart-engine.js index eddef3671..827e27ffd 100644 --- a/hooks/lando-autostart-engine.js +++ b/hooks/lando-autostart-engine.js @@ -4,6 +4,8 @@ // @NOTE: for some reason _SOMETIMES_ autostarting before lando start produces an error but we are just // not going to address it in favor of lando 4 stuff module.exports = async lando => { + if (lando.engine?.engineBackend === 'containerd' || lando.config.engine === 'containerd') return; + if (lando._bootstrapLevel >= 3 && await lando.engine.daemon.isUp() === false) { const debug = require('../utils/debug-shim')(lando.log); const tasks = [{ diff --git a/hooks/lando-doctor-containerd.js b/hooks/lando-doctor-containerd.js new file mode 100644 index 000000000..e5cf9791f --- /dev/null +++ b/hooks/lando-doctor-containerd.js @@ -0,0 +1,134 @@ +"use strict"; + +const {execSync} = require("child_process"); +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +const getContainerdPaths = require('../utils/get-containerd-paths'); +const getComposeX = require('../utils/get-compose-x'); + +/** + * Check whether a binary exists — either as an absolute path or on $PATH. + * + * @param {string} bin - Absolute path or bare command name. + * @returns {boolean} + * @private + */ +const binExists = bin => { + if (path.isAbsolute(bin)) return fs.existsSync(bin); + try { + execSync(`command -v ${bin}`, {stdio: 'ignore'}); + return true; + } catch { + return false; + } +}; + +/** + * Run containerd engine health checks. + * + * Returns an array of check result objects, each with: + * - title: string — what was checked + * - status: "ok" | "warning" | "error" — result + * - message: string — human-readable description + * + * @param {Object} lando - The Lando instance. + * @returns {Promise>} + */ +const runChecks = async (lando) => { + const checks = []; + const userConfRoot = lando.config.userConfRoot || path.join(os.homedir(), ".lando"); + const binDir = path.join(userConfRoot, "bin"); + const systemBinDir = lando.config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + const paths = getContainerdPaths(lando.config); + const orchestratorBin = lando.config.orchestratorBin + || getComposeX({...lando.config, userConfRoot}) + || 'docker-compose'; + + // Per BRIEF: nerdctl is only used internally by OCI runtime hooks (invoked + // as root by systemd). It is NOT a user-facing dependency, so we don't + // check for it here. + const bins = { + containerd: lando.config.containerdBin || path.join(systemBinDir, "containerd"), + buildkitd: lando.config.buildkitdBin || path.join(systemBinDir, "buildkitd"), + "finch-daemon": lando.config.finchDaemonBin || path.join(systemBinDir, "finch-daemon"), + "docker-compose": orchestratorBin, + }; + + const sockets = { + containerd: paths.containerdSocket, + buildkitd: paths.buildkitSocket, + "finch-daemon": paths.finchSocket, + }; + + // Check binaries + for (const [name, bin] of Object.entries(bins)) { + const exists = binExists(bin); + checks.push({ + title: `${name} binary`, + status: exists ? "ok" : "error", + message: exists ? `Found at ${bin}` : `Not found at ${bin}. Run "lando setup" to install.`, + }); + } + + // Check sockets (daemon running) + for (const [name, socketPath] of Object.entries(sockets)) { + const exists = fs.existsSync(socketPath); + checks.push({ + title: `${name} daemon`, + status: exists ? "ok" : "warning", + message: exists ? `Socket active at ${socketPath}` : `Socket not found at ${socketPath}. Daemon may not be running.`, + }); + } + + // Check CNI directory permissions on Linux/WSL-native installs only. + // macOS uses Lima, so the host should not have this Linux CNI path. + if (process.platform === 'linux') { + const cniDir = '/etc/lando/cni/finch'; + try { + const cniStats = fs.statSync(cniDir); + const isGroupWritable = (cniStats.mode & 0o020) !== 0; + if (isGroupWritable) { + checks.push({ + title: 'CNI directory permissions', + status: 'ok', + message: `${cniDir} is group-writable`, + }); + } else { + checks.push({ + title: 'CNI directory permissions', + status: 'error', + message: `${cniDir} is not group-writable. Run "lando setup" to fix permissions.`, + }); + } + } catch { + checks.push({ + title: 'CNI directory permissions', + status: 'error', + message: `${cniDir} does not exist. Run "lando setup" to create it.`, + }); + } + } + + // Check finch-daemon connectivity via Dockerode (Docker API) + // Per BRIEF: never shell out to nerdctl from user-facing code. + // finch-daemon provides Docker API compatibility, so we ping it instead. + try { + const finchSocket = sockets['finch-daemon']; + if (fs.existsSync(finchSocket)) { + const Dockerode = require('dockerode'); + const docker = new Dockerode({socketPath: finchSocket}); + await docker.ping(); + checks.push({title: "finch-daemon connectivity", status: "ok", message: "finch-daemon Docker API is responding"}); + } else { + checks.push({title: "finch-daemon connectivity", status: "warning", message: `finch-daemon socket not found at ${finchSocket}. Daemon may not be running.`}); + } + } catch (err) { + checks.push({title: "finch-daemon connectivity", status: "error", message: `finch-daemon is not responding: ${err.message}`}); + } + + return checks; +}; + +module.exports = runChecks; diff --git a/hooks/lando-get-containerd-compat.js b/hooks/lando-get-containerd-compat.js new file mode 100644 index 000000000..1056d57fc --- /dev/null +++ b/hooks/lando-get-containerd-compat.js @@ -0,0 +1,18 @@ +'use strict'; + +const _ = require('lodash'); + +module.exports = async lando => { + // only run if engine bootstrap or above, containerd backend, and daemon is available + if (lando._bootstrapLevel >= 3) { + const backend = _.get(lando, 'engine.engineBackend', _.get(lando, 'config.engine', 'auto')); + if (backend === 'containerd' && lando.engine.dockerInstalled) { + await lando.engine.getCompatibility().then(results => { + lando.log.verbose('checking containerd version compatibility...'); + lando.log.debug('containerd compatibility results', _.keyBy(results, 'name')); + lando.cache.set('versions', _.assign(lando.versions, _.keyBy(results, 'name')), {persist: true}); + lando.versions = lando.cache.get('versions'); + }); + } + } +}; diff --git a/hooks/lando-reset-orchestrator.js b/hooks/lando-reset-orchestrator.js index 85f71a871..0a7a25588 100644 --- a/hooks/lando-reset-orchestrator.js +++ b/hooks/lando-reset-orchestrator.js @@ -1,6 +1,12 @@ 'use strict'; module.exports = async lando => { + // Containerd engine manages its own compose binary — skip Docker-era workaround + if (lando.engine?.engineBackend === 'containerd' || lando.config.engine === 'containerd') { + lando.log.debug('using docker-compose %s', lando.config.orchestratorBin); + return; + } + // if we dont have an orchestrator bin yet then discover it if (!lando.config.orchestratorBin) lando.config.orchestratorBin = require('../utils/get-compose-x')(lando.config); diff --git a/hooks/lando-set-proxy-config.js b/hooks/lando-set-proxy-config.js index 46bf7e25d..e65ae189a 100644 --- a/hooks/lando-set-proxy-config.js +++ b/hooks/lando-set-proxy-config.js @@ -4,6 +4,8 @@ const _ = require('lodash'); const path = require('path'); const url = require('url'); +const getContainerdPaths = require('../utils/get-containerd-paths'); + const ports2Urls = (ports, secure = false, hostname = '127.0.0.1') => _(ports) .map(port => url.format({protocol: (secure) ? 'https' : 'http', hostname, port})) .value(); @@ -23,4 +25,10 @@ module.exports = async lando => { lando.config.proxyScanHttps = ports2Urls(lando.config.proxyHttpsPorts, true, lando.config.proxyBindAddress); // And dependent things lando.config.proxyConfigDir = path.join(lando.config.proxyDir, 'config'); + + // Set dockerSocket for containerd backend (finch-daemon provides Docker API compatibility) + const backend = _.get(lando, 'engine.engineBackend', _.get(lando, 'config.engine', 'auto')); + if (backend === 'containerd') { + lando.config.dockerSocket = getContainerdPaths(lando.config).finchSocket; + } }; diff --git a/hooks/lando-setup-build-engine-darwin.js b/hooks/lando-setup-build-engine-darwin.js index 6e1c0d256..e80bc2663 100644 --- a/hooks/lando-setup-build-engine-darwin.js +++ b/hooks/lando-setup-build-engine-darwin.js @@ -3,6 +3,7 @@ const axios = require('../utils/get-axios')(); const fs = require('fs'); const getDockerDesktopBin = require('../utils/get-docker-desktop-x'); +const getSetupEngine = require('../utils/get-setup-engine'); const os = require('os'); const path = require('path'); const semver = require('semver'); @@ -88,6 +89,8 @@ module.exports = async (lando, options) => { // @NOTE: this is mostly for internal stuff if (options.buildEngine === false) return; + if (getSetupEngine(lando, options) !== 'docker') return; + // get stuff from config/opts const build = getId(options.buildEngine); const version = getVersion(options.buildEngine); @@ -108,12 +111,11 @@ module.exports = async (lando, options) => { // if we are missing any files we can check then terminate here if (lando.engine.dockerInstalled === false || !fs.existsSync(getDockerDesktopBin())) return false; - // if we get here let's make sure the engine is on + // passive check: see if the daemon is already up without trying to start it try { - await lando.engine.daemon.up({max: 1, backoff: 1000}); - return true; + return await lando.engine.daemon.isUp(); } catch (error) { - lando.log.debug('docker install task has not run %j', error); + lando.log.debug('docker engine is not up %j', error); return false; } }, diff --git a/hooks/lando-setup-build-engine-linux.js b/hooks/lando-setup-build-engine-linux.js index 8086b0ddb..e6b3676a2 100644 --- a/hooks/lando-setup-build-engine-linux.js +++ b/hooks/lando-setup-build-engine-linux.js @@ -4,6 +4,8 @@ const axios = require('../utils/get-axios')(); const os = require('os'); const path = require('path'); +const getSetupEngine = require('../utils/get-setup-engine'); + const {color} = require('listr2'); const downloadDockerEngine = (url = 'https://get.docker.com', {debug, task}) => new Promise((resolve, reject) => { @@ -30,6 +32,8 @@ module.exports = async (lando, options) => { // @NOTE: this is mostly for internal stuff if (options.buildEngine === false) return; + if (getSetupEngine(lando, options) !== 'docker') return; + const version = options.buildEngine; const url = 'https://get.docker.com'; @@ -41,15 +45,13 @@ module.exports = async (lando, options) => { version: `Docker Engine ${version}`, hasRun: async () => { // start by looking at the engine install status - // @NOTE: is this always defined? if (lando.engine.dockerInstalled === false) return false; - // if we get here let's make sure the engine is on + // passive check: see if the daemon is already up without trying to start it try { - await lando.engine.daemon.up({max: 1, backoff: 1000}); - return true; + return await lando.engine.daemon.isUp(); } catch (error) { - lando.log.debug('docker install task has not run %j', error); + lando.log.debug('docker engine is not up %j', error); return false; } }, diff --git a/hooks/lando-setup-build-engine-win32.js b/hooks/lando-setup-build-engine-win32.js index 18f73078a..8dcd7a607 100644 --- a/hooks/lando-setup-build-engine-win32.js +++ b/hooks/lando-setup-build-engine-win32.js @@ -3,6 +3,7 @@ const axios = require('../utils/get-axios')(); const fs = require('fs'); const getDockerDesktopBin = require('../utils/get-docker-desktop-x'); +const getSetupEngine = require('../utils/get-setup-engine'); const os = require('os'); const path = require('path'); const semver = require('semver'); @@ -95,6 +96,8 @@ module.exports = async (lando, options) => { // @NOTE: this is mostly for internal stuff if (options.buildEngine === false) return; + if (getSetupEngine(lando, options) !== 'docker') return; + // get stuff from config/opts const build = getId(options.buildEngine); const version = getVersion(options.buildEngine); @@ -115,12 +118,11 @@ module.exports = async (lando, options) => { // if we are missing any files we can check then terminate here if (lando.engine.dockerInstalled === false || !fs.existsSync(getDockerDesktopBin())) return false; - // if we get here let's make sure the engine is on + // passive check: see if the daemon is already up without trying to start it try { - await lando.engine.daemon.up({max: 5, backoff: 1000}); - return true; + return await lando.engine.daemon.isUp(); } catch (error) { - lando.log.debug('docker install task has not run %j', error); + lando.log.debug('docker engine is not up %j', error); return false; } }, @@ -192,4 +194,3 @@ module.exports = async (lando, options) => { }, }); }; - diff --git a/hooks/lando-setup-build-engine-wsl.js b/hooks/lando-setup-build-engine-wsl.js index bdc85b59d..545147656 100644 --- a/hooks/lando-setup-build-engine-wsl.js +++ b/hooks/lando-setup-build-engine-wsl.js @@ -3,6 +3,7 @@ const axios = require('../utils/get-axios')(); const fs = require('fs'); const getDockerDesktopBin = require('../utils/get-docker-desktop-x'); +const getSetupEngine = require('../utils/get-setup-engine'); const getWinEnvar = require('../utils/get-win32-envvar-from-wsl'); const path = require('path'); const semver = require('semver'); @@ -97,6 +98,8 @@ module.exports = async (lando, options) => { // @NOTE: this is mostly for internal stuff if (options.buildEngine === false) return; + if (getSetupEngine(lando, options) !== 'docker') return; + // get stuff from config/opts const build = getId(options.buildEngine); const version = getVersion(options.buildEngine); @@ -117,12 +120,14 @@ module.exports = async (lando, options) => { // if we are missing the docker desktop executable then false if (!fs.existsSync(getDockerDesktopBin())) return false; - // if we get here let's make sure the engine is on + // WSL special case: docker binaries don't exist in the linux environment + // until Docker Desktop has actually started up on Windows, so we need to + // attempt a start here to determine if it's installed try { - await lando.engine.daemon.up({max: 3, backoff: 1000}); + await lando.engine.daemon.up({max: 1, backoff: 1000}); return true; } catch (error) { - lando.log.debug('docker install task has not run %j', error); + lando.log.debug('docker engine is not up %j', error); return false; } }, @@ -201,4 +206,3 @@ module.exports = async (lando, options) => { }, }); }; - diff --git a/hooks/lando-setup-containerd-engine-check.js b/hooks/lando-setup-containerd-engine-check.js new file mode 100644 index 000000000..7136c918e --- /dev/null +++ b/hooks/lando-setup-containerd-engine-check.js @@ -0,0 +1,50 @@ +"use strict"; + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +/** + * Pre-flight check hook: warns if containerd engine binaries are missing. + * + * Runs on every Lando startup when `engine: containerd` is set. + * Binaries installed by `lando setup` live in two locations: + * - **System binaries** (`containerd`, `buildkitd`, `runc`): `/usr/local/lib/lando/bin/` + * (requires root to install, owned by the systemd service) + * - **User binaries** (`nerdctl`, `docker-compose`): `~/.lando/bin/` + * (installed per-user, no root required after setup) + * + * @param {Object} lando - The Lando app instance. + * @returns {void} + */ +module.exports = async (lando) => { + const engine = lando.config.engine || "auto"; + // Only check when engine is explicitly containerd + if (engine !== "containerd") return; + + const userConfRoot = lando.config.userConfRoot || path.join(os.homedir(), ".lando"); + const userBinDir = path.join(userConfRoot, "bin"); + const systemBinDir = lando.config.containerdSystemBinDir || "/usr/local/lib/lando/bin"; + + const composeVersion = lando.config.orchestratorVersion || '2.31.0'; + const missing = []; + const bins = { + containerd: lando.config.containerdBin || path.join(systemBinDir, 'containerd'), + buildkitd: lando.config.buildkitdBin || path.join(systemBinDir, 'buildkitd'), + runc: lando.config.runcBin || path.join(systemBinDir, 'runc'), + nerdctl: lando.config.nerdctlBin || path.join(userBinDir, 'nerdctl'), + 'docker-compose': lando.config.orchestratorBin + || path.join(userBinDir, `docker-compose-v${composeVersion}`), + }; + + for (const [name, binPath] of Object.entries(bins)) { + if (!fs.existsSync(binPath)) missing.push(name); + } + + if (missing.length > 0) { + lando.log.warn( + "containerd engine selected but missing binaries: %s. Run \"lando setup\" to install them.", + missing.join(", "), + ); + } +}; diff --git a/hooks/lando-setup-containerd-engine-darwin.js b/hooks/lando-setup-containerd-engine-darwin.js new file mode 100644 index 000000000..2fb54a417 --- /dev/null +++ b/hooks/lando-setup-containerd-engine-darwin.js @@ -0,0 +1,254 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const {execSync} = require('child_process'); + +const getSetupEngine = require('../utils/get-setup-engine'); + +const LIMA_VERSION = '1.0.6'; +const VM_NAME = 'lando'; + +/** + * Get the Lima download URL for the current platform. + * + * Format: lima--Darwin-.tar.gz + * where arch is arm64 (Apple Silicon) or x86_64 (Intel). + */ +const getLimaDownloadUrl = (version = LIMA_VERSION) => { + const arch = process.arch === 'arm64' ? 'aarch64' : 'x86_64'; + return `https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-Darwin-${arch}.tar.gz`; +}; + +/** + * Check if limactl binary exists at common locations or in PATH. + */ +const findLimactl = (binDir) => { + // check lando bin dir first + const landoBin = path.join(binDir, 'limactl'); + if (fs.existsSync(landoBin)) return landoBin; + + // check common homebrew / system paths + const commonPaths = ['/opt/homebrew/bin/limactl', '/usr/local/bin/limactl']; + for (const p of commonPaths) { + if (fs.existsSync(p)) return p; + } + + // check PATH + try { + const result = execSync('which limactl', {stdio: 'pipe', encoding: 'utf-8'}).trim(); + if (result) return result; + } catch { + // not found + } + + return null; +}; + +/** + * Check if the Lima VM exists and is running. + */ +const isVMRunning = (limactlBin) => { + try { + const output = execSync(`"${limactlBin}" list ${VM_NAME} --json`, { + stdio: 'pipe', + encoding: 'utf-8', + }).trim(); + + if (!output) return false; + + // limactl list --json outputs NDJSON (one JSON object per line) + const lines = output.split('\n').filter(l => l.trim()); + for (const line of lines) { + try { + const vm = JSON.parse(line); + if (vm.name === VM_NAME && vm.status === 'Running') return true; + } catch { + // skip malformed lines + } + } + + return false; + } catch { + return false; + } +}; + +/** + * Check if the Lima VM exists (regardless of status). + */ +const vmExists = (limactlBin) => { + try { + const output = execSync(`"${limactlBin}" list ${VM_NAME} --json`, { + stdio: 'pipe', + encoding: 'utf-8', + }).trim(); + + if (!output) return false; + + const lines = output.split('\n').filter(l => l.trim()); + for (const line of lines) { + try { + const vm = JSON.parse(line); + if (vm.name === VM_NAME) return true; + } catch { + // skip + } + } + + return false; + } catch { + return false; + } +}; + +/** + * Wait for the VM to reach Running status. + */ +const waitForVM = async (limactlBin, {maxWait = 60000, interval = 2000, debug} = {}) => { + const start = Date.now(); + while (Date.now() - start < maxWait) { + if (isVMRunning(limactlBin)) return true; + debug('waiting for Lima VM "%s" to start...', VM_NAME); + await new Promise(resolve => setTimeout(resolve, interval)); + } + return false; +}; + +/** + * Download Lima tarball with progress reporting. + */ +const downloadLima = (url, {debug, dest, task}) => new Promise((resolve, reject) => { + const download = require('../utils/download-x')(url, {debug, dest}); + download.on('done', result => { + task.title = 'Downloaded Lima'; + resolve(result); + }); + download.on('error', error => reject(error)); + download.on('progress', progress => { + task.title = `Downloading Lima ${require('listr2').color.dim(`[${progress.percentage}%]`)}`; + }); +}); + +module.exports = async (lando, options) => { + const debug = require('../utils/debug-shim')(lando.log); + const {color} = require('listr2'); + const axios = require('../utils/get-axios')(); + + if (getSetupEngine(lando, options) !== 'containerd') return; + + const userConfRoot = lando.config.userConfRoot || path.join(os.homedir(), '.lando'); + const binDir = path.join(userConfRoot, 'bin'); + + const limactlDest = path.join(binDir, 'limactl'); + const url = getLimaDownloadUrl(LIMA_VERSION); + + // ========================================================================= + // TASK 1: Install Lima + // ========================================================================= + options.tasks.push({ + title: 'Installing Lima', + id: 'setup-lima', + description: '@lando/lima (container VM)', + version: `Lima v${LIMA_VERSION}`, + dependsOn: ['setup-nerdctl'], + hasRun: async () => { + return findLimactl(binDir) !== null; + }, + canRun: async () => { + // verify download URL is reachable + await axios.head(url); + return true; + }, + task: async (ctx, task) => { + // ensure bin dir exists + fs.mkdirSync(binDir, {recursive: true}); + + // download the tarball to a temp location + const tmpDir = path.join(os.tmpdir(), `lando-lima-${Date.now()}`); + fs.mkdirSync(tmpDir, {recursive: true}); + + const tarball = path.join(tmpDir, `lima-${LIMA_VERSION}.tar.gz`); + await downloadLima(url, {debug, dest: tarball, task}); + + // extract limactl from the tarball + task.title = `Extracting Lima ${color.dim('...')}`; + execSync(`tar -xzf "${tarball}" -C "${tmpDir}" bin/limactl`, {stdio: 'pipe'}); + + // move limactl to bin dir + const extracted = path.join(tmpDir, 'bin', 'limactl'); + fs.copyFileSync(extracted, limactlDest); + require('../utils/make-executable')(['limactl'], path.dirname(limactlDest)); + + // cleanup + fs.rmSync(tmpDir, {recursive: true, force: true}); + + task.title = `Installed Lima to ${limactlDest}`; + }, + }); + + // ========================================================================= + // TASK 2: Create and start Lima VM + // ========================================================================= + options.tasks.push({ + title: 'Creating Lando container VM', + id: 'setup-lima-vm', + description: '@lando/lima-vm (containerd VM)', + version: `Lima VM ${VM_NAME}`, + dependsOn: ['setup-lima'], + hasRun: async () => { + const bin = findLimactl(binDir); + if (!bin) return false; + return isVMRunning(bin); + }, + canRun: async () => { + const bin = findLimactl(binDir); + if (!bin) throw new Error('limactl not found — Lima must be installed first'); + return true; + }, + task: async (ctx, task) => { + const bin = findLimactl(binDir) || limactlDest; + + // check if VM already exists + const exists = vmExists(bin); + + if (!exists) { + // create the VM + task.title = `Creating Lima VM "${VM_NAME}" ${color.dim('(this may take a minute)')}`; + debug('creating Lima VM "%s"', VM_NAME); + + const runCommand = require('../utils/run-command'); + await runCommand(bin, [ + 'create', + `--name=${VM_NAME}`, + '--containerd=system', + '--cpus=4', + '--memory=4', + '--disk=60', + '--plain', + 'template:default', + ], {debug}); + } + + // start the VM if not already running + if (!isVMRunning(bin)) { + task.title = `Starting Lima VM "${VM_NAME}" ${color.dim('(this may take a minute)')}`; + debug('starting Lima VM "%s"', VM_NAME); + + const runCommand = require('../utils/run-command'); + await runCommand(bin, ['start', VM_NAME], {debug}); + } + + // wait for VM to be running + task.title = `Waiting for Lima VM "${VM_NAME}" to start ${color.dim('...')}`; + const running = await waitForVM(bin, {debug}); + + if (!running) { + throw new Error(`Lima VM "${VM_NAME}" did not reach Running status within 60 seconds`); + } + + task.title = `Lima VM "${VM_NAME}" is running`; + }, + }); +}; diff --git a/hooks/lando-setup-containerd-engine.js b/hooks/lando-setup-containerd-engine.js new file mode 100644 index 000000000..299035591 --- /dev/null +++ b/hooks/lando-setup-containerd-engine.js @@ -0,0 +1,609 @@ +"use strict"; + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); +const getSetupEngine = require('../utils/get-setup-engine'); +const getBuildkitConfig = require('../utils/get-buildkit-config'); +const getContainerdPaths = require('../utils/get-containerd-paths'); +const getNerdctlConfig = require('../utils/get-nerdctl-config'); + +module.exports = async (lando, options) => { + const debug = require("../utils/debug-shim")(lando.log); + const {color} = require("listr2"); + const getUrl = require("../utils/get-containerd-download-url"); + const axios = require("../utils/get-axios")(); + + if (getSetupEngine(lando, options) !== 'containerd') return; + + const userConfRoot = lando.config.userConfRoot || path.join(os.homedir(), ".lando"); + const binDir = path.join(userConfRoot, "bin"); + const runDir = path.join(userConfRoot, "run"); + const configDir = path.join(userConfRoot, "config"); + const cniBinDir = lando.config.cniBinDir || "/usr/local/lib/lando/cni/bin"; + const cniConfDir = lando.config.cniNetconfPath || "/etc/lando/cni/finch"; + + // System-level binary directory for root-owned binaries + const systemBinDir = lando.config.containerdSystemBinDir || "/usr/local/lib/lando/bin"; + + // Socket path — sockets go in /run/lando/ (root-owned, group-accessible via systemd RuntimeDirectory) + const containerdPaths = getContainerdPaths(lando.config); + const socketPath = containerdPaths.containerdSocket; + + const ensurePassword = async (ctx, task, message) => { + if (ctx.password !== undefined || !lando.config.isInteractive) return; + + ctx.password = await task.prompt({ + type: "password", + name: "password", + message, + validate: async input => { + const opts = {debug, ignoreReturnCode: true, password: input}; + const response = await require("../utils/run-elevated")(["echo", "hello there"], opts); + if (response.code !== 0) return response.stderr; + return true; + }, + }); + }; + + // ========================================================================= + // Root-owned binaries: containerd, containerd-shim-runc-v2, runc, buildkitd, buildctl + // These get downloaded to temp, then `sudo cp` to /usr/local/lib/lando/bin/ + // ========================================================================= + + options.tasks.push({ + title: "Authorizing elevated access", + id: "setup-containerd-elevated-access", + description: "@lando/containerd authorization", + version: "elevated access", + hidden: true, + comments: { + "NOT INSTALLED": "Will prompt for sudo password before downloads", + }, + hasRun: async () => { + if (!lando.config.isInteractive) return true; + + const serviceFile = "/etc/systemd/system/lando-containerd.service"; + const shimBin = path.join(systemBinDir, "containerd-shim-runc-v2"); + const buildctlBin = path.join(systemBinDir, "buildctl"); + + return fs.existsSync(path.join(systemBinDir, "containerd")) + && fs.existsSync(shimBin) + && fs.existsSync(path.join(systemBinDir, "runc")) + && fs.existsSync(path.join(systemBinDir, "buildkitd")) + && fs.existsSync(buildctlBin) + && fs.existsSync(path.join(systemBinDir, "finch-daemon")) + && fs.existsSync(path.join(binDir, "nerdctl")) + && fs.existsSync(path.join(cniBinDir, "bridge")) + && fs.existsSync(serviceFile); + }, + canRun: async () => process.platform === "linux", + task: async (ctx, task) => { + await ensurePassword( + ctx, + task, + `Enter computer password for ${lando.config.username} to set up the containerd engine`, + ); + task.title = "Authorized elevated access"; + }, + }); + + // Binary definitions for root-owned binaries (installed to systemBinDir via sudo) + const rootBinaries = [ + { + name: "containerd", + id: "setup-containerd", + bin: lando.config.containerdBin || path.join(systemBinDir, "containerd"), + version: "2.0.4", + tarballEntries: ["bin/containerd", "bin/containerd-shim-runc-v2"], + }, + { + name: "buildkitd", + id: "setup-buildkitd", + bin: lando.config.buildkitdBin || path.join(systemBinDir, "buildkitd"), + version: "0.18.2", + tarballEntries: ["bin/buildkitd", "bin/buildctl"], + dependsOn: ["setup-containerd", "setup-containerd-elevated-access"], + }, + { + name: "finch-daemon", + id: "setup-finch-daemon", + bin: lando.config.finchDaemonBin || path.join(systemBinDir, "finch-daemon"), + version: "0.22.0", + tarballEntries: ["finch-daemon"], + dependsOn: ["setup-containerd"], + // finch-daemon uses a different URL pattern than containerd/nerdctl + customUrl: true, + }, + ]; + + // runc (direct binary download, also root-owned) + const runcVersion = "1.2.5"; + const runcArch = process.arch === "arm64" ? "arm64" : "amd64"; + const runcBin = path.join(systemBinDir, "runc"); + const runcUrl = `https://github.com/opencontainers/runc/releases/download/v${runcVersion}/runc.${runcArch}`; + + options.tasks.push({ + title: "Installing runc", + id: "setup-runc", + description: "@lando/runc (containerd engine)", + version: `runc v${runcVersion}`, + hasRun: async () => fs.existsSync(runcBin), + canRun: async () => { + await axios.head(runcUrl); + return true; + }, + dependsOn: ["setup-containerd"], + task: async (ctx, task) => { + // Download to temp location first + const tmpFile = path.join(os.tmpdir(), `lando-runc-${Date.now()}`); + + task.title = `Downloading runc...`; + const download = require("../utils/download-x")(runcUrl, {debug, dest: tmpFile}); + await new Promise((resolve, reject) => { + download.on("done", result => { + task.title = "Downloaded runc"; + resolve(result); + }); + download.on("error", error => reject(error)); + download.on("progress", progress => { + task.title = `Downloading runc ${color.dim(`[${progress.percentage}%]`)}`; + }); + }); + + await ensurePassword(ctx, task, `Enter computer password for ${lando.config.username} to install runc`); + + // sudo cp to system bin dir + task.title = "Installing runc to system..."; + await require("../utils/run-elevated")( + ["mkdir", "-p", systemBinDir], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["cp", tmpFile, runcBin], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["chmod", "755", runcBin], + {debug, password: ctx.password}, + ); + + // Cleanup temp + try { fs.unlinkSync(tmpFile); } catch { /* ignore */ } + + task.title = `Installed runc to ${runcBin}`; + }, + }); + + // Root-owned tarball binaries (containerd, buildkitd, finch-daemon) + for (const binary of rootBinaries) { + let url; + if (binary.customUrl && binary.name === "finch-daemon") { + const arch = process.arch === "arm64" ? "arm64" : "amd64"; + url = `https://github.com/runfinch/finch-daemon/releases/download/v${binary.version}/finch-daemon-${binary.version}-linux-${arch}.tar.gz`; + } else { + url = getUrl(binary.name === "buildkitd" ? "buildkit" : binary.name, {version: binary.version}); + } + + const task = { + title: `Installing ${binary.name}`, + id: binary.id, + description: `@lando/${binary.name} (containerd engine)`, + version: `${binary.name} v${binary.version}`, + hasRun: async () => fs.existsSync(binary.bin), + canRun: async () => { + await axios.head(url); + return true; + }, + task: async (ctx, task) => { + // Download the tarball to temp + const tmpDir = path.join(os.tmpdir(), `lando-${binary.name}-${Date.now()}`); + fs.mkdirSync(tmpDir, {recursive: true}); + + await new Promise((resolve, reject) => { + const download = require("../utils/download-x")(url, { + debug, + dest: path.join(tmpDir, `${binary.name}.tar.gz`), + }); + download.on("done", resolve); + download.on("error", reject); + download.on("progress", progress => { + task.title = `Downloading ${binary.name} ${color.dim(`[${progress.percentage}%]`)}`; + }); + }); + + // Extract binaries from the tarball to temp + task.title = `Extracting ${binary.name}...`; + const {execSync} = require("child_process"); + const entries = binary.tarballEntries || []; + execSync( + `tar -xzf "${path.join(tmpDir, binary.name + ".tar.gz")}" -C "${tmpDir}" ${entries.map(e => `"${e}"`).join(" ")}`, + {stdio: "pipe"}, + ); + + await ensurePassword(ctx, task, `Enter computer password for ${lando.config.username} to install ${binary.name}`); + + // sudo cp extracted files to system bin dir + task.title = `Installing ${binary.name} to system...`; + await require("../utils/run-elevated")( + ["mkdir", "-p", systemBinDir], + {debug, password: ctx.password}, + ); + + for (const entry of entries) { + const extracted = path.join(tmpDir, entry); + const destPath = path.join(systemBinDir, path.basename(entry)); + await require("../utils/run-elevated")( + ["cp", extracted, destPath], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["chmod", "755", destPath], + {debug, password: ctx.password}, + ); + } + + // Cleanup temp + fs.rmSync(tmpDir, {recursive: true, force: true}); + + task.title = `Installed ${binary.name} to ${systemBinDir}`; + }, + }; + + task.dependsOn = [...(binary.dependsOn || []), "setup-containerd-elevated-access"]; + options.tasks.push(task); + } + + // ========================================================================= + // User-owned binary: nerdctl (only talks to socket, no root needed) + // Stays in ~/.lando/bin/ + // ========================================================================= + + const nerdctlVersion = "2.0.5"; + const nerdctlBin = lando.config.nerdctlBin || path.join(binDir, "nerdctl"); + const nerdctlUrl = getUrl("nerdctl", {version: nerdctlVersion}); + + const cniPluginsVersion = "1.6.2"; + const cniPluginsArch = process.arch === "arm64" ? "arm64" : "amd64"; + const cniPluginsUrl = `https://github.com/containernetworking/plugins/releases/download/v${cniPluginsVersion}/cni-plugins-linux-${cniPluginsArch}-v${cniPluginsVersion}.tgz`; + + options.tasks.push({ + title: "Installing CNI plugins", + id: "setup-cni-plugins", + description: "@lando/cni-plugins (containerd engine)", + version: `cni-plugins v${cniPluginsVersion}`, + hasRun: async () => fs.existsSync(path.join(cniBinDir, "bridge")), + canRun: async () => { + await axios.head(cniPluginsUrl); + return true; + }, + dependsOn: ["setup-containerd", "setup-containerd-elevated-access"], + task: async (ctx, task) => { + const tmpDir = path.join(os.tmpdir(), `lando-cni-plugins-${Date.now()}`); + fs.mkdirSync(tmpDir, {recursive: true}); + + await new Promise((resolve, reject) => { + const download = require("../utils/download-x")(cniPluginsUrl, { + debug, + dest: path.join(tmpDir, "cni-plugins.tgz"), + }); + download.on("done", resolve); + download.on("error", reject); + download.on("progress", progress => { + task.title = `Downloading CNI plugins ${color.dim(`[${progress.percentage}%]`)}`; + }); + }); + + task.title = "Extracting CNI plugins..."; + const {execSync} = require("child_process"); + execSync( + `tar -xzf "${path.join(tmpDir, "cni-plugins.tgz")}" -C "${tmpDir}"`, + {stdio: "pipe"}, + ); + + await ensurePassword(ctx, task, `Enter computer password for ${lando.config.username} to install CNI plugins`); + + task.title = "Installing CNI plugins to system..."; + await require("../utils/run-elevated")( + ["mkdir", "-p", cniBinDir], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["bash", "-c", `for file in \"${tmpDir}\"/*; do [ -f \"$file\" ] && [ -x \"$file\" ] && cp \"$file\" \"${cniBinDir}\"/; done; chmod 755 \"${cniBinDir}\"/*`], + {debug, password: ctx.password}, + ); + + fs.rmSync(tmpDir, {recursive: true, force: true}); + task.title = `Installed CNI plugins to ${cniBinDir}`; + }, + }); + + options.tasks.push({ + title: "Installing nerdctl", + id: "setup-nerdctl", + description: "@lando/nerdctl (containerd engine)", + version: `nerdctl v${nerdctlVersion}`, + hasRun: async () => fs.existsSync(nerdctlBin), + canRun: async () => { + await axios.head(nerdctlUrl); + return true; + }, + dependsOn: ["setup-buildkitd"], + task: async (ctx, task) => { + // Download the tarball + const tmpDir = path.join(os.tmpdir(), `lando-nerdctl-${Date.now()}`); + fs.mkdirSync(tmpDir, {recursive: true}); + fs.mkdirSync(binDir, {recursive: true}); + + await new Promise((resolve, reject) => { + const download = require("../utils/download-x")(nerdctlUrl, { + debug, + dest: path.join(tmpDir, "nerdctl.tar.gz"), + }); + download.on("done", resolve); + download.on("error", reject); + download.on("progress", progress => { + task.title = `Downloading nerdctl ${color.dim(`[${progress.percentage}%]`)}`; + }); + }); + + // Extract only nerdctl (no rootless scripts needed for rootful mode) + task.title = "Extracting nerdctl..."; + const {execSync} = require("child_process"); + execSync( + `tar -xzf "${path.join(tmpDir, "nerdctl.tar.gz")}" -C "${tmpDir}" "nerdctl"`, + {stdio: "pipe"}, + ); + + // Copy to user bin dir + const extracted = path.join(tmpDir, "nerdctl"); + const destPath = path.join(binDir, "nerdctl"); + fs.copyFileSync(extracted, destPath); + require("../utils/make-executable")(["nerdctl"], binDir); + + // Cleanup temp + fs.rmSync(tmpDir, {recursive: true, force: true}); + + task.title = `Installed nerdctl to ${destPath}`; + }, + }); + + // ========================================================================= + // Systemd service configuration task + // Runs AFTER all binary installs are complete + // ========================================================================= + + options.tasks.push({ + title: "Configuring containerd service", + id: "setup-containerd-service", + description: "@lando/containerd-service (systemd)", + version: "containerd service v1.0.0", + dependsOn: ["setup-containerd", "setup-runc", "setup-buildkitd", "setup-finch-daemon", "setup-nerdctl", "setup-cni-plugins"], + hasRun: async () => { + // Check if the systemd service exists, is enabled, AND finch-daemon socket is present + try { + const {execSync} = require("child_process"); + const serviceFile = '/etc/systemd/system/lando-containerd.service'; + const result = execSync("systemctl is-enabled lando-containerd.service 2>/dev/null", { + stdio: "pipe", + encoding: "utf8", + }).trim(); + if (result !== "enabled") return false; + if (!fs.existsSync(serviceFile)) return false; + const serviceContents = fs.readFileSync(serviceFile, 'utf8'); + if (!serviceContents.includes('buildkitd --config')) return false; + if (!serviceContents.includes(containerdPaths.buildkitSocket)) return false; + if (!serviceContents.includes(cniBinDir)) return false; + // Ensure CNI directory has lando group write permissions — without this, + // ensureCniNetwork() hits EACCES at runtime. Also verify the service file + // includes the ExecStartPre fix so permissions are maintained across restarts. + if (!serviceContents.includes(`chgrp lando ${cniConfDir}`)) return false; + // Ensure the service pre-creates /run/containerd/s/ (shim socket directory fix) + if (!serviceContents.includes('/run/containerd/s')) return false; + // Ensure the service sets NERDCTL_TOML so OCI hooks find Lando's CNI config + // (without this, hooks deadlock on /etc/cni/net.d/.nerdctl.lock) + if (!serviceContents.includes('NERDCTL_TOML=')) return false; + // Ensure the service enables IP forwarding (required for container outbound internet) + if (!serviceContents.includes('net.ipv4.ip_forward=1')) return false; + // Ensure the service creates iptables FORWARD rules for Lando subnets + if (!serviceContents.includes('LANDO-FORWARD')) return false; + if (!fs.existsSync(path.join(cniBinDir, 'bridge'))) return false; + try { + const cniStats = fs.statSync(cniConfDir); + if ((cniStats.mode & 0o020) === 0) return false; + } catch { return false; } + if (!fs.existsSync("/run/lando/finch.sock") || !fs.existsSync("/run/lando/containerd.sock")) return false; + if (!fs.existsSync(path.join(configDir, "finch-daemon.toml"))) return false; + if (!fs.existsSync(path.join(configDir, "buildkitd.toml"))) return false; + // Ensure the containerd config uses /run/lando/containerd as state dir (shim socket fix) + try { + const ctrdConfig = fs.readFileSync(path.join(configDir, "containerd-config.toml"), 'utf8'); + if (!ctrdConfig.includes('state = "/run/lando/containerd"')) return false; + } catch { return false; } + return true; + } catch { + return false; + } + }, + canRun: async () => { + // Require Linux for systemd + if (process.platform !== "linux") return false; + return true; + }, + task: async (ctx, task) => { + await ensurePassword(ctx, task, `Enter computer password for ${lando.config.username} to configure containerd service`); + + const homeDir = os.homedir(); + const username = lando.config.username || os.userInfo().username; + const logDir = path.join(userConfRoot, 'logs'); + + // 1. Create lando group if it doesn't exist + task.title = "Creating lando group..."; + await require("../utils/run-elevated")( + ["bash", "-c", "getent group lando >/dev/null 2>&1 || groupadd lando"], + {debug, password: ctx.password}, + ); + + // 2. Add current user to lando group + task.title = `Adding ${username} to lando group...`; + await require("../utils/run-elevated")( + ["usermod", "-aG", "lando", username], + {debug, password: ctx.password}, + ); + + // 3. Write containerd config to ~/.lando/config/containerd-config.toml + task.title = "Writing containerd config..."; + fs.mkdirSync(configDir, {recursive: true}); + fs.mkdirSync(logDir, {recursive: true}); + const configPath = path.join(configDir, "containerd-config.toml"); + // State dir goes under /run/lando/ (tmpfs, created by systemd RuntimeDirectory=lando). + // This ensures shim bundles are cleaned up on reboot — preventing stale-bundle + // "get state: context deadline exceeded" errors. The persistent user-space dir + // (~/.lando/state/containerd) is no longer used for containerd state. + const stateDir = "/run/lando/containerd"; + const rootDir = path.join(userConfRoot, "data", "containerd"); + // rootDir is persistent (images, snapshots); stateDir is created at service + // start by containerd itself (it runs as root under RuntimeDirectory). + fs.mkdirSync(rootDir, {recursive: true}); + + const getContainerdConfig = require("../utils/get-containerd-config"); + const config = getContainerdConfig({ + socketPath, + stateDir, + rootDir, + debug: false, + }); + fs.writeFileSync(configPath, config, "utf8"); + + const buildkitConfigPath = path.join(configDir, 'buildkitd.toml'); + const nerdctlConfigPath = path.join(configDir, 'nerdctl.toml'); + const buildkitCacheDir = path.join(userConfRoot, 'cache', 'buildkit'); + fs.mkdirSync(buildkitCacheDir, {recursive: true}); + fs.writeFileSync(buildkitConfigPath, getBuildkitConfig({ + containerdSocket: socketPath, + buildkitSocket: containerdPaths.buildkitSocket, + cacheDir: buildkitCacheDir, + debug: false, + }), 'utf8'); + fs.writeFileSync(nerdctlConfigPath, getNerdctlConfig({containerdSocket: socketPath, cniPath: cniBinDir}), 'utf8'); + + // 4. Create finch-daemon config so it talks to Lando's isolated containerd socket + const finchConfigPath = path.join(configDir, 'finch-daemon.toml'); + fs.writeFileSync(finchConfigPath, getNerdctlConfig({containerdSocket: socketPath, cniPath: cniBinDir}), 'utf8'); + + // 5. Create systemd service file + task.title = "Creating systemd service..."; + const finchSocket = containerdPaths.finchSocket; + const finchCredSocket = containerdPaths.finchCredentialSocket; + const finchPidFile = path.join(runDir, 'finch-daemon.pid'); + const uid = process.getuid ? process.getuid() : 1000; + const serviceContent = [ + "[Unit]", + "Description=Lando Containerd", + "After=network.target", + "", + "[Service]", + "Type=simple", + "RuntimeDirectory=lando", + // Pre-create /run/containerd/s/ — containerd v2's shim socket directory is + // hardcoded to defaults.DefaultStateDir ("/run/containerd"). Shim socket + // filenames are unique per containerd instance (sha256 of address+ns+id), so + // sharing this directory with system containerd is safe. Without this mkdir + // the first container start fails with ENOENT for the shim socket. + `ExecStartPre=/bin/sh -c "mkdir -p /run/containerd/s ${cniConfDir} ${cniBinDir} 2>/dev/null || true; chgrp lando ${cniConfDir} 2>/dev/null || true; chmod g+w ${cniConfDir} 2>/dev/null || true"`, + // Enable IPv4 forwarding — required for container outbound internet access. + // The CNI bridge plugin with isGateway:true also sets this per-container, + // but doing it here ensures forwarding is enabled before any container starts + // and survives across container restarts without relying on the plugin chain. + 'ExecStartPre=/bin/sh -c "sysctl -w net.ipv4.ip_forward=1 >/dev/null 2>&1 || true"', + // Create iptables FORWARD rules for Lando's container subnets (10.4.0.0/16). + // The CNI firewall plugin manages per-container rules in CNI-FORWARD, but + // the host's default FORWARD policy may be DROP (common on Ubuntu/Debian). + // These rules ensure outbound traffic from containers and return traffic to + // containers is always accepted, regardless of the host firewall configuration. + // Uses a dedicated LANDO-FORWARD chain to avoid interfering with other rules. + 'ExecStartPre=/bin/sh -c "' + [ + 'iptables -N LANDO-FORWARD 2>/dev/null || true', + 'iptables -C FORWARD -j LANDO-FORWARD 2>/dev/null || iptables -I FORWARD 1 -j LANDO-FORWARD', + 'iptables -F LANDO-FORWARD', + 'iptables -A LANDO-FORWARD -s 10.4.0.0/16 -j ACCEPT', + 'iptables -A LANDO-FORWARD -d 10.4.0.0/16 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT', + 'iptables -A LANDO-FORWARD -j RETURN', + ].join('; ') + '"', + `Environment=PATH=${systemBinDir}:/usr/sbin:/usr/bin:/sbin:/bin`, + `Environment=CONTAINERD_ADDRESS=${socketPath}`, + // CRITICAL: NERDCTL_TOML tells nerdctl's OCI hooks where to find Lando's config. + // Without this, hooks run as root and read the default /etc/nerdctl/nerdctl.toml + // (which doesn't exist), falling back to /etc/cni/net.d/ for CNI — causing a + // self-deadlock on /etc/cni/net.d/.nerdctl.lock (flock on two FDs to the same + // file). With this env var, hooks read Lando's nerdctl.toml and use + // /etc/lando/cni/ for CNI configs, avoiding the system CNI directory entirely. + `Environment=NERDCTL_TOML=${nerdctlConfigPath}`, + // Belt-and-suspenders: set standard CNI env vars so CNI plugin libraries + // also resolve to Lando's paths even if nerdctl's config loading is bypassed. + `Environment=CNI_PATH=${cniBinDir}`, + `ExecStart=${systemBinDir}/containerd --config ${configPath}`, + `ExecStartPost=/bin/sh -c "while ! [ -S ${socketPath} ]; do sleep 0.1; done; chgrp lando ${socketPath}; chmod 660 ${socketPath}"`, + `ExecStartPost=/bin/sh -c "${systemBinDir}/buildkitd --config ${buildkitConfigPath} >/dev/null 2>>/run/lando/buildkitd.log &"`, + `ExecStartPost=/bin/sh -c "while ! [ -S ${containerdPaths.buildkitSocket} ]; do sleep 0.1; done; chgrp lando ${containerdPaths.buildkitSocket}; chmod 660 ${containerdPaths.buildkitSocket}"`, + `ExecStartPost=/bin/sh -c "PATH=${binDir}:${systemBinDir}:/usr/sbin:$$PATH ${systemBinDir}/finch-daemon --config-file ${finchConfigPath} --socket-addr ${finchSocket} --socket-owner ${uid} --pidfile ${finchPidFile} --credential-socket-addr ${finchCredSocket} --credential-socket-owner ${uid} &"`, + `ExecStartPost=/bin/sh -c "while ! [ -S ${finchSocket} ]; do sleep 0.1; done; chgrp lando ${finchSocket}; chmod 660 ${finchSocket}"`, + "Restart=always", + "RestartSec=5", + "", + "[Install]", + "WantedBy=multi-user.target", + "", + ].join("\n"); + + // Write service file to temp then sudo cp to /etc/systemd/system/ + const tmpServiceFile = path.join(os.tmpdir(), `lando-containerd-${Date.now()}.service`); + fs.writeFileSync(tmpServiceFile, serviceContent, "utf8"); + + await require("../utils/run-elevated")( + ["cp", tmpServiceFile, "/etc/systemd/system/lando-containerd.service"], + {debug, password: ctx.password}, + ); + try { fs.unlinkSync(tmpServiceFile); } catch { /* ignore */ } + + // 5. /run/lando/ is created automatically by systemd via RuntimeDirectory=lando + // Ensure ~/.lando/run/ still exists for PID files + fs.mkdirSync(runDir, {recursive: true}); + + // 6. Create CNI directories and set group-writable permissions for lando group + // Without this, ensureCniNetwork() hits EACCES when called from user-land + task.title = "Creating CNI directories..."; + await require("../utils/run-elevated")( + ["bash", "-c", `mkdir -p \"${cniConfDir}\" \"${cniBinDir}\"`], + {debug, password: ctx.password}, + ); + task.title = "Setting CNI directory permissions..."; + await require("../utils/run-elevated")( + ["chgrp", "lando", cniConfDir], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["chmod", "g+w", cniConfDir], + {debug, password: ctx.password}, + ); + + // 7. Reload systemd, enable and start the service + task.title = "Enabling and starting containerd service..."; + await require("../utils/run-elevated")( + ["systemctl", "daemon-reload"], + {debug, password: ctx.password}, + ); + await require("../utils/run-elevated")( + ["systemctl", "enable", "lando-containerd.service"], + {debug, password: ctx.password}, + ); + // Use restart (not start) in case the service was already running with old config + await require("../utils/run-elevated")( + ["systemctl", "restart", "lando-containerd.service"], + {debug, password: ctx.password}, + ); + + task.title = "Configured containerd service (lando-containerd.service)"; + }, + }); +}; diff --git a/hooks/lando-setup-engine-select.js b/hooks/lando-setup-engine-select.js new file mode 100644 index 000000000..d2abbb37a --- /dev/null +++ b/hooks/lando-setup-engine-select.js @@ -0,0 +1,78 @@ +"use strict"; + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +const getSetupEngine = require('../utils/get-setup-engine'); + +module.exports = async (lando, options) => { + const debug = require("../utils/debug-shim")(lando.log); + + options.tasks.push({ + title: "Selecting container engine", + id: "setup-engine-select", + description: "@lando/engine-select", + version: "engine selection", + hasRun: async () => { + return getSetupEngine(lando, options) !== 'auto'; + }, + canRun: async () => true, + task: async (ctx, task) => { + const engine = lando.config.engine || "auto"; + if (engine !== "auto") { + options.engine = engine; + task.title = `Container engine: ${engine}`; + return; + } + + const cached = lando.cache.get('engine-selection'); + if (cached === 'docker' || cached === 'containerd') { + options.engine = cached; + task.title = `Container engine: ${cached}`; + debug('engine selection from cache: %s', cached); + return; + } + + let selection = "docker"; + + // Non-interactive: auto-detect + if (!process.stdin.isTTY || options.yes) { + // Check if Docker is installed and working + const dockerBin = lando.config.dockerBin || require("../utils/get-docker-x")(); + if (fs.existsSync(dockerBin)) { + selection = "docker"; + debug("auto-selected docker engine (Docker binary found)"); + } else { + // Check if containerd binaries exist + const binDir = path.join(lando.config.userConfRoot || path.join(os.homedir(), ".lando"), "bin"); + const containerdBin = lando.config.containerdBin || path.join(binDir, "containerd"); + if (fs.existsSync(containerdBin)) { + selection = "containerd"; + debug("auto-selected containerd engine (no Docker, containerd found)"); + } else { + selection = "docker"; + debug("auto-selected docker engine (default)"); + } + } + } else { + // Interactive: prompt user + selection = await task.prompt({ + type: "select", + message: "Which container engine would you like to use?", + choices: [ + {name: "Docker (recommended — wider compatibility)", value: "docker"}, + {name: "containerd (experimental — no Docker dependency)", value: "containerd"}, + ], + initial: 0, + }); + } + + options.engine = selection; + lando.config.engine = selection; + lando.cache.set("engine-selection", selection, {persist: true}); + task.title = `Container engine: ${selection}`; + debug("engine selection: %s", selection); + }, + }); +}; diff --git a/hooks/lando-setup-landonet.js b/hooks/lando-setup-landonet.js index b882e0fc4..9aa767f3a 100644 --- a/hooks/lando-setup-landonet.js +++ b/hooks/lando-setup-landonet.js @@ -3,6 +3,7 @@ const _ = require('lodash'); const fs = require('fs'); const getDockerDesktopBin = require('../utils/get-docker-desktop-x'); +const getSetupEngine = require('../utils/get-setup-engine'); /** * Installs the Lando Development Certificate Authority (CA) on Windows systems. @@ -19,8 +20,12 @@ module.exports = async (lando, options) => { if (options.skipNetworking) return; // we need access to dat socket for this to work - const dependsOn = ['linux', 'wsl'] - .includes(lando.config.os.landoPlatform) ? ['setup-build-engine-group', 'setup-build-engine'] : ['setup-build-engine']; + const isContainerd = getSetupEngine(lando, options) === 'containerd'; + const dependsOn = isContainerd + ? ['setup-containerd-service'] + : ['linux', 'wsl'].includes(lando.config.os.landoPlatform) + ? ['setup-build-engine-group', 'setup-build-engine'] + : ['setup-build-engine']; options.tasks.push({ title: `Creating Landonet`, @@ -36,14 +41,15 @@ module.exports = async (lando, options) => { }, hasRun: async () => { // if docker isnt even installed then this is easy - if (lando.engine.dockerInstalled === false) return false; + if (!isContainerd && lando.engine.dockerInstalled === false) return false; // we also want to do an additional check on docker-destkop if (lando.config.os.landoPlatform !== 'linux' && !fs.existsSync(getDockerDesktopBin())) return false; - // otherwise attempt to sus things out + // passive check: see if the daemon is already up without trying to start it try { - await lando.engine.daemon.up({max: 10, backoff: 1000}); + const isUp = await lando.engine.daemon.isUp(); + if (!isUp) return false; const landonet = lando.engine.getNetwork(lando.config.networkBridge); await landonet.inspect(); return lando.versions.networking > 1; @@ -53,9 +59,9 @@ module.exports = async (lando, options) => { } }, task: async (ctx, task) => { - // we reinstantiate instead of using lando.engine.daemon so we can ensure an up-to-date docker bin - const LandoDaemon = require('../lib/daemon'); - const daemon = new LandoDaemon(lando.cache, lando.events, undefined, lando.log); + const daemon = isContainerd + ? lando.engine.daemon + : new (require('../lib/daemon'))(lando.cache, lando.events, undefined, lando.log); // we need docker up for this await daemon.up({max: 5, backoff: 1000}); diff --git a/index.js b/index.js index 090bed015..011481efe 100644 --- a/index.js +++ b/index.js @@ -17,6 +17,16 @@ const defaults = { appLabels: { 'io.lando.container': 'TRUE', }, + engine: 'auto', + containerdBin: null, + nerdctlBin: null, + buildkitdBin: null, + containerdSocket: null, + supportedContainerdVersions: { + containerd: {min: '2.0.0', max: '3.0.0', link: 'https://github.com/containerd/containerd/releases'}, + nerdctl: {min: '2.0.0', max: '3.0.0', link: 'https://github.com/containerd/nerdctl/releases'}, + buildkit: {min: '0.17.0', max: '1.0.0', link: 'https://github.com/moby/buildkit/releases'}, + }, proxy: 'ON', proxyName: 'landoproxyhyperion5000gandalfedition', proxyCache: 'proxyCache', @@ -83,6 +93,9 @@ module.exports = async lando => { // move v3 scripts directories as needed lando.events.on('pre-setup', 0, async () => await require('./hooks/lando-copy-v3-scripts')(lando)); + // Engine selection (before any engine-specific setup) + lando.events.once('pre-setup', async options => await require('./hooks/lando-setup-engine-select')(lando, options)); + // ensure we setup docker if needed lando.events.once('pre-setup', async options => await require(`./hooks/lando-setup-build-engine-${platform}`)(lando, options)); @@ -97,6 +110,17 @@ module.exports = async lando => { // ensure we setup docker-compose if needed lando.events.once('pre-setup', async options => await require('./hooks/lando-setup-orchestrator')(lando, options)); + // ensure we setup containerd engine if needed + lando.events.once('pre-setup', async options => await require('./hooks/lando-setup-containerd-engine')(lando, options)); + + // ensure we setup lima for containerd on macOS + if (platform === 'darwin') { + lando.events.once('pre-setup', async options => await require('./hooks/lando-setup-containerd-engine-darwin')(lando, options)); + } + + // ensure we check containerd engine status + lando.events.once('pre-engine-autostart', async () => await require('./hooks/lando-setup-containerd-engine-check')(lando)); + // ensure we setup landonet lando.events.once('pre-setup', async options => await require('./hooks/lando-setup-landonet')(lando, options)); @@ -124,6 +148,7 @@ module.exports = async lando => { // run engine compat checks lando.events.on('almost-ready', 2, async () => await require('./hooks/lando-get-compat')(lando)); + lando.events.on('almost-ready', 2, async () => await require('./hooks/lando-get-containerd-compat')(lando)); // throw error if engine is not available lando.events.once('pre-engine-autostart', async () => await require('./hooks/lando-setup-check')(lando)); diff --git a/lib/app.js b/lib/app.js index f1dc77a76..cdf350503 100644 --- a/lib/app.js +++ b/lib/app.js @@ -104,7 +104,9 @@ module.exports = class App { */ this.log = new Log(_.merge({}, lando.config, {logName: this.name})); this.shell = new Shell(this.log); - this.engine = require('../utils/setup-engine')( + // Use the engine from lando (created by BackendManager) when available, + // falling back to setup-engine for legacy compatibility + this.engine = lando.engine || require('../utils/setup-engine')( lando.config, lando.cache, lando.events, diff --git a/lib/backend-manager.js b/lib/backend-manager.js new file mode 100644 index 000000000..f1789bbf8 --- /dev/null +++ b/lib/backend-manager.js @@ -0,0 +1,341 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + + +/** + * BackendManager — Factory that creates the right Engine based on config. + * + * This is designed as a **drop-in replacement** for `utils/setup-engine.js`. + * Instead of always creating a Docker-backed Engine, it inspects `config.engine` + * to choose the appropriate backend: + * + * - `"docker"` — Uses DockerDaemon, DockerContainer, DockerCompose (identical to setup-engine.js) + * - `"containerd"` — Uses ContainerdDaemon, ContainerdContainer, docker-compose via finch-daemon + * - `"auto"` (default) — Auto-detects: prefers containerd if binaries exist, falls back to Docker + * + * ## Usage + * + * ```js + * const BackendManager = require('./backend-manager'); + * const manager = new BackendManager(config, cache, events, log, shell); + * const engine = manager.createEngine('lando'); + * ``` + * + * This produces the same `Engine` instance that `setup-engine.js` returns, + * making it a transparent swap. + * + * @since 4.0.0 + */ +class BackendManager { + /** + * Create a BackendManager. + * + * @param {Object} config - The full Lando config object. + * @param {Object} cache - A Lando Cache instance. + * @param {Object} events - A Lando Events instance. + * @param {Object} log - A Lando Log instance. + * @param {Object} shell - A Lando Shell instance. + */ + constructor(config, cache, events, log, shell) { + this.config = config; + this.cache = cache; + this.events = events; + this.log = log; + this.shell = shell; + this.debug = require('../utils/debug-shim')(log); + } + + /** + * Create an Engine with the appropriate backend. + * + * Reads `this.config.engine` to determine which backend to use. + * Returns a fully wired `Engine` instance ready for use by `lando.engine`. + * + * @param {string} [id='lando'] - The Lando instance identifier. + * @return {Engine} A configured Engine instance. + */ + createEngine(id = 'lando') { + const engineType = this.config.engine || 'auto'; + + switch (engineType) { + case 'containerd': + return this._createContainerdEngine(id); + case 'docker': + return this._createDockerEngine(id); + case 'auto': + default: + return this._createAutoEngine(id); + } + } + + /** + * Create a Docker-backed Engine. + * + * This replicates the exact logic from `utils/setup-engine.js`: + * - Instantiates LandoDaemon with the same constructor args + * - Instantiates Landerode with engineConfig + * - Creates a compose function that delegates to `lib/compose.js` via `shell.sh()` + * - Returns `new Engine(daemon, docker, compose, config)` + * + * @param {string} id - The Lando instance identifier. + * @return {Engine} A Docker-backed Engine instance. + * @private + */ + _createDockerEngine(id) { + const Engine = require('./engine'); + const Landerode = require('./docker'); + const LandoDaemon = require('./daemon'); + const dockerCompose = require('./compose'); + + const {orchestratorBin, orchestratorVersion, dockerBin, engineConfig} = this.config; + + const docker = new Landerode(engineConfig, id); + const daemon = new LandoDaemon( + this.cache, + this.events, + dockerBin, + this.log, + this.config.process, + orchestratorBin, + orchestratorVersion, + this.config.userConfRoot, + ); + + const compose = (cmd, datum) => { + const run = dockerCompose[cmd](datum.compose, datum.project, datum.opts || {}); + return this.shell.sh([orchestratorBin].concat(run.cmd), run.opts); + }; + + this.debug('created docker engine backend'); + return new Engine(daemon, docker, compose, this.config); + } + + /** + * Create a containerd-backed Engine. + * + * Uses ContainerdDaemon and ContainerdContainer from `lib/backends/containerd/` + * to wire up an Engine that talks to Lando's own isolated containerd + buildkitd + * stack. + * + * Compose operations use docker-compose (the same binary as the Docker path) + * pointed at finch-daemon's Docker-compatible socket via DOCKER_HOST. This avoids + * nerdctl's rootless-vs-rootful issues entirely: + * + * docker-compose ---> DOCKER_HOST=unix://~/.lando/run/finch.sock ---> finch-daemon ---> containerd + * + * The compose function follows the same `(cmd, datum) => Promise` signature + * as the Docker path: it calls `compose[cmd](...)` from `lib/compose.js` to get a + * `{cmd, opts}` shell descriptor, then executes via `shell.sh([orchestratorBin, ...cmd], opts)`. + * + * @param {string} id - The Lando instance identifier. + * @return {Engine} A containerd-backed Engine instance. + * @private + */ + _createContainerdEngine(id) { + const Engine = require('./engine'); + const {ContainerdDaemon, ContainerdContainer} = require('./backends/containerd'); + const dockerCompose = require('./compose'); + + const userConfRoot = this.config.userConfRoot || path.join(os.homedir(), '.lando'); + const systemBinDir = this.config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + + // Resolve binary paths — config overrides take precedence, then standard locations + const containerdBin = this.config.containerdBin || path.join(systemBinDir, 'containerd'); + const nerdctlBin = this.config.nerdctlBin || path.join(userConfRoot, 'bin', 'nerdctl'); + const buildkitdBin = this.config.buildkitdBin || path.join(systemBinDir, 'buildkitd'); + const socketPath = this.config.containerdSocket || '/run/lando/containerd.sock'; + const buildkitSocket = this.config.buildkitSocket || '/run/lando/buildkitd.sock'; + // Use docker-compose pointed at finch-daemon's Docker-compatible API via DOCKER_HOST. + // Resolve the compose binary here (config may not have it yet at construction time). + const composeVersion = this.config.orchestratorVersion || '2.31.0'; + const orchestratorBin = this.config.orchestratorBin + || path.join(userConfRoot, 'bin', `docker-compose-v${composeVersion}`); + + // Create the daemon backend + const daemon = new ContainerdDaemon({ + userConfRoot, + containerdBin, + buildkitdBin, + systemBinDir, + nerdctlBin, + socketPath, + events: this.events, + cache: this.cache, + log: this.log, + }); + + // Set daemon.compose to the orchestrator binary path so that + // Engine.composeInstalled (which checks fs.existsSync(config.orchestratorBin)) + // and any code that reads daemon.compose both resolve correctly. + daemon.compose = orchestratorBin; + + // Get the finch-daemon socket path — used by ContainerdContainer via Dockerode + const finchSocket = daemon.finchDaemon.getSocketPath(); + + // Create the container backend — this becomes engine.docker. + // Engine stores it as `this.docker` (no Docker-specific handling) and router.js + // calls the same ContainerBackend interface methods (list, scan, isRunning, remove, + // stop) on it, so ContainerdContainer is a transparent drop-in for Landerode here. + // + // ContainerdContainer uses Dockerode pointed at finch-daemon's Docker-compatible + // socket instead of shelling out to nerdctl. This avoids nerdctl's rootless-vs-rootful + // issues ("rootless containerd not running"). + const rawDocker = new ContainerdContainer({ + finchSocket, + id, + debug: this.debug, + }); + + // Wrap ContainerdContainer methods to return Bluebird promises. + // Lando's router.js uses Bluebird methods (.each, .tap, .map) on the + // return values from docker.list(), docker.isRunning(), etc. + const Promise = require('./promise'); + const docker = new Proxy(rawDocker, { + get(target, prop) { + const value = target[prop]; + if (typeof value === 'function') { + return (...args) => { + const result = value.apply(target, args); + // Wrap Promise-like returns in Bluebird + if (result && typeof result.then === 'function') { + return Promise.resolve(result); + } + return result; + }; + } + return value; + }, + }); + + // Use the same compose.js as the Docker path, but route through + // finch-daemon's Docker-compatible socket via DOCKER_HOST. + const ensureComposeCniNetworks = require('../utils/ensure-compose-cni-networks'); + const removeStaleComposeNetworks = require('../utils/remove-stale-compose-networks'); + /** @type {Object} Shared env vars for all compose invocations */ + const composeEnv = { + ...process.env, + DOCKER_HOST: `unix://${finchSocket}`, + DOCKER_BUILDKIT: '1', + BUILDKIT_HOST: `unix://${buildkitSocket}`, + }; + + const removeComposeCniConflists = require('../utils/remove-compose-cni-conflists'); + const compose = async (cmd, datum) => { + if (cmd === 'start') { + // Two-phase start for containerd backend: + // + // finch-daemon has two interacting issues: + // 1. It auto-creates Docker API networks (without compose labels) when + // it discovers CNI conflist files. docker-compose v2 then rejects + // these: "network was not created by compose". + // 2. When creating a network, it writes CNI conflist files that include + // the portmap plugin. The portmap plugin fails on HostPort:0 (random + // port), which Docker handles via port allocation before container start. + // + // Fix: remove conflist files and stale networks so docker-compose can + // create networks fresh (with proper labels), then overwrite the + // conflist files to remove portmap before starting containers. + + // Phase 0: Remove CNI conflist files and Docker API networks so + // docker-compose sees a clean slate. The conflist files cause + // finch-daemon to report ghost networks that lack compose labels. + removeComposeCniConflists(datum.compose, datum.project, {debug: this.debug}); + await removeStaleComposeNetworks(rawDocker.dockerode, datum.project, this.debug); + + // Phase 1: Create networks, volumes, and containers without starting. + // docker-compose creates networks via finch-daemon (which writes + // conflist files WITH compose labels but also WITH portmap). + this.debug('containerd compose phase 1: creating networks and containers (--no-start)'); + const createOpts = {...(datum.opts || {}), noStart: true, background: false}; + const createRun = dockerCompose['start'](datum.compose, datum.project, createOpts); + await this.shell.sh([orchestratorBin].concat(createRun.cmd), { + ...(createRun.opts || {}), + env: {...composeEnv, ...(createRun.opts?.env || {})}, + }); + + // Phase 2: Overwrite conflist files to remove portmap plugin. + // finch-daemon wrote conflists with portmap during Phase 1. + // Our ensureCniNetwork overwrites them with the correct plugin chain + // (bridge → firewall → tuning, no portmap). The compose labels and + // subnet/bridge config written by finch-daemon are replaced with our + // standard config, but that's fine — the OCI hook only needs the + // plugin chain and network name/subnet. + this.debug('containerd compose phase 2: overwriting conflist files (removing portmap)'); + ensureComposeCniNetworks(datum.compose, datum.project, {debug: this.debug}); + + // Phase 3: Start the created containers. OCI hooks now read our + // conflist (without portmap) instead of finch-daemon's. + this.debug('containerd compose phase 3: starting containers'); + const startRun = dockerCompose[cmd](datum.compose, datum.project, datum.opts || {}); + return this.shell.sh([orchestratorBin].concat(startRun.cmd), { + ...(startRun.opts || {}), + env: {...composeEnv, ...(startRun.opts?.env || {})}, + }); + } + + const run = dockerCompose[cmd](datum.compose, datum.project, datum.opts || {}); + return this.shell.sh([orchestratorBin].concat(run.cmd), { + ...(run.opts || {}), + env: {...composeEnv, ...(run.opts?.env || {})}, + }); + }; + + this.debug('created containerd engine backend'); + return new Engine(daemon, docker, compose, this.config); + } + + /** + * Auto-detect the best available engine backend. + * + * Detection order: + * 1. Check if containerd binaries exist at `~/.lando/bin/containerd` (or config override paths). + * 2. If all three binaries (containerd, nerdctl, buildkitd) exist, use containerd. + * 3. Otherwise, fall back to Docker. + * + * Logs which engine was selected. + * + * @param {string} id - The Lando instance identifier. + * @return {Engine} An Engine instance using the auto-detected backend. + * @private + */ + _createAutoEngine(id) { + const userConfRoot = this.config.userConfRoot || path.join(os.homedir(), '.lando'); + + // Resolve binary paths — config overrides take precedence + const systemBinDir = this.config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + const containerdBin = this.config.containerdBin || path.join(systemBinDir, 'containerd'); + const nerdctlBin = this.config.nerdctlBin || path.join(userConfRoot, 'bin', 'nerdctl'); + const buildkitdBin = this.config.buildkitdBin || path.join(systemBinDir, 'buildkitd'); + + // Check if all containerd binaries exist + const hasContainerd = fs.existsSync(containerdBin); + const hasNerdctl = fs.existsSync(nerdctlBin); + const hasBuildkitd = fs.existsSync(buildkitdBin); + + if (hasContainerd && hasNerdctl && hasBuildkitd) { + this.debug('auto-detected containerd engine (all binaries found at %s)', path.join(userConfRoot, 'bin')); + return this._createContainerdEngine(id); + } + + // Log what was missing if some but not all binaries were found + if (hasContainerd || hasNerdctl || hasBuildkitd) { + const missing = []; + if (!hasContainerd) missing.push('containerd'); + if (!hasNerdctl) missing.push('nerdctl'); + if (!hasBuildkitd) missing.push('buildkitd'); + this.debug( + 'containerd binaries partially found (missing: %s), falling back to docker', + missing.join(', '), + ); + } else { + this.debug('no containerd binaries found, using docker engine'); + } + + return this._createDockerEngine(id); + } +} + +module.exports = BackendManager; diff --git a/lib/backends/containerd/containerd-container.js b/lib/backends/containerd/containerd-container.js new file mode 100644 index 000000000..dc3575aa5 --- /dev/null +++ b/lib/backends/containerd/containerd-container.js @@ -0,0 +1,486 @@ +'use strict'; + +const _ = require('lodash'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const Docker = require('dockerode'); + +const {ContainerBackend} = require('../engine-backend'); + +const toLandoContainer = require('../../../utils/to-lando-container'); +const dockerComposify = require('../../../utils/docker-composify'); +const getContainerdPaths = require('../../../utils/get-containerd-paths'); +const runCommand = require('../../../utils/run-command'); + +/** + * Helper to determine if any file exists in an array of files. + * + * @param {Array} files - Array of file paths to check. + * @return {boolean} + * @private + */ +const srcExists = (files = []) => _.reduce(files, (exists, file) => fs.existsSync(file) || exists, false); + +/** + * Containerd implementation of the ContainerBackend interface. + * + * Uses Dockerode pointed at the finch-daemon socket to provide all low-level + * container and network operations. finch-daemon provides Docker API v1.43 + * compatibility backed by containerd, which is the same approach used for + * compose (docker-compose + finch-daemon). + * + * This replaces the previous nerdctl-based implementation which failed when + * running as non-root with rootful containerd ("rootless containerd not running"). + * + * @extends ContainerBackend + * @since 4.0.0 + */ +class ContainerdContainer extends ContainerBackend { + /** + * Create a ContainerdContainer backend. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.finchSocket] - Path to the finch-daemon Docker-compatible socket. + * @param {string} [opts.id='lando'] - Lando instance identifier for filtering containers. + * @param {Function} [opts.debug] - Debug/logging function. + */ + constructor(opts = {}) { + super(); + + const userConfRoot = opts.userConfRoot ?? path.join(os.homedir(), '.lando'); + const paths = getContainerdPaths({userConfRoot, ...opts}); + + /** @type {string} Path to the finch-daemon socket. */ + this.finchSocket = opts.finchSocket ?? paths.finchSocket; + + /** @type {string} Path to the nerdctl binary. */ + this.nerdctlBin = opts.nerdctlBin ?? path.join(userConfRoot, 'bin', 'nerdctl'); + + /** @type {string} Path to the rootful containerd socket. */ + this.containerdSocket = opts.containerdSocket ?? paths.containerdSocket; + + /** @type {string} containerd namespace. */ + this.containerdNamespace = opts.containerdNamespace ?? 'default'; + + /** @type {string} Lando instance identifier. */ + this.id = opts.id ?? 'lando'; + + /** @type {Function} Debug/logging function. */ + this.debug = opts.debug ?? require('../../../utils/debug-shim')(new (require('../../logger'))()); + + /** @type {Docker} Dockerode instance connected to finch-daemon. */ + this.dockerode = new Docker({socketPath: this.finchSocket}); + } + + // ========================================================================= + // Private helpers + // ========================================================================= + + /** + * Check whether an error represents a "not found" condition. + * + * Covers the various phrasings from Docker API and nerdctl: "no such container", + * "No such container", "no such object", "not found". + * + * @param {Error} err - The error to inspect. + * @return {boolean} `true` if the error indicates a missing resource. + * @private + */ + _isNotFoundError(err) { + const msg = err && err.message ? err.message.toLowerCase() : ''; + return msg.includes('no such container') + || msg.includes('no such object') + || msg.includes('no such network') + || msg.includes('not found'); + } + + async _nerdctl(args = [], opts = {}) { + const env = Object.assign({}, process.env, { + CONTAINERD_ADDRESS: this.containerdSocket, + CONTAINERD_NAMESPACE: this.containerdNamespace, + }, opts.env || {}); + + const result = await runCommand(this.nerdctlBin, [ + '--address', this.containerdSocket, + '--namespace', this.containerdNamespace, + ...args, + ], {debug: this.debug, env}); + + return result.stdout; + } + + // ========================================================================= + // ContainerBackend interface + // ========================================================================= + + /** + * Create a container network. + * + * Creates a network with the Lando container label. Unlike the Docker + * backend, we do NOT use `Internal: true` because containerd bridge + * networks need outbound access and inter-container communication + * works on bridge networks. + * + * @param {string} name - The name of the network to create. + * @param {Object} [opts={}] - Additional network creation options. + * @return {Promise} Network inspect data. + */ + async createNet(name, opts = {}) { + const labels = {'io.lando.container': 'TRUE'}; + + // Merge any extra labels from opts + if (opts.Labels) { + Object.assign(labels, opts.Labels); + } + + await this.dockerode.createNetwork({ + Name: name, + Labels: labels, + Attachable: true, + }); + + return this.getNetwork(name).inspect(); + } + + /** + * Inspect a container and return its full metadata. + * + * Equivalent to `docker inspect `. The Docker API (via finch-daemon) + * returns Docker-compatible JSON. + * + * @param {string} cid - A container identifier (hash, name, or short id). + * @return {Promise} Container inspect data. + * @throws {Error} If the container does not exist. + */ + async scan(cid) { + for (const id of _.uniq([cid, _.isString(cid) ? cid.replace(/_/g, '-') : cid])) { + try { + return await this.dockerode.getContainer(id).inspect(); + } catch (err) { + if (!this._isNotFoundError(err)) throw err; + } + } + + throw new Error(`no such container: ${cid}`); + } + + /** + * Determine whether a container is currently running. + * + * Returns `false` (not throw) if the container does not exist, + * to prevent race conditions when containers are removed between checks. + * + * @param {string} cid - A container identifier. + * @return {Promise} + */ + async isRunning(cid) { + try { + const data = await this.scan(cid); + return _.get(data, 'State.Running', false); + } catch (err) { + if (this._isNotFoundError(err)) return false; + throw err; + } + } + + /** + * List Lando-managed containers. + * + * Replicates the full filtering pipeline from {@link Landerode#list}: + * 1. List all containers via Dockerode's `listContainers({all: true})`. + * 2. Filter out containers with invalid status (e.g. "Removal In Progress"). + * 3. Map through `to-lando-container`. + * 4. Filter to Lando containers (`lando === true`, `instance === this.id`). + * 5. Remove orphaned app containers whose compose source files no longer exist. + * 6. Filter by project/app name if specified. + * 7. Filter by additional `key=value` filter pairs. + * 8. Retry if any container has been up for less than a second. + * 9. Add `running` status flag. + * + * @param {Object} [options={}] - Listing options. + * @param {boolean} [options.all=false] - Include stopped containers. + * @param {string} [options.app] - Filter to a specific app name. + * @param {string} [options.project] - Filter to a specific project name. + * @param {Array} [options.filter] - Additional `key=value` filters. + * @param {string} [separator='_'] - Container name separator. + * @param {number} [_retryCount=0] - Internal retry counter to prevent unbounded recursion. + * @return {Promise>} Array of Lando container descriptors. + */ + async list(options = {}, separator = '_', _retryCount = 0) { + // Get raw container list from Dockerode (Docker API format) + let rawContainers; + try { + rawContainers = await this.dockerode.listContainers({all: true}); + } catch (err) { + // If the API fails (e.g. finch-daemon not running), return empty list + this.debug('listContainers failed: %s', err.message); + return []; + } + + if (!rawContainers || rawContainers.length === 0) return []; + + // Filter out nulls/undefined and invalid statuses + let containers = rawContainers + .filter(_.identity) + .filter(data => (data.Status || '') !== 'Removal In Progress'); + + // Map to Lando containers — Dockerode returns Docker API format which + // toLandoContainer already handles (Labels as object, Id, Status) + containers = containers + .map(container => toLandoContainer(container, separator)); + + // Filter to only Lando containers + containers = containers.filter(data => data.lando === true); + + // Filter to this instance + containers = containers.filter(data => data.instance === this.id); + + // Remove orphaned app containers whose compose source files no longer exist + const cleaned = []; + for (const container of containers) { + if (!srcExists(container.src) && container.kind === 'app') { + try { + await this.remove(container.id, {force: true}); + } catch { + // Ignore removal errors for orphaned containers + } + continue; + } + cleaned.push(container); + } + containers = cleaned; + + // Filter by app/project name + if (options.project) { + containers = _.filter(containers, c => c.app === options.project); + } else if (options.app) { + containers = _.filter(containers, c => c.app === dockerComposify(options.app)); + } + + // Apply additional key=value filters + if (!_.isEmpty(options.filter)) { + containers = _.filter( + containers, + _.fromPairs(_.map(options.filter, filter => filter.split('='))), + ); + } + + // If any container has been up for only a brief moment, retry + // (matches Landerode behavior to avoid transient states) + if (_.find(containers, container => container.status === 'Up Less than a second')) { + if (_retryCount < 10) { + return this.list(options, separator, _retryCount + 1); + } + this.debug('list retry limit reached, proceeding with transient container states'); + } + + // Add running status flag + containers = containers.map(container => { + container.running = container + && typeof container.status === 'string' + && !container.status.includes('Exited'); + return container; + }); + + return containers; + } + + /** + * Remove (delete) a container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={v: true, force: false}] - Removal options. + * @param {boolean} [opts.v=true] - Also remove associated anonymous volumes. + * @param {boolean} [opts.force=false] - Force-remove a running container. + * @return {Promise} + */ + async remove(cid, opts = {v: true, force: false}) { + try { + await this.dockerode.getContainer(cid).remove({ + v: opts.v !== false, + force: !!opts.force, + }); + } catch (err) { + // Gracefully handle "no such container" — it's already gone + if (this._isNotFoundError(err)) { + this.debug('container %s already removed, ignoring', cid); + return; + } + throw err; + } + } + + /** + * Stop a running container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={}] - Stop options (e.g. `{t: 10}` for timeout in seconds). + * @return {Promise} + */ + async stop(cid, opts = {}) { + try { + await this.dockerode.getContainer(cid).stop(opts); + } catch (err) { + // Gracefully handle "no such container" — it's already gone + if (this._isNotFoundError(err)) { + this.debug('container %s already stopped/removed, ignoring', cid); + return; + } + throw err; + } + } + + /** + * Get a network handle by its id or name. + * + * Returns a lightweight proxy object with `inspect()`, `remove()`, + * `connect()`, and `disconnect()` methods that delegate to Dockerode, + * matching the Dockerode Network handle interface. + * + * @param {string} id - The network id or name. + * @return {Object} A network handle with `inspect()`, `remove()`, `connect()`, and `disconnect()` methods. + */ + getNetwork(id) { + return { + /** @type {string} The network id or name. */ + id, + + /** + * Inspect the network and return its metadata. + * @return {Promise} Network inspect data. + */ + inspect: async () => { + return this.dockerode.getNetwork(id).inspect(); + }, + + /** + * Remove the network. + * @return {Promise} + */ + remove: async () => { + try { + await this.dockerode.getNetwork(id).remove(); + } catch (err) { + if (this._isNotFoundError(err)) { + this.debug('network %s already removed, ignoring', id); + return; + } + throw err; + } + }, + + /** + * Connect a container to this network. + * + * Matches the Dockerode `Network.connect()` interface used by + * `hooks/app-add-2-landonet.js`. + * + * @param {Object} [connectOpts={}] - Connection options. + * @param {string} connectOpts.Container - The container id or name to connect. + * @param {Object} [connectOpts.EndpointConfig] - Endpoint configuration. + * @param {Array} [connectOpts.EndpointConfig.Aliases] - DNS aliases for the container. + * @return {Promise} + */ + connect: (connectOpts = {}) => { + if (!connectOpts.Container) throw new Error('Container is required for network connect'); + return this.dockerode.getNetwork(id).connect(connectOpts); + }, + + /** + * Disconnect a container from this network. + * + * Matches the Dockerode `Network.disconnect()` interface used by + * `hooks/app-add-2-landonet.js`. Silently ignores "not connected" + * errors to match Docker behavior. + * + * @param {Object} [disconnectOpts={}] - Disconnection options. + * @param {string} disconnectOpts.Container - The container id or name to disconnect. + * @param {boolean} [disconnectOpts.Force=false] - Force disconnection. + * @return {Promise} + */ + disconnect: async (disconnectOpts = {}) => { + if (!disconnectOpts.Container) throw new Error('Container is required for network disconnect'); + try { + await this.dockerode.getNetwork(id).disconnect(disconnectOpts); + } catch (err) { + // Match Docker behavior: ignore "not connected" errors + if (err.message && err.message.includes('is not connected')) { + return; + } + throw err; + } + }, + }; + } + + /** + * List networks matching the given filter options. + * + * @param {Object} [opts={}] - Filter options. + * @param {Object} [opts.filters] - Filters object (e.g. `{name: ['mynet']}` or `{id: ['abc']}`). + * @return {Promise>} Array of network objects. + */ + async listNetworks(opts = {}) { + try { + let networks = await this.dockerode.listNetworks(); + + const filters = opts.filters || {}; + if (filters.name) networks = networks.filter(network => filters.name.some(name => (network.Name || '').includes(name))); + if (filters.id) networks = networks.filter(network => filters.id.some(id => (network.ID || network.Id || '').startsWith(id))); + if (filters.label) { + networks = networks.filter(network => { + const labels = network.Labels || {}; + return filters.label.some(label => { + if (_.isString(labels)) return labels.includes(label); + return Object.entries(labels).some(([key, value]) => `${key}=${value}` === label); + }); + }); + } + + return networks; + } catch (err) { + this.debug('listNetworks failed: %s', err.message); + return []; + } + } + + /** + * Get a container handle by its id or name. + * + * Returns a lightweight proxy object with `inspect()`, `remove()`, and + * `stop()` methods that delegate to Dockerode, matching the Dockerode + * Container handle interface. + * + * @param {string} cid - The container id or name. + * @return {Object} A container handle with `inspect()`, `remove()`, and `stop()` methods. + */ + getContainer(cid) { + return { + /** @type {string} The container id or name. */ + id: cid, + + /** + * Inspect the container and return its metadata. + * @return {Promise} Container inspect data. + */ + inspect: () => this.dockerode.getContainer(cid).inspect(), + + /** + * Remove the container. + * @param {Object} [opts] - Removal options. + * @return {Promise} + */ + remove: opts => this.dockerode.getContainer(cid).remove(opts), + + /** + * Stop the container. + * @param {Object} [opts] - Stop options. + * @return {Promise} + */ + stop: opts => this.dockerode.getContainer(cid).stop(opts), + }; + } +} + +module.exports = ContainerdContainer; diff --git a/lib/backends/containerd/containerd-daemon.js b/lib/backends/containerd/containerd-daemon.js new file mode 100644 index 000000000..c81cb1e81 --- /dev/null +++ b/lib/backends/containerd/containerd-daemon.js @@ -0,0 +1,561 @@ +'use strict'; + +const {DaemonBackend} = require('../engine-backend'); + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const perfTimer = require('../../../utils/perf-timer'); +const LimaManager = require('./lima-manager'); +const WslHelper = require('./wsl-helper'); +const FinchDaemonManager = require('./finch-daemon-manager'); + +const Cache = require('../../cache'); +const Events = require('../../events'); +const Log = require('../../logger'); +const Promise = require('../../promise'); + +/** + * Containerd implementation of the DaemonBackend interface. + * + * Manages Lando's **own isolated** containerd + buildkitd + finch-daemon stack. + * On Linux/WSL, all root-level operations (starting daemons, creating sockets, + * managing CNI, etc.) are handled by the `lando-containerd.service` systemd + * unit installed during `lando setup`. At runtime, a normal user in the `lando` + * group simply verifies the service is active and the sockets are responsive — + * no sudo or elevated privileges are needed. + * + * | Path | Purpose | + * |-----------------------------------|-------------------------------| + * | `/run/lando/containerd.sock` | containerd gRPC socket | + * | `/run/lando/buildkitd.sock` | buildkitd gRPC socket | + * | `/run/lando/finch.sock` | finch-daemon Docker API sock | + * | `~/.lando/config/` | containerd/buildkit configs | + * | `/run/lando/containerd/` | containerd state directory | + * | `~/.lando/data/containerd/` | containerd root (images, etc) | + * + * Platform notes: + * - **Linux/WSL**: systemd service owns all daemons; user just talks to sockets. + * - **macOS (darwin)**: runs inside a Lima VM with containerd enabled. + * - **Windows (win32, non-WSL)**: **not yet implemented**. + * + * @extends DaemonBackend + * @since 4.0.0 + */ +class ContainerdDaemon extends DaemonBackend { + /** + * Create a ContainerdDaemon backend. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.userConfRoot] - Base directory (default `~/.lando`). + * @param {string} [opts.platform] - Override platform detection. + * @param {string} [opts.containerdBin] - Path to containerd binary. + * @param {string} [opts.buildkitdBin] - Path to buildkitd binary. + * @param {string} [opts.nerdctlBin] - Path to nerdctl binary. + * @param {string} [opts.socketPath] - containerd gRPC socket path. + * @param {string} [opts.buildkitSocket] - buildkitd gRPC socket path. + * @param {string} [opts.stateDir] - containerd state directory. + * @param {Object} [opts.events] - A Lando Events instance. + * @param {Object} [opts.cache] - A Lando Cache instance. + * @param {Object} [opts.log] - A Lando Log instance. + */ + constructor(opts = {}) { + super(); + + // Ensure /usr/sbin and /sbin are in PATH for CNI plugins (iptables) and containerd shims + if (process.platform === 'linux' && process.env.PATH && !process.env.PATH.includes('/usr/sbin')) { + process.env.PATH = `/usr/sbin:/sbin:${process.env.PATH}`; + } + + const userConfRoot = opts.userConfRoot ?? path.join(os.homedir(), '.lando'); + + /** @type {string} */ + this.platform = opts.platform ?? process.landoPlatform ?? process.platform; + + /** @type {boolean} */ + this.isRunning = false; + + /** @type {Object} */ + this.events = opts.events ?? new Events(); + + /** @type {Object} */ + this.cache = opts.cache ?? new Cache(); + + /** @type {Object} */ + this.log = opts.log ?? new Log(); + + /** @type {Function} */ + this.debug = require('../../../utils/debug-shim')(this.log); + + /** @type {boolean} Whether to emit debug-level logging in the containerd config. */ + this.debugMode = opts.debug === true; + + // Binary paths + // containerd/buildkitd live in the system-wide Lando bin dir (installed by setup hook) + const systemBinDir = opts.systemBinDir ?? '/usr/local/lib/lando/bin'; + // User-local binaries (nerdctl) stay under ~/.lando/bin + const binDir = path.join(userConfRoot, 'bin'); + + /** @type {string} Path to the containerd binary (system-wide). */ + this.containerdBin = opts.containerdBin ?? path.join(systemBinDir, 'containerd'); + + /** @type {string} Path to the buildkitd binary. */ + this.buildkitdBin = opts.buildkitdBin ?? path.join(systemBinDir, 'buildkitd'); + + /** @type {string} Path to the buildctl binary (alongside buildkitd). */ + this.buildctlBin = path.join(path.dirname(this.buildkitdBin), 'buildctl'); + + /** @type {string} Path to the nerdctl binary (used as the "docker" equivalent). */ + this.nerdctlBin = opts.nerdctlBin ?? path.join(binDir, 'nerdctl'); + + // Socket paths — sockets go in /run/lando/ (root-owned, group-accessible via systemd) + const socketDir = '/run/lando'; + + /** @type {string} containerd gRPC socket. */ + this.socketPath = opts.socketPath ?? path.join(socketDir, 'containerd.sock'); + + /** @type {string} buildkitd gRPC socket. */ + this.buildkitSocket = opts.buildkitSocket ?? path.join(socketDir, 'buildkitd.sock'); + + // Directories + /** @type {string} Log directory for daemon stderr output. */ + this.logDir = path.join(userConfRoot, 'logs'); + + /** @type {string} containerd --state directory (under /run/, ephemeral). */ + this.stateDir = opts.stateDir ?? '/run/lando/containerd'; + + /** @type {string} containerd --root directory (images, snapshots, etc). */ + this.rootDir = path.join(userConfRoot, 'data', 'containerd'); + + // DaemonBackend interface properties + /** + * @type {string|false} + * NOTE: this.compose holds the nerdctl binary path, NOT a docker-compose + * compatible binary. nerdctl requires the subcommand `nerdctl compose ...` + * rather than being invoked directly as `docker-compose`. Set to false + * until a proper NerdctlComposeBackend is implemented. + */ + this.compose = false; + + /** @type {string|false} Path to nerdctl (analogous to docker CLI). */ + this.docker = this.nerdctlBin; + + /** @type {string} Path to containerd binary (used by Engine to check containerd availability). */ + this.containerd = this.containerdBin; + + /** @type {string} Path to nerdctl binary. */ + this.nerdctl = this.nerdctlBin; + + // Config paths (written by setup, read at runtime) + this.configDir = path.join(userConfRoot, 'config'); + this.configPath = path.join(this.configDir, 'containerd-config.toml'); + this.buildkitConfigPath = path.join(this.configDir, 'buildkit-config.toml'); + + // Lima VM manager for macOS containerd support + /** @type {LimaManager|null} */ + this.lima = null; + if (this.platform === 'darwin') { + this.lima = new LimaManager({ + limactl: opts.limactl ?? 'limactl', + vmName: opts.limaVmName ?? 'lando', + cpus: opts.limaCpus ?? 4, + memory: opts.limaMemory ?? 4, + disk: opts.limaDisk ?? 60, + debug: this.debug, + }); + } + + // WSL2 support + /** @type {WslHelper|null} */ + this.wslHelper = null; + if (WslHelper.isWsl()) { + this.wslHelper = new WslHelper({debug: this.debug, userConfRoot}); + } + + // Finch daemon for Docker API compatibility (Traefik proxy) + this.finchDaemon = new FinchDaemonManager({ + finchDaemonBin: opts.finchDaemonBin || path.join(binDir, 'finch-daemon'), + containerdSocket: this.socketPath, + socketPath: opts.finchDaemonSocket || path.join(socketDir, 'finch.sock'), + logDir: this.logDir, + debug: this.debug, + }); + } + + /** + * Verify that the lando-containerd systemd service is active and all + * sockets are responsive. No daemons are spawned — the systemd service + * owns all of that. + * + * @param {boolean|Object} [retry=true] - Retry configuration. + * @returns {Promise} + */ + async up(retry = true) { + // Normalize retry opts (same pattern as Docker daemon) + if (retry === true) retry = {max: 25, backoff: 1000}; + else if (retry === false) retry = {max: 0}; + + // Platform guard + this._assertPlatformSupported(); + + // Short-circuit: if the containerd binary doesn't exist, there's nothing to start + if (this.platform !== 'darwin' && !fs.existsSync(this.containerdBin)) { + throw new Error(`containerd binary not found at ${this.containerdBin}, skipping start`); + } + + await this.events.emit('pre-engine-up'); + + // macOS: delegate to Lima VM + if (this.platform === 'darwin' && this.lima) { + const limaStarter = async () => { + try { + await this.lima.createVM(); + await this.lima.startVM(); + this.socketPath = this.lima.getSocketPath(); + this.debug('containerd engine started via Lima VM, socket at %s', this.socketPath); + return Promise.resolve(); + } catch (error) { + this.debug('could not start containerd via Lima with %o', error?.message); + return Promise.reject(error); + } + }; + + await Promise.retry(limaStarter, retry); + this.isRunning = true; + await this.events.emit('post-engine-up'); + return; + } + + // Ensure user-level directories exist + this._ensureDirectories(); + + // Verify systemd service is active and sockets are responsive + const starter = async () => { + const isUp = await this.isUp(); + if (isUp) return Promise.resolve(); + + const upTimer = this.debugMode ? perfTimer('containerd-engine-up') : null; + + try { + // Check that the systemd service is active + const runCommand = require('../../../utils/run-command'); + try { + await runCommand('systemctl', ['is-active', '--quiet', 'lando-containerd.service'], { + debug: this.debug, + }); + } catch { + throw new Error( + 'lando-containerd.service is not active. ' + + 'Run "lando setup" to install and start the containerd engine service.', + ); + } + + // Verify all three sockets exist and are responsive + await this._waitForSocket(this.socketPath, 'containerd', 10); + await this._healthCheck(); + await this._waitForSocket(this.buildkitSocket, 'buildkitd', 10); + await this._waitForSocket(this.finchDaemon.getSocketPath(), 'finch-daemon', 10); + + if (upTimer) this.debug('%s completed in %.1fms', upTimer.label, upTimer.stop()); + this.debug('containerd engine started successfully'); + return Promise.resolve(); + } catch (error) { + if (upTimer) this.debug('%s failed after %.1fms', upTimer.label, upTimer.stop()); + this.debug('could not start containerd engine with %o', error?.message); + return Promise.reject(error); + } + }; + + await Promise.retry(starter, retry); + + this.isRunning = true; + + await this.events.emit('post-engine-up'); + } + + /** + * Shut down the containerd engine from Lando's perspective. + * + * On Linux/WSL the systemd service keeps running for fast restart — + * we just emit events and update state. On macOS the Lima VM is stopped. + * + * @returns {Promise} + */ + async down() { + await this.events.emit('pre-engine-down'); + + // macOS: stop the Lima VM + if (this.platform === 'darwin' && this.lima) { + try { + await this.lima.stopVM(); + this.debug('Lima VM stopped'); + } catch (error) { + this.debug('error stopping Lima VM: %s', error.message); + } + this.isRunning = false; + await this.events.emit('post-engine-down'); + return; + } + + // Windows without VM support is a no-op for now + if (this.platform === 'win32') { + await this.events.emit('post-engine-down'); + return; + } + + // Linux/WSL: systemd service keeps running — just update state + this.isRunning = false; + + await this.events.emit('post-engine-down'); + } + + /** + * Check whether the containerd engine is currently running and reachable. + * + * Uses a short-lived TTL cache (5 seconds) to avoid repeated subprocess + * spawns, matching the Docker daemon pattern. + * + * @param {Object} [cache] - A Lando Cache instance (defaults to `this.cache`). + * @param {string} [docker] - Path to nerdctl binary (defaults to `this.nerdctlBin`). + * @returns {Promise} + */ + async isUp(cache, docker) { + cache = cache ?? this.cache; + docker = docker ?? this.nerdctlBin; + + // Return cached result if fresh + if (cache.get('containerd-engineup') === true) return Promise.resolve(true); + + // macOS: check if the Lima VM is running and the socket exists + if (this.platform === 'darwin' && this.lima) { + try { + const running = await this.lima.isRunning(); + if (!running) { + this.debug('containerd is down: Lima VM "%s" is not running', this.lima.vmName); + return Promise.resolve(false); + } + + const socketPath = this.lima.getSocketPath(); + if (!fs.existsSync(socketPath)) { + this.debug('containerd is down: Lima socket does not exist at %s', socketPath); + return Promise.resolve(false); + } + + this.debug('containerd engine is up via Lima VM.'); + cache.set('containerd-engineup', true, {ttl: 5}); + this.isRunning = true; + this.socketPath = socketPath; + return Promise.resolve(true); + } catch (error) { + this.debug('containerd engine (Lima) is down with error %s', error.message); + return Promise.resolve(false); + } + } + + // Check finch-daemon socket (Docker API compat layer) + const finchSocket = this.finchDaemon ? this.finchDaemon.getSocketPath() : '/run/lando/finch.sock'; + if (!fs.existsSync(finchSocket)) { + this.debug('containerd is down: finch socket does not exist at %s', finchSocket); + return Promise.resolve(false); + } + + if (!fs.existsSync(this.socketPath)) { + this.debug('containerd is down: containerd socket does not exist at %s', this.socketPath); + return Promise.resolve(false); + } + + if (!fs.existsSync(this.buildkitSocket)) { + this.debug('containerd is down: buildkit socket does not exist at %s', this.buildkitSocket); + return Promise.resolve(false); + } + + // Health check via Dockerode against finch-daemon socket + try { + const Docker = require('dockerode'); + const dockerode = new Docker({socketPath: finchSocket}); + await dockerode.ping(); + this.debug('containerd engine is up (via finch-daemon).'); + cache.set('containerd-engineup', true, {ttl: 5}); + this.isRunning = true; + return Promise.resolve(true); + } catch (error) { + this.debug('containerd engine is down with error %s', error.message); + return Promise.resolve(false); + } + } + + /** + * Retrieve version information for containerd, buildkit, and nerdctl. + * + * @returns {Promise<{containerd: string, buildkit: string, nerdctl: string}>} + */ + async getVersions() { + const versions = {containerd: false, buildkit: false, nerdctl: false}; + const runCommand = require('../../../utils/run-command'); + + // containerd --version → "containerd containerd.io x.y.z ..." + try { + const {stdout} = await runCommand(this.containerdBin, ['--version'], { + debug: this.debug, + ignoreReturnCode: true, + }); + const match = stdout.toString().match(/\d+\.\d+\.\d+/); + if (match) versions.containerd = match[0]; + } catch { + this.debug('could not determine containerd version'); + } + + // buildkitd --version → "buildkitd github.com/moby/buildkit x.y.z ..." + try { + const {stdout} = await runCommand(this.buildkitdBin, ['--version'], { + debug: this.debug, + ignoreReturnCode: true, + }); + const match = stdout.toString().match(/\d+\.\d+\.\d+/); + if (match) versions.buildkit = match[0]; + } catch { + this.debug('could not determine buildkitd version'); + } + + // nerdctl --version → "nerdctl version x.y.z" + try { + const {stdout} = await runCommand(this.nerdctlBin, ['--version'], { + debug: this.debug, + ignoreReturnCode: true, + }); + const match = stdout.toString().match(/\d+\.\d+\.\d+/); + if (match) versions.nerdctl = match[0]; + } catch { + this.debug('could not determine nerdctl version'); + } + + return versions; + } + + /** + * Prune the BuildKit build cache. + * + * Runs `buildctl prune --all` to remove all cached build layers. This is + * useful for reclaiming disk space when caches grow too large. + * + * @returns {Promise} + */ + async pruneBuildCache() { + const {execSync} = require('child_process'); + try { + execSync(`"${this.buildctlBin}" prune --all`, { + stdio: 'pipe', + env: {...process.env, BUILDKIT_HOST: 'unix://' + this.buildkitSocket}, + }); + this.debug('build cache pruned'); + } catch (err) { + this.debug('failed to prune build cache: %s', err.message); + } + } + + // ========================================================================= + // Private helpers + + /** + * Assert that the current platform is supported. + * + * @throws {Error} If on bare Windows (non-WSL). + * @private + */ + _assertPlatformSupported() { + if (this.platform === 'win32') { + throw new Error( + 'containerd engine on Windows (non-WSL) is not yet implemented. ' + + 'Please use WSL2 or the Docker backend on Windows for now.', + ); + } + } + + /** + * Create required user-level directories if they don't exist. + * + * NOTE: `stateDir` (`/run/lando/containerd`) is intentionally excluded — it + * lives under `/run/lando/` which is root-owned (created by systemd + * `RuntimeDirectory=lando`). The containerd daemon itself creates the state + * subdirectory when it starts as root. User-land code must never attempt to + * mkdir inside `/run/lando/`. + * + * @private + */ + _ensureDirectories() { + for (const dir of [this.rootDir, this.logDir, this.configDir]) { + fs.mkdirSync(dir, {recursive: true}); + } + } + + /** + * Check whether this environment uses the systemd-managed service. + * + * @returns {boolean} + * @private + */ + _usesSystemdService() { + return ['linux', 'wsl'].includes(this.platform) && fs.existsSync('/etc/systemd/system/lando-containerd.service'); + } + + /** + * Wait for a Unix socket to appear on disk. + * + * Polls for socket file existence. Actual daemon liveness is verified + * separately by `_healthCheck()` (Dockerode ping against finch-daemon). + * + * @param {string} socketPath - Path to the socket file. + * @param {string} label - Human-readable name for debug logging. + * @param {number} [maxAttempts=10] - Maximum poll attempts. + * @returns {Promise} + * @private + */ + async _waitForSocket(socketPath, label, maxAttempts = 10) { + const delay = ms => new Promise(resolve => setTimeout(resolve, ms)); + + for (let i = 0; i < maxAttempts; i++) { + if (fs.existsSync(socketPath)) { + this.debug('%s socket ready at %s', label, socketPath); + return; + } + this.debug('waiting for %s socket (attempt %d/%d)...', label, i + 1, maxAttempts); + await delay(500); + } + + throw new Error(`${label} socket did not appear at ${socketPath} after ${maxAttempts} attempts`); + } + + /** + * Run a quick health check via Dockerode against finch-daemon to verify + * the engine is responsive. + * + * @returns {Promise} + * @private + */ + async _healthCheck() { + const finchSocket = this.finchDaemon ? this.finchDaemon.getSocketPath() : '/run/lando/finch.sock'; + const Docker = require('dockerode'); + const dockerode = new Docker({socketPath: finchSocket}); + await dockerode.ping(); + } + + /** + * Remove a file if it exists (used for cleanup). + * + * @param {string} filePath - Path to the file to remove. + * @private + */ + _cleanupFile(filePath) { + try { + if (fs.existsSync(filePath)) { + fs.unlinkSync(filePath); + this.debug('cleaned up %s', filePath); + } + } catch (error) { + this.debug('failed to clean up %s: %s', filePath, error.message); + } + } +} + +module.exports = ContainerdDaemon; diff --git a/lib/backends/containerd/finch-daemon-manager.js b/lib/backends/containerd/finch-daemon-manager.js new file mode 100644 index 000000000..d9f8b91fc --- /dev/null +++ b/lib/backends/containerd/finch-daemon-manager.js @@ -0,0 +1,168 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const {spawn} = require('child_process'); + +const getContainerdPaths = require('../../../utils/get-containerd-paths'); +const getFinchDaemonConfig = require('../../../utils/get-nerdctl-config'); + +class FinchDaemonManager { + constructor(opts = {}) { + const userConfRoot = opts.userConfRoot || path.join(os.homedir(), '.lando'); + const paths = getContainerdPaths({userConfRoot, ...opts}); + + this.finchDaemonBin = opts.finchDaemonBin || path.join(userConfRoot, 'bin', 'finch-daemon'); + this.containerdSocket = opts.containerdSocket || paths.containerdSocket; + this.socketPath = opts.socketPath || paths.finchSocket; + this.credentialSocketPath = opts.credentialSocketPath || paths.finchCredentialSocket; + this.pidFile = path.join(userConfRoot, 'run', 'finch-daemon.pid'); + this.logDir = opts.logDir || path.join(userConfRoot, 'logs'); + this.configDir = opts.configDir || path.join(userConfRoot, 'config'); + this.configPath = opts.configPath || path.join(this.configDir, 'finch-daemon.toml'); + this.namespace = opts.namespace || 'default'; + this.cniNetconfPath = opts.cniNetconfPath || '/etc/lando/cni/finch'; + this.cniPath = opts.cniPath || '/usr/local/lib/lando/cni/bin'; + this.debug = opts.debug || require('../../../utils/debug-shim')(opts.log); + } + + async start() { + if (this._isProcessRunning()) { + this.debug('finch-daemon already running'); + return; + } + + fs.mkdirSync(path.dirname(this.socketPath), {recursive: true}); + fs.mkdirSync(path.dirname(this.pidFile), {recursive: true}); + fs.mkdirSync(this.logDir, {recursive: true}); + fs.mkdirSync(this.configDir, {recursive: true}); + + fs.writeFileSync(this.configPath, getFinchDaemonConfig({ + containerdSocket: this.containerdSocket, + namespace: this.namespace, + cniNetconfPath: this.cniNetconfPath, + cniPath: this.cniPath, + }), 'utf8'); + + // Clean up stale socket + if (fs.existsSync(this.socketPath)) { + fs.unlinkSync(this.socketPath); + } + if (fs.existsSync(this.credentialSocketPath)) { + fs.unlinkSync(this.credentialSocketPath); + } + + const args = this.getStartArgs(); + + this.debug('starting finch-daemon: %s %o', this.finchDaemonBin, args); + + const logFile = path.join(this.logDir, 'finch-daemon.log'); + const stderrFd = fs.openSync(logFile, 'a'); + const child = spawn(this.finchDaemonBin, args, { + detached: true, + stdio: ['ignore', 'ignore', stderrFd], + }); + child.unref(); + + if (child.pid) this.debug('finch-daemon spawned with pid %d', child.pid); + + fs.closeSync(stderrFd); + } + + async stop() { + if (!fs.existsSync(this.pidFile)) return; + + const pid = parseInt(fs.readFileSync(this.pidFile, 'utf8').trim(), 10); + if (isNaN(pid)) { + this._cleanup(); + return; + } + + try { + process.kill(pid, 0); + } catch { + this._cleanup(); + return; + } + + try { + process.kill(pid, 'SIGTERM'); + } catch { + // noop + } + + const delay = ms => new Promise(resolve => setTimeout(resolve, ms)); + for (let i = 0; i < 5; i++) { + await delay(1000); + try { + process.kill(pid, 0); + } catch { + this._cleanup(); + return; + } + } + + try { + process.kill(pid, 'SIGKILL'); + } catch { + // noop + } + await delay(500); + this._cleanup(); + } + + async isRunning() { + if (!this._isProcessRunning()) return false; + return fs.existsSync(this.socketPath); + } + + getSocketPath() { return this.socketPath; } + + getStartArgs() { + const owner = String(process.getuid ? process.getuid() : 1000); + + return [ + '--socket-addr', this.socketPath, + '--socket-owner', owner, + '--pidfile', this.pidFile, + '--config-file', this.configPath, + '--credential-socket-addr', this.credentialSocketPath, + '--credential-socket-owner', owner, + '--debug', + ]; + } + + _isProcessRunning() { + try { + if (!fs.existsSync(this.pidFile)) return false; + const pid = parseInt(fs.readFileSync(this.pidFile, 'utf8').trim(), 10); + if (isNaN(pid)) return false; + process.kill(pid, 0); + return true; + } catch (err) { + if (err.code === 'EPERM') return true; + return false; + } + } + + _cleanup() { + try { + if (fs.existsSync(this.pidFile)) fs.unlinkSync(this.pidFile); + } catch { + // noop + } + try { + if (fs.existsSync(this.socketPath)) fs.unlinkSync(this.socketPath); + } catch { + // noop + } + try { + if (fs.existsSync(this.credentialSocketPath)) fs.unlinkSync(this.credentialSocketPath); + } catch { + // noop + } + } +} + +module.exports = FinchDaemonManager; diff --git a/lib/backends/containerd/index.js b/lib/backends/containerd/index.js new file mode 100644 index 000000000..72945c05e --- /dev/null +++ b/lib/backends/containerd/index.js @@ -0,0 +1,34 @@ +'use strict'; + +/** + * @module backends/containerd + * @description Containerd backend implementations for Lando's pluggable engine architecture. + * + * Exports concrete implementations of the DaemonBackend and ContainerBackend + * interfaces that manage Lando's own isolated containerd + buildkitd + finch-daemon stack. + * + * Compose operations use `docker-compose` pointed at finch-daemon via `DOCKER_HOST` + * (configured in `BackendManager._createContainerdEngine()`), NOT `nerdctl compose`. + * + * @example + * const {ContainerdDaemon, ContainerdContainer} = require('./backends/containerd'); + * + * const daemon = new ContainerdDaemon({ + * userConfRoot: '~/.lando', + * events, + * cache, + * log, + * }); + * + * const container = new ContainerdContainer({ + * finchSocket: daemon.finchDaemon.getSocketPath(), + * id: 'myapp', + * }); + * + * @since 4.0.0 + */ +const ContainerdDaemon = require('./containerd-daemon'); +const ContainerdContainer = require('./containerd-container'); +const ContainerdProxyAdapter = require('./proxy-adapter'); + +module.exports = {ContainerdDaemon, ContainerdContainer, ContainerdProxyAdapter}; diff --git a/lib/backends/containerd/lima-manager.js b/lib/backends/containerd/lima-manager.js new file mode 100644 index 000000000..bceda54f3 --- /dev/null +++ b/lib/backends/containerd/lima-manager.js @@ -0,0 +1,238 @@ +'use strict'; + +const os = require('os'); +const path = require('path'); + +/** + * Manages a Lima VM for running containerd on macOS. + * + * Lima is a lightweight Linux VM tool designed specifically for running + * containerd on macOS. This class wraps the `limactl` CLI to create, start, + * stop, and interact with a Lima VM that hosts the containerd daemon. + * + * The VM exposes the containerd socket at: + * ~/.lima//sock/containerd.sock + * + * @since 4.0.0 + */ +class LimaManager { + /** + * Create a LimaManager instance. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.limactl] - Path to limactl binary (default: "limactl"). + * @param {string} [opts.vmName] - Name of the Lima VM (default: "lando"). + * @param {number} [opts.cpus] - CPUs for the VM (default: 4). + * @param {number} [opts.memory] - Memory in GB for the VM (default: 4). + * @param {number} [opts.disk] - Disk in GB for the VM (default: 60). + * @param {Function} [opts.debug] - Debug logging function. + */ + constructor(opts = {}) { + /** @type {string} Path to the limactl binary. */ + this.limactl = opts.limactl ?? 'limactl'; + + /** @type {string} Name of the Lima VM. */ + this.vmName = opts.vmName ?? 'lando'; + + /** @type {number} Number of CPUs for the VM. */ + this.cpus = opts.cpus ?? 4; + + /** @type {number} Memory in GB for the VM. */ + this.memory = opts.memory ?? 4; + + /** @type {number} Disk in GB for the VM. */ + this.disk = opts.disk ?? 60; + + /** @type {Function} Debug logging function. */ + this.debug = opts.debug ?? (() => {}); + } + + /** + * Check if the Lima VM exists. + * + * Runs `limactl list --json` and checks for a VM matching `this.vmName`. + * + * @returns {Promise} True if the VM exists, false otherwise. + */ + async vmExists() { + try { + const result = await this._run(['list', '--json']); + const vms = this._parseListOutput(result.stdout); + return vms.some(vm => vm.name === this.vmName); + } catch (error) { + this.debug('error checking if Lima VM exists: %s', error.message); + return false; + } + } + + /** + * Create the Lima VM if it does not already exist. + * + * Runs: + * limactl create --name= --containerd=system \ + * --cpus=N --memory=N --disk=N --tty=false template:default + * + * @returns {Promise} + * @throws {Error} If VM creation fails. + */ + async createVM() { + if (await this.vmExists()) { + this.debug('Lima VM "%s" already exists, skipping creation', this.vmName); + return; + } + + this.debug('creating Lima VM "%s" (cpus=%d, memory=%dG, disk=%dG)', + this.vmName, this.cpus, this.memory, this.disk); + + await this._run([ + 'create', + `--name=${this.vmName}`, + '--containerd=system', + `--cpus=${this.cpus}`, + `--memory=${this.memory}`, + `--disk=${this.disk}`, + '--tty=false', + 'template:default', + ]); + + this.debug('Lima VM "%s" created successfully', this.vmName); + } + + /** + * Start the Lima VM. + * + * @returns {Promise} + * @throws {Error} If the VM cannot be started. + */ + async startVM() { + if (await this.isRunning()) { + this.debug('Lima VM "%s" is already running', this.vmName); + return; + } + + this.debug('starting Lima VM "%s"', this.vmName); + await this._run(['start', this.vmName]); + this.debug('Lima VM "%s" started', this.vmName); + } + + /** + * Stop the Lima VM. + * + * @returns {Promise} + * @throws {Error} If the VM cannot be stopped. + */ + async stopVM() { + if (!await this.isRunning()) { + this.debug('Lima VM "%s" is not running, skipping stop', this.vmName); + return; + } + + this.debug('stopping Lima VM "%s"', this.vmName); + await this._run(['stop', this.vmName]); + this.debug('Lima VM "%s" stopped', this.vmName); + } + + /** + * Check if the Lima VM is currently running. + * + * Runs `limactl list --json` and checks if the VM status is "Running". + * + * @returns {Promise} True if the VM is running, false otherwise. + */ + async isRunning() { + try { + const result = await this._run(['list', '--json']); + const vms = this._parseListOutput(result.stdout); + const vm = vms.find(v => v.name === this.vmName); + return vm?.status === 'Running'; + } catch (error) { + this.debug('error checking if Lima VM is running: %s', error.message); + return false; + } + } + + /** + * Get the containerd socket path exposed by Lima. + * + * Lima exposes the containerd socket at: + * ~/.lima//sock/containerd.sock + * + * @returns {string} The full path to the containerd socket. + */ + getSocketPath() { + return path.join(os.homedir(), '.lima', this.vmName, 'sock', 'containerd.sock'); + } + + /** + * Execute a command inside the Lima VM. + * + * Runs: limactl shell -- + * + * @param {string[]} args - Command and arguments to run inside the VM. + * @returns {Promise<{stdout: string, stderr: string, code: number}>} + * @throws {Error} If the command fails. + */ + async exec(args) { + this.debug('executing in Lima VM "%s": %o', this.vmName, args); + return this._run(['shell', this.vmName, '--', ...args]); + } + + /** + * Run nerdctl inside the Lima VM with sudo. + * + * Runs: limactl shell -- sudo nerdctl + * + * @param {string[]} args - Arguments to pass to nerdctl. + * @returns {Promise<{stdout: string, stderr: string, code: number}>} + * @throws {Error} If the command fails. + */ + async nerdctl(args) { + this.debug('running nerdctl in Lima VM "%s": %o', this.vmName, args); + return this._run(['shell', this.vmName, '--', 'sudo', 'nerdctl', ...args]); + } + + // ========================================================================= + // Private helpers + // ========================================================================= + + /** + * Run a limactl command via run-command utility. + * + * @param {string[]} args - Arguments to pass to limactl. + * @returns {Promise<{stdout: string, stderr: string, code: number}>} + * @private + */ + async _run(args) { + const runCommand = require('../../../utils/run-command'); + this.debug('running: %s %o', this.limactl, args); + return runCommand(this.limactl, args, {debug: this.debug}); + } + + /** + * Parse the output of `limactl list --json`. + * + * limactl outputs one JSON object per line (NDJSON), one per VM. + * + * @param {string} stdout - The raw stdout from `limactl list --json`. + * @returns {Object[]} Array of VM objects with at least { name, status }. + * @private + */ + _parseListOutput(stdout) { + const output = (stdout ?? '').toString().trim(); + if (!output) return []; + + return output.split('\n') + .filter(line => line.trim()) + .map(line => { + try { + return JSON.parse(line); + } catch { + this.debug('failed to parse limactl JSON line: %s', line); + return null; + } + }) + .filter(Boolean); + } +} + +module.exports = LimaManager; diff --git a/lib/backends/containerd/nerdctl-compose.js b/lib/backends/containerd/nerdctl-compose.js new file mode 100644 index 000000000..3edcd5ff3 --- /dev/null +++ b/lib/backends/containerd/nerdctl-compose.js @@ -0,0 +1,260 @@ +'use strict'; + +const {ComposeBackend} = require('../engine-backend'); +const compose = require('../../compose'); +const {getContainerdAuthConfig} = require('../../../utils/setup-containerd-auth'); + +/** + * nerdctl compose implementation of the ComposeBackend interface. + * + * @deprecated This class is **not used in production**. The containerd engine path + * uses `docker-compose` pointed at finch-daemon via `DOCKER_HOST` instead. + * See `BackendManager._createContainerdEngine()` in `lib/backend-manager.js`. + * This file is retained for reference only and may be removed in a future release. + * + * Wraps the existing `lib/compose.js` module — the same one used by DockerCompose — + * and transforms every returned `{cmd, opts}` shell descriptor so that commands target + * `nerdctl compose` instead of `docker compose`. + * + * ### How it works + * + * `compose.js` builds command arrays like: + * ``` + * ['--project-name', 'myapp', '--file', 'docker-compose.yml', 'up', '--detach', ...] + * ``` + * + * The shell execution layer prepends the binary path, so for Docker you get: + * ``` + * docker compose --project-name myapp --file docker-compose.yml up --detach ... + * ``` + * + * For nerdctl the equivalent is: + * ``` + * nerdctl --address /run/lando/containerd.sock compose --project-name myapp --file docker-compose.yml up --detach ... + * ``` + * + * So we delegate to `compose.*()` for all the complex flag-mapping and option-parsing + * logic, then prepend nerdctl's global connection flags to the resulting cmd array. + * The shell layer prepends the nerdctl binary path. + * + * @extends ComposeBackend + * @since 4.0.0 + */ +class NerdctlCompose extends ComposeBackend { + /** + * Create a NerdctlCompose backend. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.socketPath='/run/lando/containerd.sock'] - Path to the + * containerd socket. Passed as `--address` to nerdctl before the `compose` subcommand. + * @param {Object} [opts.authConfig] - Registry auth configuration from `getContainerdAuthConfig()`. + * When provided, its `env` object is merged into command opts to ensure nerdctl + * finds the Docker config for private registry authentication. + * @param {string} [opts.namespace='default'] - containerd namespace. + * @param {string} [opts.buildkitHost] - BuildKit endpoint for compose-triggered builds. + * @param {string} [opts.nerdctlConfig] - Optional path to `nerdctl.toml`. + */ + constructor(opts = {}) { + super(); + + /** + * Path to the containerd socket. + * @type {string} + */ + this.socketPath = opts.socketPath || '/run/lando/containerd.sock'; + this.namespace = opts.namespace || 'default'; + this.buildkitHost = opts.buildkitHost; + this.nerdctlConfig = opts.nerdctlConfig; + + /** + * Registry auth configuration. + * @type {{dockerConfig: string, env: Object, configExists: boolean, credentialHelpers: string[]}} + */ + this.authConfig = opts.authConfig || getContainerdAuthConfig(); + } + + /** + * Transform a compose.js shell descriptor for nerdctl. + * + * Prepends nerdctl global connection flags to the cmd array so that + * the shell layer produces: + * nerdctl --address --namespace compose <...existing args...> + * + * @param {{cmd: string[], opts: Object}} result - Shell descriptor from compose.js. + * @returns {{cmd: string[], opts: Object}} Transformed shell descriptor for nerdctl. + * @private + */ + _transform(result) { + const authEnv = this.authConfig && this.authConfig.env ? this.authConfig.env : {}; + const engineEnv = { + CONTAINERD_ADDRESS: this.socketPath, + CONTAINERD_NAMESPACE: this.namespace, + }; + if (this.buildkitHost) engineEnv.BUILDKIT_HOST = this.buildkitHost; + if (this.nerdctlConfig) engineEnv.NERDCTL_TOML = this.nerdctlConfig; + + const opts = Object.assign({}, result.opts, { + env: Object.assign({}, result.opts?.env || process.env, engineEnv, authEnv), + }); + + return { + cmd: ['--address', this.socketPath, '--namespace', this.namespace, 'compose', ...result.cmd], + opts, + }; + } + + /** + * Build container images for the specified services. + * + * Filters `opts.local` against `opts.services` to determine which services + * to build. If no local services match, falls back to a no-op `ps` command. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Build options. + * @param {Array} [opts.services] - Services to build (default: all). + * @param {Array} [opts.local] - Services with local Dockerfiles. + * @param {boolean} [opts.noCache=false] - Bypass the build cache. + * @param {boolean} [opts.pull=true] - Pull base images before building. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + build(composeFiles, project, opts) { + return this._transform(compose.build(composeFiles, project, opts)); + } + + /** + * Get the container ID(s) for services in a compose project. + * + * Equivalent to `nerdctl compose ps -q`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Options (e.g. `{services: ['web']}`). + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + getId(composeFiles, project, opts) { + return this._transform(compose.getId(composeFiles, project, opts)); + } + + /** + * Send a SIGKILL to containers in a compose project. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Kill options. + * @param {Array} [opts.services] - Services to kill. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + kill(composeFiles, project, opts) { + return this._transform(compose.kill(composeFiles, project, opts)); + } + + /** + * Retrieve log output from containers in a compose project. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Logging options. + * @param {boolean} [opts.follow=false] - Tail the logs. + * @param {boolean} [opts.timestamps=false] - Include timestamps. + * @param {Array} [opts.services] - Services to get logs from. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + logs(composeFiles, project, opts) { + return this._transform(compose.logs(composeFiles, project, opts)); + } + + /** + * Pull images for services in a compose project. + * + * Filters `opts.pullable` against `opts.services` to determine which services + * to pull. If no pullable services match, falls back to a no-op `ps` command. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Pull options. + * @param {Array} [opts.services] - Services to pull. + * @param {Array} [opts.pullable] - Services whose images can be pulled. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + pull(composeFiles, project, opts) { + return this._transform(compose.pull(composeFiles, project, opts)); + } + + /** + * Remove containers (and optionally volumes/networks) for a compose project. + * + * Uses `nerdctl compose down` when `opts.purge` is `true`, otherwise + * `nerdctl compose rm`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Removal options. + * @param {boolean} [opts.purge=false] - Full teardown. + * @param {boolean} [opts.force=true] - Force removal. + * @param {boolean} [opts.volumes=true] - Remove anonymous volumes. + * @param {Array} [opts.services] - Services to remove. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + remove(composeFiles, project, opts) { + return this._transform(compose.remove(composeFiles, project, opts)); + } + + /** + * Execute a command inside a running service container. + * + * Maps to `nerdctl compose exec` semantics. Handles background-ampersand + * detection and converts to `--detach` mode automatically (delegated to + * compose.js). + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Run/exec options. + * @param {Array} opts.cmd - The command and arguments to execute. + * @param {Array} [opts.services] - The service to run in. + * @param {string} [opts.user] - User to execute as. + * @param {Object} [opts.environment] - Additional environment variables. + * @param {boolean} [opts.detach=false] - Run in background. + * @param {boolean} [opts.noTTY] - Disable pseudo-TTY allocation. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + run(composeFiles, project, opts) { + return this._transform(compose.run(composeFiles, project, opts)); + } + + /** + * Start containers for a compose project. + * + * Equivalent to `nerdctl compose up` with detach and orphan removal defaults. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Start options. + * @param {Array} [opts.services] - Services to start. + * @param {boolean} [opts.background=true] - Run in detached mode. + * @param {boolean} [opts.recreate=false] - Force-recreate containers. + * @param {boolean} [opts.noRecreate=true] - Do not recreate existing containers. + * @param {boolean} [opts.removeOrphans=true] - Remove orphaned containers. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + start(composeFiles, project, opts) { + return this._transform(compose.start(composeFiles, project, opts)); + } + + /** + * Stop running containers in a compose project. + * + * Equivalent to `nerdctl compose stop`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Stop options. + * @param {Array} [opts.services] - Services to stop. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + stop(composeFiles, project, opts) { + return this._transform(compose.stop(composeFiles, project, opts)); + } +} + +module.exports = NerdctlCompose; diff --git a/lib/backends/containerd/proxy-adapter.js b/lib/backends/containerd/proxy-adapter.js new file mode 100644 index 000000000..4a8491019 --- /dev/null +++ b/lib/backends/containerd/proxy-adapter.js @@ -0,0 +1,100 @@ +'use strict'; + +const getContainerdPaths = require('../../../utils/get-containerd-paths'); +const ensureCniNetwork = require('../../../utils/ensure-cni-network'); + +/** + * Containerd proxy adapter for Traefik integration. + * + * Traefik's Docker provider discovers containers by watching the Docker socket + * for events and reading container labels. When Lando uses the containerd + * backend, finch-daemon provides Docker API v1.43 compatibility on a Unix + * socket at `/run/lando/finch.sock`. + * + * This adapter handles the CNI network bridging concern for proxy operation: + * docker-compose via finch-daemon creates networks at the Docker API level but + * not at the CNI level. The nerdctl OCI hook needs CNI configs for container + * networking. This adapter pre-creates CNI conflist files for proxy networks + * (e.g. `_edge`). + * + * Socket mapping is handled elsewhere: + * - `hooks/lando-set-proxy-config.js` sets `lando.config.dockerSocket` to finch socket + * - `builders/_proxy.js` mounts it as `/var/run/docker.sock` inside the Traefik container + * + * @since 4.0.0 + */ +class ContainerdProxyAdapter { + /** + * Create a ContainerdProxyAdapter. + * + * @param {Object} [opts={}] - Configuration options. + * @param {Object} [opts.config={}] - Lando config object. + * @param {string} [opts.finchSocket] - Path to finch-daemon socket. Defaults to /run/lando/finch.sock. + * @param {Function} [opts.debug] - Debug/logging function. + */ + constructor(opts = {}) { + const config = opts.config || {}; + const paths = getContainerdPaths(config); + + /** @type {string} */ + this.finchSocket = opts.finchSocket || paths.finchSocket; + + /** @type {Function} */ + this.debug = opts.debug || (() => {}); + } + + /** + * Ensure CNI network configs exist for all proxy-related networks. + * + * The proxy uses an `_edge` network (e.g. `landoproxyhyperion5000gandalfedition_edge`). + * docker-compose via finch-daemon creates this at the Docker API level, but the + * nerdctl OCI hook needs a CNI conflist file for container networking to work. + * + * This must be called BEFORE `lando.engine.start()` for the proxy, so the + * CNI config exists when containers are created. + * + * @param {string} proxyName - The proxy project name (e.g. 'landoproxyhyperion5000gandalfedition'). + * @param {Object} [opts={}] - Options passed through to ensureCniNetwork. + * @param {string} [opts.cniNetconfPath] - CNI config directory override. + * @return {Object} Results keyed by network name, values are booleans (true = created). + */ + ensureProxyNetworks(proxyName, opts = {}) { + const debugFn = opts.debug || this.debug; + const results = {}; + + // The proxy compose defines `networks: { edge: { driver: 'bridge' } }`, + // which docker-compose names as `${proxyName}_edge`. + const edgeNetwork = `${proxyName}_edge`; + results[edgeNetwork] = ensureCniNetwork(edgeNetwork, {...opts, debug: debugFn}); + + // Also ensure the default network exists (compose may create one) + const defaultNetwork = `${proxyName}_default`; + results[defaultNetwork] = ensureCniNetwork(defaultNetwork, {...opts, debug: debugFn}); + + if (results[edgeNetwork]) { + debugFn('created CNI config for proxy edge network: %s', edgeNetwork); + } + + return results; + } + + /** + * Ensure the CNI config exists for an app's proxy edge network reference. + * + * When an app service is added to the proxy network via + * `networks: { lando_proxyedge: { name: proxyNet, external: true } }`, + * the CNI config for that external network must already exist. + * + * This is typically the same network as the proxy's edge network, but + * calling this is a safety net to ensure it exists before app compose up. + * + * @param {string} proxyNet - The proxy network name (e.g. 'landoproxyhyperion5000gandalfedition_edge'). + * @param {Object} [opts={}] - Options passed through to ensureCniNetwork. + * @return {boolean} true if a conflist was created, false if it already existed. + */ + ensureAppProxyNetwork(proxyNet, opts = {}) { + return ensureCniNetwork(proxyNet, {...opts, debug: opts.debug || this.debug}); + } +} + +module.exports = ContainerdProxyAdapter; diff --git a/lib/backends/containerd/wsl-helper.js b/lib/backends/containerd/wsl-helper.js new file mode 100644 index 000000000..b4d001946 --- /dev/null +++ b/lib/backends/containerd/wsl-helper.js @@ -0,0 +1,47 @@ +"use strict"; + +const fs = require("fs"); +const os = require("os"); +const path = require("path"); +const {execSync} = require("child_process"); + +class WslHelper { + constructor(opts = {}) { + this.debug = opts.debug || (() => {}); + this.userConfRoot = opts.userConfRoot || path.join(os.homedir(), ".lando"); + } + + static isWsl() { + if (process.platform !== "linux") return false; + try { + const version = fs.readFileSync("/proc/version", "utf8"); + return version.toLowerCase().includes("microsoft"); + } catch { + return false; + } + } + + async isDockerDesktopRunning() { + const sockets = [ + "/mnt/wsl/docker-desktop/docker-desktop-proxy", + "/var/run/docker.sock", + ]; + return sockets.some(s => fs.existsSync(s)); + } + + async ensureSocketPermissions(socketPath) { + const dir = path.dirname(socketPath); + try { + fs.mkdirSync(dir, {recursive: true}); + const uid = process.getuid(); + const gid = process.getgid(); + fs.chownSync(dir, uid, gid); + this.debug("ensured socket directory permissions for %s", dir); + } catch (err) { + this.debug("could not set socket directory permissions: %s", err.message); + } + } + +} + +module.exports = WslHelper; diff --git a/lib/backends/docker/docker-compose.js b/lib/backends/docker/docker-compose.js new file mode 100644 index 000000000..4c501245e --- /dev/null +++ b/lib/backends/docker/docker-compose.js @@ -0,0 +1,182 @@ +'use strict'; + +const {ComposeBackend} = require('../engine-backend'); +const compose = require('../../compose'); + +/** + * Docker Compose implementation of the ComposeBackend interface. + * + * Wraps the existing `lib/compose.js` module, delegating every orchestration + * command to the corresponding exported function. Each method returns a + * synchronous `{cmd, opts}` shell descriptor exactly as the existing module + * does — the shell execution layer handles actual command invocation. + * + * @extends ComposeBackend + * @since 4.0.0 + */ +class DockerCompose extends ComposeBackend { + /** + * Create a DockerCompose backend. + * + * No configuration is required — the underlying compose module is + * stateless and uses the same flag-mapping and option-parsing logic + * that Lando has always used. + */ + constructor() { + super(); + } + + /** + * Build container images for the specified services. + * + * Filters `opts.local` against `opts.services` to determine which services + * to build. If no local services match, falls back to a no-op `ps` command. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Build options. + * @param {Array} [opts.services] - Services to build (default: all). + * @param {Array} [opts.local] - Services with local Dockerfiles. + * @param {boolean} [opts.noCache=false] - Bypass the build cache. + * @param {boolean} [opts.pull=true] - Pull base images before building. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + build(composeFiles, project, opts) { + return compose.build(composeFiles, project, opts); + } + + /** + * Get the container ID(s) for services in a compose project. + * + * Equivalent to `docker-compose ps -q`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Options (e.g. `{services: ['web']}`). + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + getId(composeFiles, project, opts) { + return compose.getId(composeFiles, project, opts); + } + + /** + * Send a SIGKILL to containers in a compose project. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Kill options. + * @param {Array} [opts.services] - Services to kill. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + kill(composeFiles, project, opts) { + return compose.kill(composeFiles, project, opts); + } + + /** + * Retrieve log output from containers in a compose project. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Logging options. + * @param {boolean} [opts.follow=false] - Tail the logs. + * @param {boolean} [opts.timestamps=false] - Include timestamps. + * @param {Array} [opts.services] - Services to get logs from. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + logs(composeFiles, project, opts) { + return compose.logs(composeFiles, project, opts); + } + + /** + * Pull images for services in a compose project. + * + * Filters `opts.pullable` against `opts.services` to determine which services + * to pull. If no pullable services match, falls back to a no-op `ps` command. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Pull options. + * @param {Array} [opts.services] - Services to pull. + * @param {Array} [opts.pullable] - Services whose images can be pulled. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + pull(composeFiles, project, opts) { + return compose.pull(composeFiles, project, opts); + } + + /** + * Remove containers (and optionally volumes/networks) for a compose project. + * + * Uses `docker-compose down` when `opts.purge` is `true`, otherwise + * `docker-compose rm`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Removal options. + * @param {boolean} [opts.purge=false] - Full teardown. + * @param {boolean} [opts.force=true] - Force removal. + * @param {boolean} [opts.volumes=true] - Remove anonymous volumes. + * @param {Array} [opts.services] - Services to remove. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + remove(composeFiles, project, opts) { + return compose.remove(composeFiles, project, opts); + } + + /** + * Execute a command inside a running service container. + * + * Maps to `docker-compose exec` semantics. Handles background-ampersand + * detection and converts to `--detach` mode automatically. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Run/exec options. + * @param {Array} opts.cmd - The command and arguments to execute. + * @param {Array} [opts.services] - The service to run in. + * @param {string} [opts.user] - User to execute as. + * @param {Object} [opts.environment] - Additional environment variables. + * @param {boolean} [opts.detach=false] - Run in background. + * @param {boolean} [opts.noTTY] - Disable pseudo-TTY allocation. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + run(composeFiles, project, opts) { + return compose.run(composeFiles, project, opts); + } + + /** + * Start containers for a compose project. + * + * Equivalent to `docker-compose up` with detach and orphan removal defaults. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Start options. + * @param {Array} [opts.services] - Services to start. + * @param {boolean} [opts.background=true] - Run in detached mode. + * @param {boolean} [opts.recreate=false] - Force-recreate containers. + * @param {boolean} [opts.noRecreate=true] - Do not recreate existing containers. + * @param {boolean} [opts.removeOrphans=true] - Remove orphaned containers. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + start(composeFiles, project, opts) { + return compose.start(composeFiles, project, opts); + } + + /** + * Stop running containers in a compose project. + * + * Equivalent to `docker-compose stop`. + * + * @param {Array} composeFiles - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Stop options. + * @param {Array} [opts.services] - Services to stop. + * @returns {{cmd: string[], opts: Object}} Shell descriptor. + */ + stop(composeFiles, project, opts) { + return compose.stop(composeFiles, project, opts); + } +} + +module.exports = DockerCompose; diff --git a/lib/backends/docker/docker-container.js b/lib/backends/docker/docker-container.js new file mode 100644 index 000000000..178b6cd02 --- /dev/null +++ b/lib/backends/docker/docker-container.js @@ -0,0 +1,153 @@ +'use strict'; + +const {ContainerBackend} = require('../engine-backend'); +const Landerode = require('../../docker'); + +/** + * Docker implementation of the ContainerBackend interface. + * + * Wraps the existing {@link Landerode} class (which extends Dockerode), + * delegating all low-level container and network operations to it. This + * preserves the full container management logic while conforming to the + * pluggable backend interface introduced in Lando 4. + * + * @extends ContainerBackend + * @since 4.0.0 + */ +class DockerContainer extends ContainerBackend { + /** + * Create a DockerContainer backend. + * + * @param {Object} [opts={}] - Dockerode connection options (e.g. `{socketPath}`, `{host, port}`). + * @param {string} [id='lando'] - The Lando instance identifier used for filtering containers. + */ + constructor(opts = {}, id = 'lando') { + super(); + + /** + * The underlying Landerode (Dockerode) instance. + * @type {Landerode} + * @private + */ + this._docker = new Landerode(opts, id); + + /** @type {string} */ + this.id = id; + } + + /** + * Create a Docker network. + * + * The network is created as **attachable** and **internal** by default, + * matching the existing Landerode behavior. + * + * @param {string} name - The name of the network to create. + * @param {Object} [opts={}] - Additional network creation options. + * @returns {Promise} Network inspect data. + */ + async createNet(name, opts = {}) { + return this._docker.createNet(name, opts); + } + + /** + * Inspect a container and return its full metadata. + * + * Equivalent to `docker inspect `. + * + * @param {string} cid - A container identifier (hash, name, or short id). + * @returns {Promise} Container inspect data. + */ + async scan(cid) { + return this._docker.scan(cid); + } + + /** + * Determine whether a container is currently running. + * + * Returns `false` (not throw) if the container does not exist. + * + * @param {string} cid - A container identifier. + * @returns {Promise} + */ + async isRunning(cid) { + return this._docker.isRunning(cid); + } + + /** + * List Lando-managed containers. + * + * Delegates to {@link Landerode#list} which handles filtering by + * Lando labels, orphan removal, project/app filtering, and status enrichment. + * + * @param {Object} [options={}] - Listing options. + * @param {boolean} [options.all=false] - Include stopped containers. + * @param {string} [options.app] - Filter to a specific app name. + * @param {string} [options.project] - Filter to a specific project name. + * @param {Array} [options.filter] - Additional `key=value` filters. + * @param {string} [separator='_'] - Container name separator. + * @returns {Promise>} Array of Lando container descriptors. + */ + async list(options, separator) { + return this._docker.list(options, separator); + } + + /** + * Remove (delete) a container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={v: true, force: false}] - Removal options. + * @returns {Promise} + */ + async remove(cid, opts) { + return this._docker.remove(cid, opts); + } + + /** + * Stop a running container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={}] - Stop options (e.g. `{t: 10}` for timeout). + * @returns {Promise} + */ + async stop(cid, opts) { + return this._docker.stop(cid, opts); + } + + /** + * Get a network handle by its id or name. + * + * Returns a lightweight Dockerode proxy object that lazily calls the + * Docker API when methods are invoked. + * + * @param {string} id - The network id or name. + * @returns {Object} A Dockerode Network handle. + */ + getNetwork(id) { + return this._docker.getNetwork(id); + } + + /** + * List networks matching the given filter options. + * + * @param {Object} [opts={}] - Filter options (see Docker API `NetworkList`). + * @returns {Promise>} Array of network objects. + */ + async listNetworks(opts) { + return this._docker.listNetworks(opts); + } + + /** + * Get a container handle by its id or name. + * + * Returns a lightweight Dockerode proxy object that lazily calls the + * Docker API when methods are invoked. + * + * @param {string} cid - The container id or name. + * @returns {Object} A Dockerode Container handle. + */ + getContainer(cid) { + return this._docker.getContainer(cid); + } +} + +module.exports = DockerContainer; diff --git a/lib/backends/docker/docker-daemon.js b/lib/backends/docker/docker-daemon.js new file mode 100644 index 000000000..32f6023ea --- /dev/null +++ b/lib/backends/docker/docker-daemon.js @@ -0,0 +1,139 @@ +'use strict'; + +const {DaemonBackend} = require('../engine-backend'); +const LandoDaemon = require('../../daemon'); + +/** + * Docker implementation of the DaemonBackend interface. + * + * Wraps the existing {@link LandoDaemon} class, delegating all lifecycle + * operations (start, stop, health-check, version retrieval) to it. This + * preserves the full platform-specific logic for macOS Docker Desktop, + * Linux docker engine, and WSL while conforming to the pluggable backend + * interface introduced in Lando 4. + * + * @extends DaemonBackend + * @since 4.0.0 + */ +class DockerDaemon extends DaemonBackend { + /** + * Create a DockerDaemon backend. + * + * Accepts the same parameters as {@link LandoDaemon} and creates an + * internal instance that handles all the real work. + * + * @param {Object} [cache] - A Lando Cache instance. + * @param {Object} [events] - A Lando Events instance. + * @param {string} [docker] - Path to the docker binary. + * @param {Object} [log] - A Lando Log instance. + * @param {string} [context='node'] - Execution context (`'node'` or `'browser'`). + * @param {string} [compose] - Path to the docker-compose binary. + * @param {string} [orchestratorVersion] - The orchestrator version string. + * @param {string} [userConfRoot] - Path to the user config root directory. + */ + constructor( + cache, + events, + docker, + log, + context, + compose, + orchestratorVersion, + userConfRoot, + ) { + super(); + + /** + * The underlying LandoDaemon instance that performs all actual work. + * @type {LandoDaemon} + * @private + */ + this._daemon = new LandoDaemon( + cache, + events, + docker, + log, + context, + compose, + orchestratorVersion, + userConfRoot, + ); + } + + // ── Live-proxy properties ────────────────────────────────────────── + // These getters (and setter for isRunning) delegate directly to the + // underlying _daemon instance so callers always see the current value + // rather than a stale snapshot copied at construction time. + + /** @type {string} */ + get platform() { return this._daemon.platform; } + + /** @type {boolean} */ + get isRunning() { return this._daemon.isRunning; } + set isRunning(val) { this._daemon.isRunning = val; } + + /** @type {Object} */ + get events() { return this._daemon.events; } + + /** @type {string|false} */ + get compose() { return this._daemon.compose; } + + /** @type {string|false} */ + get docker() { return this._daemon.docker; } + + /** + * Start the Docker engine. + * + * Delegates to {@link LandoDaemon#up} which handles all platform-specific + * start logic (macOS `open`, Linux systemd scripts, Windows/WSL PowerShell). + * + * @param {boolean|Object} [retry=true] - Retry configuration. + * @param {string} [password] - Optional sudo password for Linux. + * @returns {Promise} + */ + async up(retry, password) { + return this._daemon.up(retry, password); + } + + /** + * Stop the Docker engine. + * + * Delegates to {@link LandoDaemon#down}. No-ops on macOS, Windows, and WSL; + * only actually stops the daemon on Linux in a node context. + * + * @returns {Promise} + */ + async down() { + return this._daemon.down(); + } + + /** + * Check whether the Docker engine is currently running. + * + * Delegates to {@link LandoDaemon#isUp} with optional caching. + * + * @param {Object} [cache] - A Lando Cache instance for short-lived TTL caching. + * @param {string} [docker] - Path to the docker binary to probe. + * @returns {Promise} + */ + async isUp(cache, docker) { + // Pass `undefined` for the `log` parameter — LandoDaemon.isUp() accepts + // (log, cache, docker) but never uses `log` (it relies on this.debug + // internally). The DaemonBackend interface drops the unused param. + return this._daemon.isUp(undefined, cache, docker); + } + + /** + * Retrieve version information for Docker and related tooling. + * + * Returns an object with `compose`, `engine`, and `desktop` version strings + * depending on the current platform. + * + * @returns {Promise<{compose: string, engine: string|false, desktop: string|false}>} + */ + async getVersions() { + return this._daemon.getVersions(); + } +} + +module.exports = DockerDaemon; diff --git a/lib/backends/docker/index.js b/lib/backends/docker/index.js new file mode 100644 index 000000000..32abd588e --- /dev/null +++ b/lib/backends/docker/index.js @@ -0,0 +1,24 @@ +'use strict'; + +/** + * @module backends/docker + * @description Docker backend implementations for Lando's pluggable engine architecture. + * + * Exports concrete implementations of the DaemonBackend, ContainerBackend, and + * ComposeBackend interfaces that wrap the existing Docker-based code + * (LandoDaemon, Landerode, compose.js). + * + * @example + * const {DockerDaemon, DockerContainer, DockerCompose} = require('./backends/docker'); + * + * const daemon = new DockerDaemon(cache, events, dockerPath, log); + * const container = new DockerContainer({socketPath: '/var/run/docker.sock'}); + * const compose = new DockerCompose(); + * + * @since 4.0.0 + */ +const DockerDaemon = require('./docker-daemon'); +const DockerContainer = require('./docker-container'); +const DockerCompose = require('./docker-compose'); + +module.exports = {DockerDaemon, DockerContainer, DockerCompose}; diff --git a/lib/backends/engine-backend.js b/lib/backends/engine-backend.js new file mode 100644 index 000000000..4faf67a53 --- /dev/null +++ b/lib/backends/engine-backend.js @@ -0,0 +1,984 @@ +'use strict'; + +/** + * @module backends + * @file Engine backend interfaces for Lando's pluggable container runtime support. + * + * These base classes define the contracts that any engine backend (Docker, containerd/nerdctl, etc.) + * must implement. Each class corresponds to a layer of the engine architecture: + * + * - **DaemonBackend**: Manages the container engine lifecycle (start, stop, health checks, versions). + * - **ContainerBackend**: Low-level container and network operations (inspect, list, remove, stop). + * - **ComposeBackend**: Orchestration commands that operate on compose files and projects (build, start, stop, run, etc.). + * - **EngineBackend**: Top-level facade that composes a DaemonBackend, ContainerBackend, and ComposeBackend + * and exposes all 14 public Engine methods as a unified interface. + * + * Subclasses must override every method; the base implementations throw "Not implemented" errors + * to ensure missing methods are caught early during development. + * + * ## Architecture Notes + * + * ### Auto-Start Behavior + * + * The `Engine` class wraps every command in an `eventWrapper` (see `lib/router.js:27-33`) that + * ensures the container engine daemon is running before any operation executes. The sequence is: + * + * 1. Emit `pre-engine-autostart` + * 2. Emit `engine-autostart` + * 3. Call `daemon.up()` — starts the engine if it is not already running + * 4. Emit `pre-engine-{name}` (e.g. `pre-engine-build`) + * 5. Execute the actual backend operation + * 6. Emit `post-engine-{name}` (e.g. `post-engine-build`) + * + * This auto-start-on-every-command behavior is owned by the `Engine` layer, **not** by the + * backend. Backend implementations should assume the daemon is already running when their + * methods are called. If a backend needs custom pre-flight checks, it should do so internally + * without relying on the Engine's event wrapper. + * + * ### Shell Execution Layer + * + * ComposeBackend methods return synchronous `{cmd: string[], opts: Object}` shell descriptors. + * The actual shell execution is handled by a separate layer — the `compose` function wrapper + * that `Engine` passes around. This means: + * - ComposeBackend is **not** responsible for running commands + * - ComposeBackend builds the command arrays; the shell layer executes them + * - A containerd/nerdctl backend CAN return the same `{cmd, opts}` shape with different commands + * - This preserves backward compatibility with the existing shell infrastructure + * + * @since 4.0.0 + */ + +/** + * Helper that builds a descriptive "Not implemented" error. + * + * @param {string} backendName - The name of the backend interface (e.g. "DaemonBackend"). + * @param {string} methodName - The name of the method that was called. + * @returns {Error} An error with a helpful message. + * @private + */ +const notImplemented = (backendName, methodName) => { + return new Error( + `${backendName}.${methodName}() is not implemented. ` + + `Subclasses must override this method to provide a concrete implementation.`, + ); +}; + +// --------------------------------------------------------------------------- +// DaemonBackend +// --------------------------------------------------------------------------- + +/** + * Base class for daemon / engine-lifecycle backends. + * + * A DaemonBackend is responsible for starting and stopping the underlying container engine + * (e.g. Docker Desktop, the Docker systemd service, or the containerd daemon) and for + * reporting whether the engine is currently reachable and what versions are installed. + * + * **This is an abstract class.** It cannot be instantiated directly — you must extend it + * and provide concrete implementations of all methods. + * + * Concrete implementations must set the following properties in their constructor: + * + * | Property | Type | Description | + * |--------------|-----------------|------------------------------------------------------------| + * | `platform` | `string` | The OS platform (`'darwin'`, `'linux'`, `'win32'`, `'wsl'`)| + * | `isRunning` | `boolean` | Whether the engine is believed to be running | + * | `events` | `Events` | A Lando `Events` instance for lifecycle hooks | + * | `compose` | `string\|false` | Path to the compose binary, or `false` if unavailable | + * | `docker` | `string\|false` | Path to the docker/nerdctl binary, or `false` | + * + * @since 4.0.0 + */ +class DaemonBackend { + /** + * @throws {Error} If instantiated directly (abstract class guard). + */ + constructor() { + if (new.target === DaemonBackend) { + throw new Error('DaemonBackend is abstract and cannot be instantiated directly. Extend it and provide a concrete implementation.'); + } + } + + /** + * Start the container engine. + * + * Implementations should: + * 1. Emit `pre-engine-up` before attempting to start. + * 2. Detect the current platform and invoke the appropriate start mechanism. + * 3. Retry according to `retry` settings if the engine is slow to come up. + * 4. Emit `post-engine-up` once the engine is confirmed reachable. + * + * @param {boolean|Object} [retry=true] - Retry configuration. `true` uses default retry + * settings (`{max: 25, backoff: 1000}`), `false` disables retries, or pass an object + * with `{max, backoff}` for custom settings. + * @param {string} [password] - Optional sudo password for platforms that need elevated + * privileges to start the engine (e.g. Linux systemd service). + * @returns {Promise} Resolves when the engine is up and reachable. + * @throws {Error} If the engine cannot be started after all retries. + */ + async up(retry, password) { // eslint-disable-line no-unused-vars + throw notImplemented('DaemonBackend', 'up'); + } + + /** + * Stop the container engine. + * + * Implementations should: + * 1. Emit `pre-engine-down`. + * 2. Gracefully shut down the container engine (or no-op on platforms where the + * engine is shared, e.g. Docker Desktop on macOS/Windows). + * 3. Emit `post-engine-down`. + * + * **Note:** The existing Docker implementation (`LandoDaemon.down()`) is a no-op on + * macOS, Windows, and WSL — it only actually stops the daemon on Linux in a node + * context. The `password` parameter may be needed for elevated shutdown on Linux + * but is not currently part of this signature. Implementations that require sudo + * should obtain the password from their own configuration. + * + * @returns {Promise} Resolves when the engine has been stopped (or the stop + * was intentionally skipped). + */ + async down() { + throw notImplemented('DaemonBackend', 'down'); + } + + /** + * Check whether the container engine is currently running and reachable. + * + * Implementations typically execute a lightweight command (e.g. `docker ps`) and + * cache the result with a short TTL to avoid repeated subprocess spawns. + * + * **Note:** The `log` parameter that existed in the original `LandoDaemon.isUp()` signature + * has been removed because it was never used by the implementation. If your implementation + * needs logging, inject the logger via the constructor instead. + * + * @param {Object} [cache] - A Lando Cache instance for short-lived TTL caching. + * Defaults to `this.cache` in the existing Docker implementation. + * @param {string} [docker] - Path to the docker/nerdctl binary to probe. + * Defaults to `this.docker` in the existing Docker implementation. + * @returns {Promise} `true` if the engine is reachable, `false` otherwise. + */ + async isUp(cache, docker) { // eslint-disable-line no-unused-vars + throw notImplemented('DaemonBackend', 'isUp'); + } + + /** + * Retrieve version information for the container engine and related tooling. + * + * The returned object should include at minimum: + * - `compose` — The compose/orchestrator version string. + * - `engine` — The engine version string (Linux) or `false`. + * - `desktop` — The desktop app version string (macOS/Windows) or `false`. + * + * @returns {Promise<{compose: string, engine: string|false, desktop: string|false}>} + * An object containing version strings. + */ + async getVersions() { + throw notImplemented('DaemonBackend', 'getVersions'); + } +} + +// --------------------------------------------------------------------------- +// ContainerBackend +// --------------------------------------------------------------------------- + +/** + * Base class for low-level container and network operations. + * + * A ContainerBackend provides the primitive operations that Lando needs to interact + * with individual containers and Docker/containerd networks. In the current Docker + * implementation this is the `Landerode` class (which extends Dockerode). + * + * **This is an abstract class.** It cannot be instantiated directly — you must extend it + * and provide concrete implementations of all methods. + * + * Implementations may be backed by Dockerode, nerdctl commands, the containerd gRPC + * API, or any other container runtime. + * + * ### Proxy/Handle Objects + * + * `getContainer(cid)` and `getNetwork(id)` return lightweight **proxy/handle objects**, + * not data. In the Docker implementation (Landerode extends Dockerode), these are + * Dockerode proxy objects that lazily call the Docker API when you invoke methods on them. + * + * The returned container object must support at minimum: `.inspect()`, `.remove(opts)`, + * `.stop(opts)`. + * + * The returned network object must support at minimum: `.inspect()`, `.remove()`. + * + * For a containerd backend, these should return objects with compatible method signatures. + * + * ### Internal List Method + * + * The `list()` method in the Docker implementation internally calls `this.listContainers()` + * (inherited from Dockerode) to get raw container data, then filters and transforms it. + * A containerd backend implementing `list()` must include all listing + filtering logic + * internally — there is no separate `listContainers()` in the interface. + * + * @since 4.0.0 + */ +class ContainerBackend { + /** + * @throws {Error} If instantiated directly (abstract class guard). + */ + constructor() { + if (new.target === ContainerBackend) { + throw new Error('ContainerBackend is abstract and cannot be instantiated directly. Extend it and provide a concrete implementation.'); + } + } + + /** + * Create a container network. + * + * The network should be created as **attachable** and **internal** by default + * (matching the current Docker implementation). + * + * @param {string} name - The name of the network to create. + * @param {Object} [opts={}] - Additional network creation options (driver, labels, etc.). + * Merged with defaults; see the Docker API `NetworkCreate` spec for available fields. + * @returns {Promise} A Promise resolving to network inspect data. + * @throws {Error} If the network cannot be created. + */ + async createNet(name, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'createNet'); + } + + /** + * Inspect a container and return its full metadata. + * + * Equivalent to `docker inspect `. + * + * @param {string} cid - A container identifier (hash, name, or short id). + * @returns {Promise} A Promise resolving to the container's inspect data. + * @throws {Error} If the container does not exist or cannot be inspected. + */ + async scan(cid) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'scan'); + } + + /** + * Determine whether a container is currently running. + * + * Should return `false` (not throw) if the container does not exist, to avoid + * race conditions when containers are removed between checks. + * + * @param {string} cid - A container identifier. + * @returns {Promise} `true` if the container is running, `false` otherwise. + */ + async isRunning(cid) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'isRunning'); + } + + /** + * List Lando-managed containers. + * + * Implementations must: + * 1. List all containers (optionally filtered by `options`). + * 2. Filter to only Lando-managed containers (by label or naming convention). + * 3. Remove orphaned app containers whose compose source files no longer exist. + * 4. Support filtering by `options.project`, `options.app`, and `options.filter`. + * + * @param {Object} [options={}] - Listing options. + * @param {boolean} [options.all=false] - Include stopped containers. + * @param {string} [options.app] - Filter to containers for a specific app name. + * @param {string} [options.project] - Filter to containers for a specific project name. + * @param {Array} [options.filter] - Additional `key=value` filters. + * @param {string} [separator='_'] - The separator used in container naming + * (e.g. `'_'` for docker-compose v1, `'-'` for v2). + * @returns {Promise>} An array of Lando container descriptor objects, + * each containing at minimum `{id, name, app, src, kind, lando, instance, status, running}`. + */ + async list(options, separator) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'list'); + } + + /** + * Remove (delete) a container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={v: true, force: false}] - Removal options. + * @param {boolean} [opts.v=true] - Also remove associated anonymous volumes. + * @param {boolean} [opts.force=false] - Force-remove a running container. + * @returns {Promise} Resolves when the container has been removed. + */ + async remove(cid, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'remove'); + } + + /** + * Stop a running container. + * + * @param {string} cid - A container identifier. + * @param {Object} [opts={}] - Stop options (e.g. `{t: 10}` for timeout in seconds). + * @returns {Promise} Resolves when the container has been stopped. + */ + async stop(cid, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'stop'); + } + + /** + * Get a network handle by its id or name. + * + * Returns a lightweight **proxy object** that lazily calls the container engine API + * when methods are invoked. This does NOT fetch network data — it returns a handle. + * + * The returned object must support at minimum: + * - `.inspect()` — Returns a Promise with the network's metadata. + * - `.remove()` — Returns a Promise that resolves when the network is removed. + * + * In the Docker implementation, this returns a Dockerode `Network` object. + * + * @param {string} id - The network id or name. + * @returns {Object} A network handle object (implementation-specific). + */ + getNetwork(id) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'getNetwork'); + } + + /** + * List networks matching the given filter options. + * + * @param {Object} [opts={}] - Filter options. See the Docker API `NetworkList` + * endpoint for available filters. + * @returns {Promise>} An array of network objects. + */ + async listNetworks(opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'listNetworks'); + } + + /** + * Get a container handle by its id or name. + * + * Returns a lightweight **proxy object** that lazily calls the container engine API + * when methods are invoked. This does NOT fetch container data — it returns a handle. + * + * The returned object must support at minimum: + * - `.inspect()` — Returns a Promise with the container's metadata. + * - `.remove(opts)` — Returns a Promise that resolves when the container is removed. + * - `.stop(opts)` — Returns a Promise that resolves when the container is stopped. + * + * In the Docker implementation, this returns a Dockerode `Container` object. + * + * @param {string} cid - The container id or name. + * @returns {Object} A container handle object (implementation-specific). + */ + getContainer(cid) { // eslint-disable-line no-unused-vars + throw notImplemented('ContainerBackend', 'getContainer'); + } +} + +// --------------------------------------------------------------------------- +// ComposeBackend +// --------------------------------------------------------------------------- + +/** + * Base class for compose/orchestration operations. + * + * A ComposeBackend translates high-level orchestration intents (build, start, stop, run, etc.) + * into shell command descriptors that the Lando shell layer can execute. In the current + * implementation this maps to `docker-compose` / `docker compose` CLI commands via `lib/compose.js`. + * + * **This is an abstract class.** It cannot be instantiated directly — you must extend it + * and provide concrete implementations of all methods. + * + * ### Return Type Convention + * + * Each method returns a **synchronous** shell descriptor object: + * + * ```js + * { + * cmd: string[], // The command and arguments to execute (e.g. ['--project-name', 'myapp', ...]) + * opts: { + * mode: string, // Execution mode (e.g. 'spawn') + * cstdio: *, // Custom stdio configuration + * silent: boolean // Whether to suppress output + * } + * } + * ``` + * + * These are **not Promises** — they are plain objects. The shell execution is handled by a + * separate layer (the `compose` function wrapper that `Engine` passes around). The `compose` + * wrapper receives the method name and data, calls the appropriate ComposeBackend method to + * get the `{cmd, opts}` descriptor, then executes it via `lando.shell.sh()`. + * + * A containerd/nerdctl backend CAN return the same `{cmd, opts}` shape — just with different + * command arrays (e.g. `nerdctl compose` instead of `docker compose`). This preserves backward + * compatibility with the existing shell infrastructure. + * + * ### Method Signatures + * + * Each method receives: + * - `compose` — An array of paths to compose files. + * - `project` — The project name (typically the Lando app name). + * - `opts` — An options object whose shape varies per command. + * + * @since 4.0.0 + */ +class ComposeBackend { + /** + * @throws {Error} If instantiated directly (abstract class guard). + */ + constructor() { + if (new.target === ComposeBackend) { + throw new Error('ComposeBackend is abstract and cannot be instantiated directly. Extend it and provide a concrete implementation.'); + } + } + + /** + * Build container images for the specified services. + * + * Typically pulls base images first, then builds any services that have local Dockerfiles. + * The router's `build()` handles the pull-then-build sequencing — it calls `compose('pull', datum)` + * first, then `compose('build', datum)`. Implementations of this method only need to handle + * the build step itself. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Build options. + * @param {Array} [opts.services] - Specific services to build (default: all). + * @param {Array} [opts.local] - Services with local Dockerfiles. + * @param {boolean} [opts.noCache=false] - Bypass the build cache. + * @param {boolean} [opts.pull=true] - Pull base images before building. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the build command. + */ + build(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'build'); + } + + /** + * Get the container ID(s) for services in a compose project. + * + * Equivalent to `docker-compose ps -q`. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Options (e.g. `{services: ['web']}`). + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the ps command. + */ + getId(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'getId'); + } + + /** + * Send a SIGKILL to containers in a compose project. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Kill options. + * @param {Array} [opts.services] - Specific services to kill. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the kill command. + */ + kill(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'kill'); + } + + /** + * Retrieve log output from containers in a compose project. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Logging options. + * @param {boolean} [opts.follow=false] - Tail the logs (`-f`). + * @param {boolean} [opts.timestamps=false] - Include timestamps. + * @param {Array} [opts.services] - Specific services to get logs from. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the logs command. + */ + logs(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'logs'); + } + + /** + * Pull images for services in a compose project. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Pull options. + * @param {Array} [opts.services] - Specific services to pull. + * @param {Array} [opts.pullable] - Services whose images can be pulled + * (as opposed to locally-built images). + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the pull command. + */ + pull(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'pull'); + } + + /** + * Remove containers (and optionally volumes/networks) for a compose project. + * + * When `opts.purge` is `true`, this should perform the equivalent of + * `docker-compose down` (remove everything). Otherwise, it should use + * `docker-compose rm`. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Removal options. + * @param {boolean} [opts.purge=false] - Full teardown (volumes + networks). + * @param {boolean} [opts.force=true] - Force removal without confirmation. + * @param {boolean} [opts.volumes=true] - Remove anonymous volumes. + * @param {Array} [opts.services] - Specific services to remove. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the remove/down command. + */ + remove(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'remove'); + } + + /** + * Execute a command inside a running service container. + * + * Equivalent to `docker-compose exec` (not `docker-compose run` — this executes in an + * already-running container, not a new one). Supports both attached (interactive) + * and detached execution modes. + * + * **Note:** Despite being named `run()`, this maps to `exec` semantics in the Docker + * implementation. The compose.js code builds a `docker-compose exec` shell command. + * The naming is retained for backward compatibility. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Run/exec options. + * @param {Array} opts.cmd - The command and its arguments to execute. + * @param {Array} [opts.services] - The service to run the command in. + * @param {string} [opts.user] - User to execute as (e.g. `'root'`, `'node'`, `'uid:gid'`). + * @param {Object} [opts.environment] - Additional environment variables (`{KEY: 'value'}`). + * @param {boolean} [opts.detach=false] - Run the command in the background. + * @param {boolean} [opts.noTTY] - Disable pseudo-TTY allocation. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the exec command. + */ + run(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'run'); + } + + /** + * Start containers for a compose project. + * + * Equivalent to `docker-compose up`. By default containers are started in the + * background with orphan removal enabled. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Start options. + * @param {Array} [opts.services] - Specific services to start (default: all). + * @param {boolean} [opts.background=true] - Run in detached mode. + * @param {boolean} [opts.recreate=false] - Force-recreate containers. + * @param {boolean} [opts.noRecreate=true] - Do not recreate existing containers. + * @param {boolean} [opts.removeOrphans=true] - Remove orphaned containers. + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the up command. + */ + start(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'start'); + } + + /** + * Stop running containers in a compose project. + * + * Equivalent to `docker-compose stop`. + * + * @param {Array} compose - Paths to docker-compose files. + * @param {string} project - The project/app name. + * @param {Object} [opts={}] - Stop options. + * @param {Array} [opts.services] - Specific services to stop (default: all). + * @returns {{cmd: string[], opts: Object}} A shell descriptor for the stop command. + */ + stop(compose, project, opts) { // eslint-disable-line no-unused-vars + throw notImplemented('ComposeBackend', 'stop'); + } +} + +// --------------------------------------------------------------------------- +// EngineBackend +// --------------------------------------------------------------------------- + +/** + * Top-level engine backend that composes a DaemonBackend, ContainerBackend, and ComposeBackend. + * + * This is the primary interface that the Lando `Engine` class consumes. It acts as a **facade** + * that mirrors all 14 public methods from `Engine` and delegates to the three specialized backends. + * + * Concrete implementations (e.g. `DockerBackend`, `ContainerdBackend`) should either: + * + * 1. Extend this class and override every method, or + * 2. Accept concrete `DaemonBackend`, `ContainerBackend`, and `ComposeBackend` instances + * in their constructor and rely on the default dispatch implementations provided here. + * + * ## Dual-Path Dispatch Pattern + * + * Several Engine operations support **two invocation styles** (see `lib/router.js`): + * + * 1. **Compose-based**: `{compose: [...], project: 'myapp', opts: {...}}` — routes through + * the ComposeBackend (e.g. `docker-compose rm`, `docker-compose ps`). + * 2. **ID-based**: `{id: 'abc123'}` or `{name: 'myapp_web_1'}` or `{cid: 'abc123'}` — routes + * directly to ContainerBackend methods (e.g. `docker.remove()`, `docker.stop()`). + * + * The default implementations in this class encode this dispatch logic so that concrete + * backends inherit sensible defaults. Backends can override individual methods to change + * the dispatch behavior. + * + * ## Auto-Start Behavior + * + * The `Engine` layer wraps every call to the backend in an `eventWrapper` that auto-starts + * the daemon before each operation. Backend methods should **not** attempt to start the daemon + * themselves — they can assume it is already running when called. See the module-level + * documentation for the full event sequence. + * + * ## Empty-Services Short-Circuit + * + * The `Engine` layer short-circuits `start()`, `stop()`, and `destroy()` when + * `data.opts.services` (or `data.services`) is an empty array — returning `Promise.resolve()` + * immediately without calling the backend. This is needed because Docker Compose v2 fails + * when invoked with zero services (unlike v1 which silently no-oped). Backend implementations + * do NOT need to handle this case; the Engine handles it before delegating. + * + * The `Engine` class (`lib/engine.js`) will be updated to accept an `EngineBackend` instance + * instead of separate daemon/docker/compose dependencies. + * + * @since 4.0.0 + */ +class EngineBackend { + /** + * Create an EngineBackend. + * + * @param {Object} [opts={}] - Configuration options. + * @param {DaemonBackend} [opts.daemon] - The daemon backend instance. + * @param {ContainerBackend} [opts.container] - The container backend instance. + * @param {ComposeBackend} [opts.compose] - The compose backend instance. + */ + constructor({daemon, container, compose} = {}) { + /** + * The daemon lifecycle backend. + * @type {DaemonBackend} + */ + this.daemon = daemon; + + /** + * The low-level container operations backend. + * @type {ContainerBackend} + */ + this.container = container; + + /** + * The compose/orchestration backend. + * @type {ComposeBackend} + */ + this.compose = compose; + } + + /** + * Get the name of this engine backend. + * + * Used for logging, configuration selection, and user-facing messages. + * Subclasses should override to return a descriptive name (e.g. `'docker'`, `'containerd'`). + * + * @returns {string} The backend name. + */ + get name() { + throw notImplemented('EngineBackend', 'name (getter)'); + } + + /** + * Verify that the engine backend and all its dependencies are properly installed. + * + * Implementations should check for the presence of required binaries (docker/nerdctl, + * compose tooling, etc.) and return an object describing what is and isn't available. + * + * @returns {Promise<{installed: boolean, binaries: Object}>} Installation status. + */ + async verifyInstallation() { + throw notImplemented('EngineBackend', 'verifyInstallation'); + } + + // ------------------------------------------------------------------------- + // Facade Methods — Mirror the 14 public methods from Engine (lib/engine.js) + // ------------------------------------------------------------------------- + + /** + * Build container images for the specified compose object. + * + * The default implementation pulls base images first, then builds any services that + * have local Dockerfiles — matching the behavior in `router.build()`. + * + * Dispatches through the compose backend only (no ID-based path for builds). + * + * @param {Object} data - A compose object or array of compose objects. + * @param {Array} data.compose - Paths to docker-compose files. + * @param {string} data.project - The project/app name. + * @param {Object} [data.opts] - Build options. + * @param {Array} [data.opts.services] - Services to build (default: all). + * @param {boolean} [data.opts.noCache=true] - Bypass the build cache. + * @param {boolean} [data.opts.pull=true] - Pull base images before building. + * @returns {Promise} + */ + async build(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'build'); + } + + /** + * Create a Docker/container network. + * + * Delegates to `this.container.createNet(name)`. + * + * @param {string} name - The name of the network to create. + * @returns {Promise} A Promise resolving to network inspect data. + */ + async createNetwork(name) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'createNetwork'); + } + + /** + * Remove containers for a compose object or a specific container by ID. + * + * **Dual-path dispatch:** + * - If `data.compose` exists → delegates to compose backend (`remove` command). + * - If `data.id` / `data.name` / `data.cid` exists → delegates to `this.container.remove()`. + * + * **Note:** The Engine layer short-circuits this method when `data.opts.services` is an + * empty array, returning immediately without calling the backend. See the class-level + * documentation on empty-services short-circuit. + * + * @param {Object} data - Remove criteria. + * @param {string} [data.id] - A docker-recognizable container id or name. + * @param {Array} [data.compose] - Paths to docker-compose files. + * @param {string} [data.project] - The project/app name. + * @param {Object} [data.opts] - Removal options. + * @param {Array} [data.opts.services] - Services to remove. + * @param {boolean} [data.opts.volumes=true] - Also remove volumes. + * @param {boolean} [data.opts.force=false] - Force removal. + * @param {boolean} [data.opts.purge=false] - Full teardown (implies volumes + force). + * @returns {Promise} + */ + async destroy(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'destroy'); + } + + /** + * Check whether a specific service/container exists. + * + * **Dual-path dispatch:** + * - If `data.compose` exists → uses compose backend `getId` to check for container IDs. + * - If `data.id` / `data.name` / `data.cid` exists → checks against `this.container.list()`. + * + * @param {Object} data - Search criteria. + * @param {string} [data.id] - A docker-recognizable container id or name. + * @param {Array} [data.compose] - Paths to docker-compose files. + * @param {string} [data.project] - The project/app name. + * @param {Object} [data.opts] - Options. + * @param {Array} [data.opts.services] - Services to check. + * @returns {Promise} Whether the service/container exists. + */ + async exists(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'exists'); + } + + /** + * Get version compatibility information for the engine and related tooling. + * + * Retrieves version strings from `this.daemon.getVersions()` and compares them against + * the supported version ranges from configuration. Returns an array of compatibility + * info objects. + * + * This is Engine-level logic that does semver comparison. The backend provides the raw + * version data via `DaemonBackend.getVersions()`. + * + * @param {Object} [supportedVersions] - Version compatibility configuration keyed by + * component name (e.g. `{compose: {min, max, ...}, engine: {min, max, ...}}`). + * @returns {Promise>} An array of compatibility info objects, each containing + * `{name, version, satisfied, wants, link, ...}`. + */ + async getCompatibility(supportedVersions) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'getCompatibility'); + } + + /** + * Get a network handle by its id or name. + * + * Delegates to `this.container.getNetwork(id)`. Returns a proxy/handle object, + * not network data. See ContainerBackend.getNetwork() for details. + * + * @param {string} id - The network id or name. + * @returns {Object} A network handle object. + */ + getNetwork(id) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'getNetwork'); + } + + /** + * List networks matching the given filter options. + * + * Delegates to `this.container.listNetworks(opts)`. + * + * @param {Object} [opts] - Filter options. + * @returns {Promise>} An array of network objects. + */ + async getNetworks(opts) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'getNetworks'); + } + + /** + * Determine whether a container is currently running. + * + * Delegates to `this.container.isRunning(data)` where `data` is a container id string. + * + * @param {string} data - A docker-recognizable container id or name. + * @returns {Promise} `true` if running, `false` otherwise. + */ + async isRunning(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'isRunning'); + } + + /** + * List all Lando-managed containers, optionally filtered. + * + * Delegates to `this.container.list(options, separator)`. + * + * @param {Object} [options={}] - Filter options. + * @param {boolean} [options.all=false] - Include stopped containers. + * @param {string} [options.app] - Filter by app name. + * @param {Array} [options.filter] - Additional key=value filters. + * @param {string} [separator='_'] - Container name separator (config-driven). + * @returns {Promise>} An array of Lando container descriptor objects. + */ + async list(options, separator) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'list'); + } + + /** + * Get log output from containers in a compose project. + * + * Dispatches through the compose backend only (no ID-based path for logs). + * + * @param {Object} data - A compose object. + * @param {Array} data.compose - Paths to docker-compose files. + * @param {string} data.project - The project/app name. + * @param {Object} [data.opts] - Logging options. + * @param {boolean} [data.opts.follow=false] - Tail the logs. + * @param {boolean} [data.opts.timestamps=true] - Include timestamps. + * @returns {Promise} + */ + async logs(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'logs'); + } + + /** + * Execute a command on a running container. + * + * This is the most complex Engine operation. The **full orchestration lifecycle** is managed + * by the Engine/router layer (see `router.run()` in `lib/router.js:75-106`), **not** by the + * backend. The backend provides the primitives; the Engine orchestrates them. + * + * ### Full `run()` Lifecycle (owned by Engine/router): + * + * 1. **Merge CLI env vars** — `opts.environment` is merged with CLI-injected env vars + * via `get-cli-env()`. + * 2. **Escape string commands** — If `data.cmd` is a string, it is shell-escaped into + * an array. + * 3. **Check if container is running** — Calls `container.isRunning(containerId)`. + * 4. **Start if needed** — If the container is NOT running, calls `start()` first (using + * compose backend). The `started` flag tracks whether the container was already running. + * 5. **Execute the command** — Calls `compose('run', ...)` which maps to `ComposeBackend.run()` + * (i.e. `docker-compose exec` semantics). + * 6. **Conditionally stop** — After execution, if the container was NOT originally running + * (or if `opts.last` is true), the container is stopped. + * 7. **Conditionally remove** — If the container was NOT originally running AND + * `opts.autoRemove` is true, the container is destroyed. + * + * ### Build Step Flags (`prestart` / `last`): + * + * During `lando rebuild`, multiple `run()` calls happen sequentially for build steps. + * - `opts.prestart = true` — This is a build step, not a user command. + * - `opts.last = true` — This is the final build step. + * + * When `prestart` is true and `last` is false, the container is kept running between + * build steps to avoid stop/start churn. On the last build step (`last: true`), all + * containers are stopped (services filter is cleared) to ensure a clean state. + * + * ### Backend's Role: + * + * The backend only provides the primitives: `container.isRunning()`, `compose.start()`, + * `compose.run()`, `compose.stop()`, and `compose.remove()`. The orchestration logic + * (steps 1-7 above) stays in the Engine/router layer. + * + * @param {Object} data - A run object. + * @param {string} data.id - The container id or name to run the command on. + * @param {string|Array} data.cmd - The command to execute. + * @param {Object} [data.opts] - Run options. + * @param {string} [data.opts.mode='collect'] - `'collect'` or `'attach'`. + * @param {Array} [data.opts.env=[]] - Additional env vars (`KEY=VALUE`). + * @param {string} [data.opts.user='root'] - User to run as. + * @param {boolean} [data.opts.detach=false] - Run in background. + * @param {boolean} [data.opts.autoRemove=false] - Remove container after run. + * @param {boolean} [data.opts.prestart=false] - Whether this is a build step. + * @param {boolean} [data.opts.last=false] - Whether this is the final build step. + * @returns {Promise} + */ + async run(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'run'); + } + + /** + * Inspect a container and return comprehensive metadata. + * + * **Dual-path dispatch:** + * - If `data.compose` exists → uses compose backend `getId` to resolve the container ID, + * then calls `this.container.scan()` with the resolved ID. + * - If `data.id` / `data.name` / `data.cid` exists → calls `this.container.scan()` directly. + * + * @param {Object} data - Search criteria. + * @param {string} [data.id] - A docker-recognizable container id or name. + * @param {Array} [data.compose] - Paths to docker-compose files. + * @param {string} [data.project] - The project/app name. + * @param {Object} [data.opts] - Options. + * @param {Array} [data.opts.services] - Services to scan. + * @returns {Promise} Container metadata (inspect data). + */ + async scan(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'scan'); + } + + /** + * Start containers for a compose object. + * + * Dispatches through the compose backend (`docker-compose up`). + * + * **Note:** The Engine layer short-circuits this method when `data.opts.services` is an + * empty array, returning immediately without calling the backend. See the class-level + * documentation on empty-services short-circuit. + * + * @param {Object} data - A compose object. + * @param {Array} data.compose - Paths to docker-compose files. + * @param {string} data.project - The project/app name. + * @param {Object} [data.opts] - Start options. + * @param {Array} [data.opts.services] - Services to start (default: all). + * @param {boolean} [data.opts.background=true] - Run in detached mode. + * @param {boolean} [data.opts.recreate=false] - Force-recreate containers. + * @param {boolean} [data.opts.removeOrphans=true] - Remove orphaned containers. + * @returns {Promise} + */ + async start(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'start'); + } + + /** + * Stop containers for a compose object or a specific container by ID. + * + * **Dual-path dispatch:** + * - If `data.compose` exists → delegates to compose backend (`stop` or `kill` command, + * depending on `data.kill` flag). + * - If `data.id` / `data.name` / `data.cid` exists → delegates to `this.container.stop()`. + * + * **Note:** The Engine layer short-circuits this method when `data.opts.services` is an + * empty array, returning immediately without calling the backend. See the class-level + * documentation on empty-services short-circuit. + * + * @param {Object} data - Stop criteria. + * @param {string} [data.id] - A docker-recognizable container id or name. + * @param {Array} [data.compose] - Paths to docker-compose files. + * @param {string} [data.project] - The project/app name. + * @param {Object} [data.opts] - Stop options. + * @param {Array} [data.opts.services] - Services to stop (default: all). + * @returns {Promise} + */ + async stop(data) { // eslint-disable-line no-unused-vars + throw notImplemented('EngineBackend', 'stop'); + } +} + +module.exports = {DaemonBackend, ContainerBackend, ComposeBackend, EngineBackend}; diff --git a/lib/backends/index.js b/lib/backends/index.js new file mode 100644 index 000000000..e8fed97ab --- /dev/null +++ b/lib/backends/index.js @@ -0,0 +1,23 @@ +'use strict'; + +/** + * @module backends + * @description Pluggable engine backend interfaces for Lando. + * + * Exports the base classes that define the contracts for any container engine backend + * (Docker, containerd/nerdctl, etc.). Concrete implementations should extend these + * classes and override every method. + * + * @example + * const {EngineBackend, DaemonBackend, ContainerBackend, ComposeBackend} = require('./backends'); + * + * class MyDaemon extends DaemonBackend { + * async up(retry, password) { ... } + * async down() { ... } + * async isUp(cache, docker) { ... } + * async getVersions() { ... } + * } + * + * @since 4.0.0 + */ +module.exports = require('./engine-backend'); diff --git a/lib/compose.js b/lib/compose.js index 34cc67c18..ffeb5589c 100644 --- a/lib/compose.js +++ b/lib/compose.js @@ -12,6 +12,7 @@ const composeFlags = { noCache: '--no-cache', noRecreate: '--no-recreate', noDeps: '--no-deps', + noStart: '--no-start', noTTY: '-T', pull: '--pull', q: '--quiet', diff --git a/lib/engine.js b/lib/engine.js index b86c919b4..32ccfcc2e 100644 --- a/lib/engine.js +++ b/lib/engine.js @@ -23,9 +23,19 @@ module.exports = class Engine { data, run, ); + // engine backend indicator + this.engineBackend = config.engine || 'auto'; + // Determine install status - this.composeInstalled = fs.existsSync(config.orchestratorBin); - this.dockerInstalled = this.daemon.docker !== false; + // When engine is containerd, dockerInstalled reflects containerd availability + if (this.engineBackend === 'containerd') { + this.composeInstalled = fs.existsSync(config.orchestratorBin); + this.dockerInstalled = this.daemon.containerd !== false + && fs.existsSync(this.daemon.containerd); + } else { + this.composeInstalled = fs.existsSync(config.orchestratorBin); + this.dockerInstalled = this.daemon.docker !== false; + } // set the compose separator this.separator = _.get(config, 'orchestratorSeparator', '_'); @@ -33,6 +43,9 @@ module.exports = class Engine { // Grab the supported ranges for our things this.supportedVersions = config.dockerSupportedVersions; + // Supported version ranges for containerd backend + this.supportedContainerdVersions = config.supportedContainerdVersions; + // platform this.platform = process.landoPlatform ?? process.platform; } @@ -181,12 +194,12 @@ module.exports = class Engine { getCompatibility(supportedVersions = this.supportedVersions) { const semver = require('semver'); - // normalize supported versions stuff - supportedVersions = _(supportedVersions) + // helper to normalize a supported versions object into comparison-ready format + const normalize = sv => _(sv) .map((data, name) => _.merge({}, data, {name})) .map(data => ([data.name, { satisfies: data.satisfies || `${data.min} - ${data.max}`, - link: data.link[this.platform], + link: _.isObject(data.link) && !_.isArray(data.link) ? (data.link[this.platform] || data.link) : data.link, tested: data.tested || 'x.x.x', recommendUpdate: data.recommendUpdate || 'x.x.x', }])) @@ -194,18 +207,39 @@ module.exports = class Engine { .value(); return this.daemon.getVersions().then(versions => { - // Remove the things we don't need depending on platform - // @TODO: Should daemon.getVersions just do this automatically? - if (this.platform === 'linux') delete versions.desktop; - else delete versions.engine; + // Detect containerd backend: versions have containerd key instead of desktop/engine + const isContainerd = Object.prototype.hasOwnProperty.call(versions, 'containerd'); + + let normalizedVersions; + if (isContainerd) { + // containerd format: {containerd, buildkit, nerdctl} + normalizedVersions = normalize(this.supportedContainerdVersions); - // handle skip - if (versions?.engine === 'skip') delete versions.engine; - if (versions?.desktop === 'skip') delete versions.desktop; + // Remove false values (binaries that couldn't be versioned) + Object.keys(versions).forEach(key => { + if (versions[key] === false || versions[key] === 'skip') { + delete versions[key]; + } + }); + } else { + // Docker format: {desktop, engine, compose} + normalizedVersions = normalize(supportedVersions); + + // Remove the things we don't need depending on platform + // @TODO: Should daemon.getVersions just do this automatically? + if (this.platform === 'linux') delete versions.desktop; + else delete versions.engine; + + // handle skip + if (versions?.engine === 'skip') delete versions.engine; + if (versions?.desktop === 'skip') delete versions.desktop; + } // do the version comparison return _(versions).map((version, name) => { - const reqs = supportedVersions[name]; + const reqs = normalizedVersions[name]; + // skip versions we don't have supported ranges for + if (!reqs) return null; return { name, link: reqs.link, @@ -215,10 +249,11 @@ module.exports = class Engine { wants: reqs.satisfies, tested: reqs.tested, update: reqs.recommendUpdate, - dockerVersion: true, + dockerVersion: !isContainerd, version: semver.clean(version), }; }) + .compact() .value(); }); } @@ -496,4 +531,3 @@ module.exports = class Engine { return this.engineCmd('stop', data); } }; - diff --git a/lib/lando.js b/lib/lando.js index 496dadd61..f5a8830dd 100644 --- a/lib/lando.js +++ b/lib/lando.js @@ -5,6 +5,8 @@ const fs = require('fs'); const glob = require('glob'); const path = require('path'); +const {getContainerdAuthConfig} = require('../utils/setup-containerd-auth'); + // Bootstrap levels const BOOTSTRAP_LEVELS = { config: 1, @@ -105,14 +107,43 @@ const bootstrapEngine = lando => { const Shell = require('./shell'); lando.shell = new Shell(lando.log); lando.scanUrls = require('../utils/legacy-scan')(lando.log); - lando.engine = require('../utils/setup-engine')( - lando.config, - lando.cache, - lando.events, - lando.log, - lando.shell, - lando.config.instance, - ); + + // Use BackendManager to create the engine (replaces setup-engine.js) + // lando.engine = require('../utils/setup-engine')( + // lando.config, lando.cache, lando.events, lando.log, lando.shell, lando.config.instance, + // ); + const BackendManager = require('./backend-manager'); + const backendManager = new BackendManager(lando.config, lando.cache, lando.events, lando.log, lando.shell); + lando.engine = backendManager.createEngine(lando.config.instance); + lando.backendManager = backendManager; + + // When using containerd, update global orchestratorBin to docker-compose + // (used via finch-daemon) so lando-reset-orchestrator.js doesn't replace + // the engine with Docker's setup-engine. + if (lando.engine.engineBackend === 'containerd') { + const composeVersion = lando.config.orchestratorVersion || '2.31.0'; + const composeBin = path.join(lando.config.userConfRoot, 'bin', `docker-compose-v${composeVersion}`); + if (fs.existsSync(composeBin)) { + lando.config.orchestratorBin = composeBin; + } + + const finchSocket = lando.engine.daemon.finchDaemon.getSocketPath(); + lando.config.dockerBin = lando.engine.daemon.nerdctlBin; + lando.config.engineConfig = { + ...lando.config.engineConfig, + authConfig: getContainerdAuthConfig({configPath: lando.config.registryAuth}), + buildkitHost: `unix://${lando.engine.daemon.buildkitSocket}`, + containerdMode: true, + containerdNamespace: 'default', + containerdSocket: lando.engine.daemon.socketPath, + engine: 'containerd', + nerdctlConfig: path.join(lando.config.userConfRoot, 'config', 'nerdctl.toml'), + socketPath: finchSocket, + }; + } + + lando.log.info('engine backend: %s', lando.config.engine || 'auto'); + lando.utils = _.merge({}, require('./utils'), require('./config')); // if we have not wiped the scripts dir to accomodate https://github.com/docker/for-mac/issues/6614#issuecomment-1382224436 @@ -659,31 +690,41 @@ module.exports = class Lando { // pre setup event to mutate the setup tasks await this.events.emit('pre-setup', options); - const results = await Promise.all(options.tasks.map(async task => { - // break it up - const {id, canRun, comments, description, hasRun, requiresRestart, version} = require('../utils/parse-setup-task')(task); // eslint-disable-line max-len - // lets start optimistically - const status = {version, description, id, state: 'INSTALLED'}; - // and slowly spiral down - // @TODO: woiuld be great if hasRun could also return a "comment" eg - // "installed but slightly above desired range" - if (await hasRun() === false) { - try { - await canRun(); - status.state = 'NOT INSTALLED'; - if (comments['NOT INSTALLED']) status.comment = comments['NOT INSTALLED']; - } catch (error) { - status.state = 'CANNOT INSTALL'; - status.comment = error.message; + const results = await Promise.all(options.tasks + .filter(task => task.hidden !== true) + .map(async task => { + // extract status fields with defaults — intentionally NOT calling parse-setup-task here + // because that mutates/wraps the task object and setup() needs to do that exactly once + const slugify = require('slugify'); + const id = task.id ?? slugify(task.title); + const canRun = task.canRun ?? (async () => true); + const comments = task.comments ?? {}; + const description = task.description ?? task.title; + const hasRun = task.hasRun ?? (async () => false); + const requiresRestart = task.requiresRestart ?? false; + const version = task.version; + // lets start optimistically + const status = {version, description, id, state: 'INSTALLED'}; + // and slowly spiral down + // @TODO: woiuld be great if hasRun could also return a "comment" eg + // "installed but slightly above desired range" + if (await hasRun() === false) { + try { + await canRun(); + status.state = 'NOT INSTALLED'; + if (comments['NOT INSTALLED']) status.comment = comments['NOT INSTALLED']; + } catch (error) { + status.state = 'CANNOT INSTALL'; + status.comment = error.message; + } } - } - // if requires restart is a function then run it to reset teh task - if (typeof requiresRestart === 'function') status.restart = await requiresRestart({}, task); - else status.restart = requiresRestart; + // if requires restart is a function then run it to reset teh task + if (typeof requiresRestart === 'function') status.restart = await requiresRestart({}, task); + else status.restart = requiresRestart; - return status; - })); + return status; + })); // pre setup event to mutate the setup tasks await this.events.emit('post-setup', results); diff --git a/lib/shell.js b/lib/shell.js index 9dccabfff..79f99b9e7 100644 --- a/lib/shell.js +++ b/lib/shell.js @@ -119,7 +119,7 @@ module.exports = class Shell { * console.log(results); * }); */ - sh(cmd, {mode = 'exec', detached = false, cwd = process.cwd(), cstdio = 'inherit', silent = false} = {}) { + sh(cmd, {mode = 'exec', detached = false, cwd = process.cwd(), cstdio = 'inherit', silent = false, env = process.env} = {}) { // Log more because this shit important! const id = _.uniqueId('pid'); // Basically just remove the options so things are readable in debug mode @@ -134,13 +134,13 @@ module.exports = class Shell { // Add a record of this process while its running // @NOTE: sadly we can't really do much here in terms of manipulating the process this.running.push(addCommand({cmd, id, mode})); - return exec(cmd, _.merge({}, {silent: true}, {cwd, detached, mode})); + return exec(cmd, _.merge({}, {silent: true}, {cwd, detached, mode, env})); } // Determine stdio const stdio = (process.lando === 'node') ? {stdio: cstdio} : {stdio: ['ignore', 'pipe', 'pipe']}; // Get the run spawn so we can add it - const run = child.spawn(_.first(cmd), _.tail(cmd), _.merge({}, {detached, cwd}, stdio)); + const run = child.spawn(_.first(cmd), _.tail(cmd), _.merge({}, {detached, cwd, env}, stdio)); // Add a record of this process while its running this.running.push(addCommand({cmd, id, mode, process: run})); return spawn(run, stdio, silent, this); diff --git a/messages/buildkitd-not-running.js b/messages/buildkitd-not-running.js new file mode 100644 index 000000000..720579abe --- /dev/null +++ b/messages/buildkitd-not-running.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'BuildKit daemon is not running', + type: 'warning', + detail: [ + 'The BuildKit daemon (buildkitd) is not running.', + 'BuildKit is required for building container images with containerd.', + 'Try running "lando setup" to restart it,', + 'or check ~/.lando/logs/buildkitd.log for errors.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#buildkit-daemon-is-not-running', +}); diff --git a/messages/compose-failed-containerd.js b/messages/compose-failed-containerd.js new file mode 100644 index 000000000..a0279f896 --- /dev/null +++ b/messages/compose-failed-containerd.js @@ -0,0 +1,14 @@ +'use strict'; + +module.exports = message => ({ + title: 'docker-compose failed (containerd backend)', + type: 'warning', + detail: [ + `${message}`, + 'The containerd engine backend uses docker-compose with finch-daemon', + 'as the Docker API compatibility layer.', + 'Check that all services in your Landofile are compatible', + 'with the containerd backend.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#docker-compose-failed', +}); diff --git a/messages/containerd-binaries-not-found.js b/messages/containerd-binaries-not-found.js new file mode 100644 index 000000000..6b96bf0fd --- /dev/null +++ b/messages/containerd-binaries-not-found.js @@ -0,0 +1,14 @@ +'use strict'; + +module.exports = () => ({ + title: 'containerd backend binaries not found', + type: 'error', + detail: [ + 'One or more required binaries for the containerd engine backend', + 'were not found at the expected path.', + 'The containerd backend requires containerd, buildkitd, finch-daemon,', + 'and docker-compose to be installed.', + 'Run "lando setup" to install them.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#binaries-not-found', +}); diff --git a/messages/containerd-not-running.js b/messages/containerd-not-running.js new file mode 100644 index 000000000..23d81dc2e --- /dev/null +++ b/messages/containerd-not-running.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'containerd is not running', + type: 'warning', + detail: [ + 'The containerd daemon does not appear to be running.', + 'Try running "lando setup" to install and start containerd,', + 'or start it manually if already installed.', + 'Check ~/.lando/logs/containerd.log for details.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#containerd-is-not-running', +}); diff --git a/messages/containerd-permission-denied.js b/messages/containerd-permission-denied.js new file mode 100644 index 000000000..0a0667464 --- /dev/null +++ b/messages/containerd-permission-denied.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'containerd requires elevated permissions', + type: 'error', + detail: [ + 'containerd requires elevated permissions to run.', + 'On Linux, add your user to the appropriate group', + 'or run with sudo.', + 'Check ~/.lando/logs/containerd.log for permission errors.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#permission-denied', +}); diff --git a/messages/containerd-socket-conflict.js b/messages/containerd-socket-conflict.js new file mode 100644 index 000000000..b0d43e63a --- /dev/null +++ b/messages/containerd-socket-conflict.js @@ -0,0 +1,14 @@ +'use strict'; + +module.exports = () => ({ + title: 'containerd socket conflict detected', + type: 'warning', + detail: [ + 'Another containerd instance may be using the socket.', + 'Lando uses its own isolated containerd instance at', + '/run/lando/containerd.sock to avoid conflicts.', + 'If problems persist, stop any other containerd instances', + 'or check for stale socket files.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#socket-conflict', +}); diff --git a/messages/finch-daemon-not-running.js b/messages/finch-daemon-not-running.js new file mode 100644 index 000000000..ff7a6b70a --- /dev/null +++ b/messages/finch-daemon-not-running.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'finch-daemon is not running', + type: 'warning', + detail: [ + 'The finch-daemon (Docker API compatibility layer) is not running.', + 'finch-daemon provides a Docker-compatible socket for tools like Traefik.', + 'Try running "lando setup" or restarting Lando.', + 'Check ~/.lando/logs/finch-daemon.log for errors.', + ], + url: 'https://docs.lando.dev/troubleshooting/containerd.html#finch-daemon-is-not-running', +}); diff --git a/messages/lima-not-installed.js b/messages/lima-not-installed.js new file mode 100644 index 000000000..4517dd7d8 --- /dev/null +++ b/messages/lima-not-installed.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'Lima is required for containerd on macOS', + type: 'error', + detail: [ + 'Lima is required to run containerd on macOS.', + 'The containerd engine runs inside a Lima virtual machine on macOS', + 'because containerd requires a Linux kernel.', + 'Run "lando setup" to install Lima and create the Lando VM.', + ], + url: 'https://lima-vm.io', +}); diff --git a/messages/lima-vm-not-running.js b/messages/lima-vm-not-running.js new file mode 100644 index 000000000..0f7803bd7 --- /dev/null +++ b/messages/lima-vm-not-running.js @@ -0,0 +1,13 @@ +'use strict'; + +module.exports = () => ({ + title: 'Lando Lima VM is not running', + type: 'warning', + detail: [ + 'The Lando Lima VM is stopped or not yet created.', + 'Lando will attempt to start it automatically.', + 'If this persists, try: limactl start lando', + 'Or run "lando setup" to recreate the VM.', + ], + url: 'https://lima-vm.io', +}); diff --git a/messages/update-containerd-warning.js b/messages/update-containerd-warning.js new file mode 100644 index 000000000..948179e59 --- /dev/null +++ b/messages/update-containerd-warning.js @@ -0,0 +1,23 @@ +'use strict'; + +/** + * Warning message recommending an update for a containerd backend component. + * + * @param {Object} [opts={}] - Component version info. + * @param {string} [opts.name] - Component name (e.g. "containerd", "buildkitd"). + * @param {string} [opts.version] - Currently installed version. + * @param {string} [opts.update] - Recommended version to update to. + * @param {string} [opts.link] - URL for release / update instructions. + * @returns {{type: string, title: string, detail: string[], command: string, url: string}} + */ +module.exports = ({name, version, update, link} = {}) => ({ + type: 'warning', + title: `Recommend updating ${name || 'containerd component'}`, + detail: [ + `You have version ${version || 'unknown'} but we recommend updating to ${update || 'the latest version'}.`, + 'In order to ensure the best stability and support we recommend you update', + 'by running the hidden "lando setup" command.', + ], + command: 'lando setup --skip-common-plugins', + url: link, +}); diff --git a/scripts/benchmark-engines.sh b/scripts/benchmark-engines.sh new file mode 100755 index 000000000..312807ad9 --- /dev/null +++ b/scripts/benchmark-engines.sh @@ -0,0 +1,208 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Lando Engine Benchmark Script +# Compares Docker vs containerd performance for common operations. +# +# Usage: +# ./scripts/benchmark-engines.sh --engine docker --runs 3 +# ./scripts/benchmark-engines.sh --engine containerd --runs 5 +# ./scripts/benchmark-engines.sh --engine both +# +# Operations benchmarked: +# 1. Image pull (alpine:latest) +# 2. Container run (echo hello) +# 3. Container list (ps) +# +# Results are written to a markdown file in /tmp. + +# --------------------------------------------------------------------------- +# Defaults +# --------------------------------------------------------------------------- +ENGINE="both" +RUNS=3 +RESULTS_FILE="/tmp/lando-benchmark-$(date +%s).md" +LANDO_DIR="${LANDO_DIR:-$HOME/.lando}" + +# --------------------------------------------------------------------------- +# Parse flags +# --------------------------------------------------------------------------- +while [[ $# -gt 0 ]]; do + case "$1" in + --engine) + ENGINE="$2" + shift 2 + ;; + --runs) + RUNS="$2" + shift 2 + ;; + --output) + RESULTS_FILE="$2" + shift 2 + ;; + -h|--help) + echo "Usage: $0 [--engine docker|containerd|both] [--runs N] [--output FILE]" + exit 0 + ;; + *) + echo "Unknown flag: $1" >&2 + exit 1 + ;; + esac +done + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- +DOCKER_BIN="${DOCKER_BIN:-docker}" +FINCH_SOCK="${FINCH_SOCK:-/run/lando/finch.sock}" +IMAGE="alpine:latest" + +# Time a command in milliseconds using bash built-in SECONDS or date +# Returns milliseconds to stdout +time_ms() { + local start end + start=$(date +%s%N 2>/dev/null || echo 0) + "$@" >/dev/null 2>&1 + end=$(date +%s%N 2>/dev/null || echo 0) + echo $(( (end - start) / 1000000 )) +} + +# Calculate mean of space-separated numbers +calc_mean() { + local nums=("$@") + local sum=0 + for n in "${nums[@]}"; do + sum=$((sum + n)) + done + echo $((sum / ${#nums[@]})) +} + +# Calculate median of space-separated numbers +calc_median() { + local sorted + sorted=($(printf '%s\n' "$@" | sort -n)) + local count=${#sorted[@]} + local mid=$((count / 2)) + if (( count % 2 == 0 )); then + echo $(( (sorted[mid - 1] + sorted[mid]) / 2 )) + else + echo "${sorted[$mid]}" + fi +} + +# --------------------------------------------------------------------------- +# Benchmark a single engine +# --------------------------------------------------------------------------- +benchmark_engine() { + local engine_name="$1" + local cli_cmd="$2" + local cli_args=("${@:3}") + + echo "## ${engine_name}" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + + local pull_times=() + local run_times=() + local ps_times=() + + for i in $(seq 1 "$RUNS"); do + echo " Run ${i}/${RUNS} for ${engine_name}..." + + # Clean up image before pull test to ensure a fresh pull + "$cli_cmd" "${cli_args[@]}" rmi "$IMAGE" >/dev/null 2>&1 || true + + # 1. Image pull + local t + t=$(time_ms "$cli_cmd" "${cli_args[@]}" pull "$IMAGE") + pull_times+=("$t") + + # 2. Container run + t=$(time_ms "$cli_cmd" "${cli_args[@]}" run --rm "$IMAGE" echo hello) + run_times+=("$t") + + # 3. Container list + t=$(time_ms "$cli_cmd" "${cli_args[@]}" ps) + ps_times+=("$t") + done + + # Calculate stats + local pull_mean pull_median run_mean run_median ps_mean ps_median + pull_mean=$(calc_mean "${pull_times[@]}") + pull_median=$(calc_median "${pull_times[@]}") + run_mean=$(calc_mean "${run_times[@]}") + run_median=$(calc_median "${run_times[@]}") + ps_mean=$(calc_mean "${ps_times[@]}") + ps_median=$(calc_median "${ps_times[@]}") + + # Write results table + cat >> "$RESULTS_FILE" < "$RESULTS_FILE" </dev/null 2>&1; then + echo "Benchmarking Docker..." + benchmark_engine "Docker" "$DOCKER_BIN" + else + echo "WARNING: docker not found, skipping Docker benchmark." >&2 + echo "## Docker" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + echo "_Skipped: \`docker\` binary not found._" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + fi +fi + +# containerd (via finch-daemon Docker API) benchmark +if [[ "$ENGINE" == "containerd" || "$ENGINE" == "both" ]]; then + if [[ -S "$FINCH_SOCK" ]]; then + echo "Benchmarking containerd (docker cli via finch-daemon)..." + # Use docker CLI pointed at finch-daemon — per BRIEF, never shell out to nerdctl + export DOCKER_HOST="unix://${FINCH_SOCK}" + benchmark_engine "containerd (finch-daemon)" "$DOCKER_BIN" + unset DOCKER_HOST + else + echo "WARNING: finch-daemon socket not found at ${FINCH_SOCK}, skipping containerd benchmark." >&2 + echo "## containerd (finch-daemon)" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + echo "_Skipped: finch-daemon socket not found at \`${FINCH_SOCK}\`._" >> "$RESULTS_FILE" + echo "" >> "$RESULTS_FILE" + fi +fi + +# Clean up test image from both engines +"$DOCKER_BIN" rmi "$IMAGE" >/dev/null 2>&1 || true +DOCKER_HOST="unix://${FINCH_SOCK}" "$DOCKER_BIN" rmi "$IMAGE" >/dev/null 2>&1 || true + +echo "" +echo "Done! Results written to: ${RESULTS_FILE}" +echo "" +cat "$RESULTS_FILE" diff --git a/scripts/test-compose.yml b/scripts/test-compose.yml new file mode 100644 index 000000000..810aa5052 --- /dev/null +++ b/scripts/test-compose.yml @@ -0,0 +1,5 @@ +services: + web: + image: nginx:alpine + ports: + - "8099:80" diff --git a/scripts/test-containerd-engine.sh b/scripts/test-containerd-engine.sh new file mode 100755 index 000000000..04584555c --- /dev/null +++ b/scripts/test-containerd-engine.sh @@ -0,0 +1,442 @@ +#!/bin/bash +# +# test-containerd-engine.sh +# +# Standalone smoke test for the containerd engine path. +# Exercises the PRODUCTION compose path: docker-compose + finch-daemon + containerd. +# +# This matches how Lando actually runs containers: +# docker-compose ---> finch-daemon (Docker API) ---> containerd + buildkitd +# +# Usage: +# bash scripts/test-containerd-engine.sh +# +# Requirements: +# - containerd, buildkitd, finch-daemon, docker-compose binaries installed +# - Run as root (or with sudo) since containerd requires root privileges +# +set -euo pipefail + +# --------------------------------------------------------------------------- +# Colors & helpers +# --------------------------------------------------------------------------- +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +RESET='\033[0m' + +step_num=0 + +step() { + step_num=$((step_num + 1)) + printf "\n${CYAN}${BOLD}[Step %d]${RESET} %s\n" "$step_num" "$1" +} + +ok() { + printf " ${GREEN}✔ %s${RESET}\n" "$1" +} + +fail() { + printf " ${RED}✘ %s${RESET}\n" "$1" +} + +info() { + printf " ${YELLOW}→ %s${RESET}\n" "$1" +} + +# --------------------------------------------------------------------------- +# Paths & state +# --------------------------------------------------------------------------- +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COMPOSE_FILE="${SCRIPT_DIR}/test-compose.yml" +TMPDIR_BASE="$(mktemp -d /tmp/lando-containerd-test.XXXXXX)" + +CONTAINERD_SOCKET="${TMPDIR_BASE}/containerd.sock" +CONTAINERD_ROOT="${TMPDIR_BASE}/containerd-root" +CONTAINERD_STATE="${TMPDIR_BASE}/containerd-state" +CONTAINERD_CONFIG="${TMPDIR_BASE}/containerd-config.toml" +CONTAINERD_PID="" + +BUILDKITD_SOCKET="${TMPDIR_BASE}/buildkitd.sock" +BUILDKITD_PID="" + +FINCH_SOCKET="${TMPDIR_BASE}/finch.sock" +FINCH_CONFIG="${TMPDIR_BASE}/finch-daemon.toml" +FINCH_PID="" +CNI_NETCONF_PATH="${TMPDIR_BASE}/cni-conflist" + +CONTAINERD_LOG="${TMPDIR_BASE}/containerd.log" +BUILDKITD_LOG="${TMPDIR_BASE}/buildkitd.log" +FINCH_LOG="${TMPDIR_BASE}/finch-daemon.log" + +# The project name docker-compose will use +COMPOSE_PROJECT="lando-containerd-test" + +# Track whether we need cleanup +CLEANUP_NEEDED=0 + +# --------------------------------------------------------------------------- +# Cleanup handler +# --------------------------------------------------------------------------- +cleanup() { + local exit_code=$? + + printf "\n${CYAN}${BOLD}[Cleanup]${RESET} Tearing down test resources...\n" + + # Stop the compose project via docker-compose + finch-daemon (best effort) + if command -v docker-compose &>/dev/null && [ -S "$FINCH_SOCKET" ]; then + info "Stopping docker-compose project..." + DOCKER_HOST="unix://${FINCH_SOCKET}" \ + docker-compose \ + -f "$COMPOSE_FILE" \ + --project-name "$COMPOSE_PROJECT" \ + down --remove-orphans 2>/dev/null || true + fi + + # Stop finch-daemon + if [ -n "$FINCH_PID" ] && kill -0 "$FINCH_PID" 2>/dev/null; then + info "Stopping finch-daemon (PID $FINCH_PID)..." + kill "$FINCH_PID" 2>/dev/null || true + wait "$FINCH_PID" 2>/dev/null || true + ok "finch-daemon stopped" + fi + + # Stop buildkitd + if [ -n "$BUILDKITD_PID" ] && kill -0 "$BUILDKITD_PID" 2>/dev/null; then + info "Stopping buildkitd (PID $BUILDKITD_PID)..." + kill "$BUILDKITD_PID" 2>/dev/null || true + wait "$BUILDKITD_PID" 2>/dev/null || true + ok "buildkitd stopped" + fi + + # Stop containerd + if [ -n "$CONTAINERD_PID" ] && kill -0 "$CONTAINERD_PID" 2>/dev/null; then + info "Stopping containerd (PID $CONTAINERD_PID)..." + kill "$CONTAINERD_PID" 2>/dev/null || true + wait "$CONTAINERD_PID" 2>/dev/null || true + ok "containerd stopped" + fi + + # Remove temp files + if [ -d "$TMPDIR_BASE" ]; then + info "Removing temp directory: ${TMPDIR_BASE}" + rm -rf "$TMPDIR_BASE" 2>/dev/null || true + ok "temp files cleaned up" + fi + + if [ "$exit_code" -eq 0 ]; then + printf "\n${GREEN}${BOLD}All tests passed!${RESET}\n\n" + else + printf "\n${RED}${BOLD}Test failed (exit code: %d)${RESET}\n" "$exit_code" + printf "${YELLOW}Check logs at:${RESET}\n" + printf " containerd: %s\n" "$CONTAINERD_LOG" + printf " buildkitd: %s\n" "$BUILDKITD_LOG" + printf " finch-daemon: %s\n\n" "$FINCH_LOG" + # Don't remove temp dir on failure so logs are preserved + fi +} + +trap cleanup EXIT + +# --------------------------------------------------------------------------- +# Pre-flight: check binaries +# --------------------------------------------------------------------------- +printf "\n${BOLD}Lando Containerd Engine Smoke Test (Production Path)${RESET}\n" +printf "════════════════════════════════════════════════════════\n" +printf " Tests: docker-compose → finch-daemon → containerd\n\n" + +step "Checking required binaries" + +MISSING=0 + +for bin in containerd buildkitd finch-daemon docker-compose; do + if command -v "$bin" &>/dev/null; then + ok "$bin found at $(command -v "$bin")" + else + fail "$bin not found in PATH" + MISSING=1 + fi +done + +# nerdctl is optional (only used by OCI hooks internally, not by this test) +if command -v nerdctl &>/dev/null; then + info "nerdctl found (optional, used by OCI hooks only): $(command -v nerdctl)" +else + info "nerdctl not found (optional — not needed for the production compose path)" +fi + +if [ "$MISSING" -eq 1 ]; then + fail "Missing required binaries — install them and retry." + exit 1 +fi + +# Check for root (containerd usually requires it) +if [ "$(id -u)" -ne 0 ]; then + printf "\n" + fail "This script must be run as root (containerd requires root privileges)." + info "Try: sudo bash $0" + exit 1 +fi + +# Check compose file exists +if [ ! -f "$COMPOSE_FILE" ]; then + fail "Compose file not found: ${COMPOSE_FILE}" + exit 1 +fi +ok "Compose file found: ${COMPOSE_FILE}" + +# --------------------------------------------------------------------------- +# Step 2: Start containerd +# --------------------------------------------------------------------------- +step "Starting fresh containerd instance" + +mkdir -p "$CONTAINERD_ROOT" "$CONTAINERD_STATE" + +# Generate a minimal containerd config pointing to our temp paths +cat > "$CONTAINERD_CONFIG" < "$CONTAINERD_LOG" 2>&1 & +CONTAINERD_PID=$! +CLEANUP_NEEDED=1 + +info "containerd started with PID ${CONTAINERD_PID}" + +# Wait for the socket to become available +info "Waiting for containerd socket..." +for i in $(seq 1 30); do + if [ -S "$CONTAINERD_SOCKET" ]; then + break + fi + sleep 0.5 +done + +if [ ! -S "$CONTAINERD_SOCKET" ]; then + fail "containerd socket did not appear after 15 seconds" + printf " Log output:\n" + tail -20 "$CONTAINERD_LOG" | sed 's/^/ /' + exit 1 +fi + +ok "containerd is ready" + +# --------------------------------------------------------------------------- +# Step 3: Start buildkitd +# --------------------------------------------------------------------------- +step "Starting buildkitd (connected to containerd)" + +buildkitd \ + --addr "unix://${BUILDKITD_SOCKET}" \ + --containerd-worker=true \ + --containerd-worker-addr="${CONTAINERD_SOCKET}" \ + --oci-worker=false \ + --root "${TMPDIR_BASE}/buildkitd-root" \ + > "$BUILDKITD_LOG" 2>&1 & +BUILDKITD_PID=$! + +info "buildkitd started with PID ${BUILDKITD_PID}" + +# Wait for buildkitd socket +info "Waiting for buildkitd socket..." +for i in $(seq 1 30); do + if [ -S "$BUILDKITD_SOCKET" ]; then + break + fi + sleep 0.5 +done + +if [ ! -S "$BUILDKITD_SOCKET" ]; then + fail "buildkitd socket did not appear after 15 seconds" + printf " Log output:\n" + tail -20 "$BUILDKITD_LOG" | sed 's/^/ /' + exit 1 +fi + +ok "buildkitd is ready" + +# --------------------------------------------------------------------------- +# Step 4: Start finch-daemon (Docker API compatibility layer) +# --------------------------------------------------------------------------- +step "Starting finch-daemon (Docker API → containerd bridge)" + +mkdir -p "$CNI_NETCONF_PATH" + +# Generate a minimal finch-daemon config (TOML) pointing at our containerd +cat > "$FINCH_CONFIG" < "$FINCH_LOG" 2>&1 & +FINCH_PID=$! + +info "finch-daemon started with PID ${FINCH_PID}" + +# Wait for finch-daemon socket +info "Waiting for finch-daemon socket..." +for i in $(seq 1 30); do + if [ -S "$FINCH_SOCKET" ]; then + break + fi + sleep 0.5 +done + +if [ ! -S "$FINCH_SOCKET" ]; then + fail "finch-daemon socket did not appear after 15 seconds" + printf " Log output:\n" + tail -20 "$FINCH_LOG" | sed 's/^/ /' + exit 1 +fi + +# Verify finch-daemon responds to Docker API ping +info "Verifying finch-daemon Docker API compatibility..." +if command -v curl &>/dev/null; then + PING_RESPONSE=$(curl -s --unix-socket "$FINCH_SOCKET" http://localhost/_ping 2>/dev/null || echo "") + if [ "$PING_RESPONSE" = "OK" ]; then + ok "finch-daemon Docker API ping: OK" + else + fail "finch-daemon ping returned: '${PING_RESPONSE}' (expected 'OK')" + info "finch-daemon may still be initializing — continuing" + fi +else + info "curl not available — skipping Docker API ping check" +fi + +ok "finch-daemon is ready" + +# --------------------------------------------------------------------------- +# Step 5: Run docker-compose up via DOCKER_HOST (production path) +# --------------------------------------------------------------------------- +step "Running docker-compose up via DOCKER_HOST (nginx:alpine on port 8099)" + +export DOCKER_HOST="unix://${FINCH_SOCKET}" +export BUILDKIT_HOST="unix://${BUILDKITD_SOCKET}" + +info "DOCKER_HOST=${DOCKER_HOST}" +info "BUILDKIT_HOST=${BUILDKIT_HOST}" + +docker-compose \ + -f "$COMPOSE_FILE" \ + --project-name "$COMPOSE_PROJECT" \ + up -d 2>&1 | sed 's/^/ /' + +if [ "${PIPESTATUS[0]}" -ne 0 ]; then + fail "docker-compose up failed" + exit 1 +fi + +ok "docker-compose up succeeded" + +# --------------------------------------------------------------------------- +# Step 6: Verify the container is running +# --------------------------------------------------------------------------- +step "Verifying container is running" + +# Give the container a moment to start +sleep 2 + +# Use docker-compose ps to check container status (via finch-daemon) +info "Checking container status via docker-compose ps..." +COMPOSE_PS_OUTPUT=$(docker-compose \ + -f "$COMPOSE_FILE" \ + --project-name "$COMPOSE_PROJECT" \ + ps 2>/dev/null || echo "") + +if echo "$COMPOSE_PS_OUTPUT" | grep -qi "up\|running"; then + ok "Found running container(s) for project '${COMPOSE_PROJECT}'" + echo "$COMPOSE_PS_OUTPUT" | sed 's/^/ /' +else + # Fallback: check via Docker API on the finch socket + info "Checking container list via Docker API..." + if command -v curl &>/dev/null; then + CONTAINERS=$(curl -s --unix-socket "$FINCH_SOCKET" \ + "http://localhost/containers/json?filters=%7B%22label%22%3A%5B%22com.docker.compose.project%3D${COMPOSE_PROJECT}%22%5D%7D" 2>/dev/null || echo "[]") + echo " ${CONTAINERS}" | sed 's/^/ /' + fi + fail "No running containers found for project '${COMPOSE_PROJECT}'" + exit 1 +fi + +# Try to hit the nginx endpoint +info "Testing HTTP response on port 8099..." +sleep 1 + +if command -v curl &>/dev/null; then + HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:8099 2>/dev/null || echo "000") +elif command -v wget &>/dev/null; then + HTTP_CODE=$(wget -qO /dev/null --server-response http://127.0.0.1:8099 2>&1 | awk '/HTTP/{print $2}' | tail -1 || echo "000") +else + info "Neither curl nor wget available — skipping HTTP check" + HTTP_CODE="skip" +fi + +if [ "$HTTP_CODE" = "200" ]; then + ok "nginx responded with HTTP 200" +elif [ "$HTTP_CODE" = "skip" ]; then + info "HTTP check skipped (no curl/wget)" +else + fail "Expected HTTP 200, got ${HTTP_CODE}" + info "Container may still be starting — this is not necessarily fatal" +fi + +# --------------------------------------------------------------------------- +# Step 7: Stop the compose project via docker-compose +# --------------------------------------------------------------------------- +step "Stopping docker-compose project" + +docker-compose \ + -f "$COMPOSE_FILE" \ + --project-name "$COMPOSE_PROJECT" \ + down --remove-orphans 2>&1 | sed 's/^/ /' + +ok "Compose project stopped" + +# Verify container is gone via docker-compose ps +sleep 1 +REMAINING=$(docker-compose \ + -f "$COMPOSE_FILE" \ + --project-name "$COMPOSE_PROJECT" \ + ps -q 2>/dev/null | wc -l || echo "0") + +if [ "$REMAINING" -eq 0 ]; then + ok "All containers removed" +else + fail "Some containers still running ($REMAINING remaining)" +fi + +# --------------------------------------------------------------------------- +# Step 8: Cleanup is handled by the EXIT trap +# --------------------------------------------------------------------------- +step "Cleanup (handled by exit trap)" +ok "Cleanup will run automatically on exit" + +printf "\n${GREEN}${BOLD}Smoke test completed successfully!${RESET}\n\n" diff --git a/tasks/doctor.js b/tasks/doctor.js new file mode 100644 index 000000000..b1cd4d61e --- /dev/null +++ b/tasks/doctor.js @@ -0,0 +1,59 @@ +'use strict'; + +const {color, figures} = require('listr2'); + +module.exports = lando => ({ + command: 'doctor', + describe: 'Runs environment health checks', + usage: '$0 doctor', + examples: [ + '$0 doctor', + ], + level: 'tasks', + run: async () => { + const ux = lando.cli.getUX(); + const checks = []; + + if (lando.config.engine === 'containerd') { + checks.push(...await require('../hooks/lando-doctor-containerd')(lando)); + } + + if (checks.length === 0) { + console.log('No doctor checks available for the current engine.'); + return; + } + + const rows = checks.map(check => { + let status; + switch (check.status) { + case 'ok': + status = color.green(figures.tick); + break; + case 'warning': + status = color.yellow(figures.warning); + break; + default: + status = color.red(figures.cross); + break; + } + + return { + check: check.title, + status, + message: check.message, + }; + }); + + console.log(''); + ux.table(rows, { + check: {header: 'CHECK'}, + status: {header: 'STATUS'}, + message: {header: 'MESSAGE'}, + }); + console.log(''); + + if (checks.some(check => check.status === 'error')) { + throw new Error('Doctor found one or more errors.'); + } + }, +}); diff --git a/test/allocate-ports.spec.js b/test/allocate-ports.spec.js new file mode 100644 index 000000000..89cbeb329 --- /dev/null +++ b/test/allocate-ports.spec.js @@ -0,0 +1,100 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const {findFreePort, allocatePorts} = require('../utils/allocate-ports'); + +describe('allocate-ports', () => { + describe('#findFreePort', () => { + it('should return a number', async () => { + const port = await findFreePort(); + expect(port).to.be.a('number'); + }); + + it('should return a port > 0', async () => { + const port = await findFreePort(); + expect(port).to.be.greaterThan(0); + }); + + it('should return a port in the valid range', async () => { + const port = await findFreePort(); + expect(port).to.be.at.least(1024); + expect(port).to.be.at.most(65535); + }); + }); + + describe('#allocatePorts', () => { + it('should rewrite "80" to "127.0.0.1:PORT:80"', async () => { + const result = await allocatePorts(['80']); + result.should.have.lengthOf(1); + result[0].should.match(/^127\.0\.0\.1:\d+:80$/); + const hostPort = parseInt(result[0].split(':')[1], 10); + expect(hostPort).to.be.greaterThan(0); + }); + + it('should rewrite "127.0.0.1::80" to "127.0.0.1:PORT:80"', async () => { + const result = await allocatePorts(['127.0.0.1::80']); + result.should.have.lengthOf(1); + result[0].should.match(/^127\.0\.0\.1:\d+:80$/); + }); + + it('should rewrite "::80" to "127.0.0.1:PORT:80"', async () => { + const result = await allocatePorts(['::80']); + result.should.have.lengthOf(1); + result[0].should.match(/^127\.0\.0\.1:\d+:80$/); + }); + + it('should rewrite ":80" to "127.0.0.1:PORT:80"', async () => { + const result = await allocatePorts([':80']); + result.should.have.lengthOf(1); + result[0].should.match(/^127\.0\.0\.1:\d+:80$/); + }); + + it('should pass through "8080:80" unchanged', async () => { + const result = await allocatePorts(['8080:80']); + result.should.deep.equal(['8080:80']); + }); + + it('should pass through "127.0.0.1:8080:80" unchanged', async () => { + const result = await allocatePorts(['127.0.0.1:8080:80']); + result.should.deep.equal(['127.0.0.1:8080:80']); + }); + + it('should handle null gracefully', async () => { + const result = await allocatePorts(null); + expect(result).to.be.null; + }); + + it('should handle undefined gracefully', async () => { + const result = await allocatePorts(undefined); + expect(result).to.be.undefined; + }); + + it('should handle empty array', async () => { + const result = await allocatePorts([]); + result.should.deep.equal([]); + }); + + it('should pass through object port specs', async () => { + const objPort = {target: 80, published: 8080, protocol: 'tcp'}; + const result = await allocatePorts([objPort]); + result.should.deep.equal([objPort]); + }); + + it('should handle mixed port specs', async () => { + const result = await allocatePorts(['80', '8080:80', '127.0.0.1::443']); + result.should.have.lengthOf(3); + result[0].should.match(/^127\.0\.0\.1:\d+:80$/); + result[1].should.equal('8080:80'); + result[2].should.match(/^127\.0\.0\.1:\d+:443$/); + }); + + it('should preserve custom bind host', async () => { + const result = await allocatePorts(['0.0.0.0::80']); + result.should.have.lengthOf(1); + result[0].should.match(/^0\.0\.0\.0:\d+:80$/); + }); + }); +}); diff --git a/test/app-add-2-landonet.spec.js b/test/app-add-2-landonet.spec.js new file mode 100644 index 000000000..9f2d0e7be --- /dev/null +++ b/test/app-add-2-landonet.spec.js @@ -0,0 +1,684 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +const sinon = require('sinon'); +const {EventEmitter} = require('events'); +const Promise = require('./../lib/promise'); + +const hook = require('./../hooks/app-add-2-landonet'); + +describe('app-add-2-landonet', () => { + it('should reconnect app containers to landonet with internal aliases', async () => { + const disconnect = sinon.stub().rejects(new Error('is not connected to network')); + const connect = sinon.stub().resolves(); + const app = { + project: 'docscore', + log: {debug: sinon.stub()}, + }; + const lando = { + config: {networkBridge: 'lando_bridge_network'}, + engine: { + getNetwork: () => ({disconnect, connect}), + list: sinon.stub().returns(Promise.resolve([{id: 'cid-1', service: 'cli', app: 'docscore', name: 'docscore-cli-1'}])), + docker: {dockerode: {listContainers: sinon.stub().resolves([])}}, + }, + }; + + await hook(app, lando); + + expect(disconnect.calledOnce).to.equal(true); + expect(disconnect.firstCall.args[0]).to.deep.equal({Container: 'cid-1', Force: true}); + expect(connect.calledOnce).to.equal(true); + expect(connect.firstCall.args[0]).to.deep.equal({ + Container: 'cid-1', + EndpointConfig: {Aliases: ['cli.docscore.internal']}, + }); + }); + + it('should update container hosts files for containerd backends', async () => { + // Build a mock exec stream that emits 'end' after listeners are attached. + // The hook awaits exec.start(), stores the stream, then wraps it in a + // new Promise and attaches on('end'). We need to delay the 'end' event + // until after all of that happens. + const mockStream = new EventEmitter(); + + const mockExec = { + start: sinon.stub().callsFake(() => { + // Use setTimeout(0) to fire after the microtask queue drains + // (the hook's Promise constructor runs synchronously after await) + setTimeout(() => mockStream.emit('end'), 5); + return Promise.resolve(mockStream); + }), + inspect: sinon.stub().resolves({ExitCode: 0}), + }; + + const mockContainer = { + exec: sinon.stub().resolves(mockExec), + }; + + const mockDockerode = { + getContainer: sinon.stub().returns(mockContainer), + }; + + const app = { + project: 'docscore', + services: ['cli'], + containers: {cli: 'docscore_cli_1'}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'ON', + proxyContainer: 'landoproxyhyperion5000gandalfedition_proxy_1', + proxyNet: 'landoproxyhyperion5000gandalfedition_edge', + userConfRoot: '/tmp/.lando-test', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + exists: sinon.stub().resolves(true), + scan: sinon.stub() + .onFirstCall().resolves({ + Name: '/docscore-cli-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['lando_bridge_network', 'docscore_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.0.0.5'}}}, + }) + .onSecondCall().resolves({Name: '/landoproxyhyperion5000gandalfedition-proxy-1'}), + }, + }; + + await hook(app, lando); + + // updateHosts should be called for each unique target + // Targets: docscore-cli-1, landoproxyhyperion5000gandalfedition-proxy-1 + expect(mockDockerode.getContainer.calledTwice).to.equal(true); + expect(mockDockerode.getContainer.firstCall.args[0]).to.equal('docscore-cli-1'); + expect(mockDockerode.getContainer.secondCall.args[0]).to.equal('landoproxyhyperion5000gandalfedition-proxy-1'); + + // Each container should have exec called with root user and a hosts-update script + expect(mockContainer.exec.calledTwice).to.equal(true); + const execOpts = mockContainer.exec.firstCall.args[0]; + expect(execOpts.User).to.equal('root'); + expect(execOpts.Cmd[0]).to.equal('sh'); + expect(execOpts.Cmd[2]).to.include('10.0.0.5 cli.docscore.internal'); + expect(execOpts.Cmd[2]).to.include('lando-internal-aliases'); + }); + + describe('multi-container containerd orchestration', () => { + /** + * Helper to create a mock exec chain for a single container target. + * + * Returns independent stream/exec/container mocks so that multiple + * containers can be stubbed without interference. + * + * @return {{stream: EventEmitter, exec: Object, container: Object}} + */ + const createMockExecChain = () => { + const stream = new EventEmitter(); + const exec = { + start: sinon.stub().callsFake(() => { + setTimeout(() => stream.emit('end'), 5); + return Promise.resolve(stream); + }), + inspect: sinon.stub().resolves({ExitCode: 0}), + }; + const container = { + exec: sinon.stub().resolves(exec), + }; + return {stream, exec, container}; + }; + + it('should inject ALL aliases into ALL containers for multi-service apps', async () => { + const webMock = createMockExecChain(); + const dbMock = createMockExecChain(); + + // Configure withArgs on separate lines to avoid sinon chaining pitfall: + // chained .withArgs().returns().withArgs() operates on the behavior object, + // not the original stub, which can cause the first arg's return value + // to be overwritten by the second. + const getContainerStub = sinon.stub(); + getContainerStub.withArgs('myapp-web-1').returns(webMock.container); + getContainerStub.withArgs('myapp-db-1').returns(dbMock.container); + const mockDockerode = {getContainer: getContainerStub}; + + const app = { + project: 'myapp', + services: ['web', 'db'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + .onFirstCall().resolves({ + Name: '/myapp-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }) + .onSecondCall().resolves({ + Name: '/myapp-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.3'}}}, + }), + }, + }; + + await hook(app, lando); + + // Both containers should be targeted + expect(mockDockerode.getContainer.calledTwice).to.equal(true); + expect(mockDockerode.getContainer.firstCall.args[0]).to.equal('myapp-web-1'); + expect(mockDockerode.getContainer.secondCall.args[0]).to.equal('myapp-db-1'); + + // Both containers should get exec'd with ALL aliases (web + db) + expect(webMock.container.exec.calledOnce).to.equal(true); + expect(dbMock.container.exec.calledOnce).to.equal(true); + + // Verify the web container's hosts script contains BOTH aliases + const webScript = webMock.container.exec.firstCall.args[0].Cmd[2]; + expect(webScript).to.include('10.4.0.2 web.myapp.internal'); + expect(webScript).to.include('10.4.0.3 db.myapp.internal'); + + // Verify the db container's hosts script also contains BOTH aliases + const dbScript = dbMock.container.exec.firstCall.args[0].Cmd[2]; + expect(dbScript).to.include('10.4.0.2 web.myapp.internal'); + expect(dbScript).to.include('10.4.0.3 db.myapp.internal'); + }); + + it('should handle three or more services with unique IPs and aliases', async () => { + const webMock = createMockExecChain(); + const dbMock = createMockExecChain(); + const cacheMock = createMockExecChain(); + + const getContainerStub = sinon.stub(); + getContainerStub.withArgs('proj-web-1').returns(webMock.container); + getContainerStub.withArgs('proj-db-1').returns(dbMock.container); + getContainerStub.withArgs('proj-cache-1').returns(cacheMock.container); + const mockDockerode = {getContainer: getContainerStub}; + + const app = { + project: 'proj', + services: ['web', 'db', 'cache'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + .onCall(0).resolves({ + Name: '/proj-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['proj_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }) + .onCall(1).resolves({ + Name: '/proj-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['proj_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.3'}}}, + }) + .onCall(2).resolves({ + Name: '/proj-cache-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['proj_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.4'}}}, + }), + }, + }; + + await hook(app, lando); + + // All 3 containers should be targeted + expect(mockDockerode.getContainer.calledThrice).to.equal(true); + + // Each container should receive ALL 3 aliases + for (const mock of [webMock, dbMock, cacheMock]) { + const script = mock.container.exec.firstCall.args[0].Cmd[2]; + expect(script).to.include('10.4.0.2 web.proj.internal'); + expect(script).to.include('10.4.0.3 db.proj.internal'); + expect(script).to.include('10.4.0.4 cache.proj.internal'); + } + }); + + it('should continue with remaining services when one scan fails', async () => { + const dbMock = createMockExecChain(); + + const mockDockerode = { + getContainer: sinon.stub() + .withArgs('myapp-db-1').returns(dbMock.container), + }; + + const app = { + project: 'myapp', + services: ['web', 'db'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + // web scan fails + .onFirstCall().rejects(new Error('container not found')) + // db scan succeeds + .onSecondCall().resolves({ + Name: '/myapp-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.3'}}}, + }), + }, + }; + + await hook(app, lando); + + // Only db container should be targeted (web scan failed) + expect(mockDockerode.getContainer.calledOnce).to.equal(true); + expect(mockDockerode.getContainer.firstCall.args[0]).to.equal('myapp-db-1'); + + // db should still get its alias + const dbScript = dbMock.container.exec.firstCall.args[0].Cmd[2]; + expect(dbScript).to.include('10.4.0.3 db.myapp.internal'); + // web alias should NOT be present since scan failed + expect(dbScript).to.not.include('web.myapp.internal'); + }); + + it('should add container to targets but skip alias when IP is not found', async () => { + const webMock = createMockExecChain(); + const dbMock = createMockExecChain(); + + const getContainerStub = sinon.stub(); + getContainerStub.withArgs('myapp-web-1').returns(webMock.container); + getContainerStub.withArgs('myapp-db-1').returns(dbMock.container); + const mockDockerode = {getContainer: getContainerStub}; + + const app = { + project: 'myapp', + services: ['web', 'db'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + .onFirstCall().resolves({ + Name: '/myapp-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }) + // db has no IP on any preferred network + .onSecondCall().resolves({ + Name: '/myapp-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['some_other_network'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '172.20.0.5'}}}, + }), + }, + }; + + await hook(app, lando); + + // Both containers should be targeted (db is scanned successfully) + expect(mockDockerode.getContainer.calledTwice).to.equal(true); + + // Both should get hosts updated, but only web's alias is in the script + const webScript = webMock.container.exec.firstCall.args[0].Cmd[2]; + expect(webScript).to.include('10.4.0.2 web.myapp.internal'); + expect(webScript).to.not.include('db.myapp.internal'); + + const dbScript = dbMock.container.exec.firstCall.args[0].Cmd[2]; + expect(dbScript).to.include('10.4.0.2 web.myapp.internal'); + expect(dbScript).to.not.include('db.myapp.internal'); + }); + + it('should return early when no services have resolvable IPs', async () => { + const mockDockerode = { + getContainer: sinon.stub(), + }; + + const app = { + project: 'myapp', + services: ['web', 'db'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + // Both services have no IP on preferred networks + .onFirstCall().resolves({ + Name: '/myapp-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['alien_net'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '192.168.1.5'}}}, + }) + .onSecondCall().resolves({ + Name: '/myapp-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['alien_net'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '192.168.1.6'}}}, + }), + }, + }; + + await hook(app, lando); + + // updateHosts should NOT be called since no aliases were collected + expect(mockDockerode.getContainer.called).to.equal(false); + }); + + it('should return early when app has no services', async () => { + const mockDockerode = { + getContainer: sinon.stub(), + }; + + const app = { + project: 'myapp', + services: [], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub(), + }, + }; + + await hook(app, lando); + + expect(lando.engine.scan.called).to.equal(false); + expect(mockDockerode.getContainer.called).to.equal(false); + }); + + it('should use container name from app.containers map when available', async () => { + const webMock = createMockExecChain(); + const dbMock = createMockExecChain(); + + const getContainerStub = sinon.stub(); + getContainerStub.withArgs('myapp_web_1').returns(webMock.container); + getContainerStub.withArgs('custom-db-name').returns(dbMock.container); + const mockDockerode = {getContainer: getContainerStub}; + + const app = { + project: 'myapp', + services: ['web', 'db'], + // Explicit container name mapping (e.g. from Docker Compose v1 naming) + containers: {web: 'myapp_web_1', db: 'custom-db-name'}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub() + .onFirstCall().resolves({ + Name: '/myapp_web_1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }) + .onSecondCall().resolves({ + Name: '/custom-db-name', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.3'}}}, + }), + }, + }; + + await hook(app, lando); + + // scan should be called with the container names from the map + expect(lando.engine.scan.firstCall.args[0]).to.deep.equal({id: 'myapp_web_1'}); + expect(lando.engine.scan.secondCall.args[0]).to.deep.equal({id: 'custom-db-name'}); + + // Both containers should get ALL aliases + const webScript = webMock.container.exec.firstCall.args[0].Cmd[2]; + expect(webScript).to.include('10.4.0.2 web.myapp.internal'); + expect(webScript).to.include('10.4.0.3 db.myapp.internal'); + }); + + it('should resolve IP from project_default when bridge network is not configured', async () => { + // In containerd, containers are NOT connected to lando_bridge_network + // via Docker API — the IP comes from ${project}_default instead. + const webMock = createMockExecChain(); + + const mockDockerode = { + getContainer: sinon.stub().returns(webMock.container), + }; + + const app = { + project: 'myapp', + services: ['web'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub().resolves({ + Name: '/myapp-web-1', + // Only on project_default, NOT on lando_bridge_network + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }), + }, + }; + + await hook(app, lando); + + // Should still find IP via myapp_default (second preference) + const script = webMock.container.exec.firstCall.args[0].Cmd[2]; + expect(script).to.include('10.4.0.2 web.myapp.internal'); + }); + + it('should handle multi-network containers by picking the correct ethN index', async () => { + // When a container is on multiple networks, the nerdctl/networks label + // lists them in order, and ethN interfaces correspond to that order. + const webMock = createMockExecChain(); + + const mockDockerode = { + getContainer: sinon.stub().returns(webMock.container), + }; + + const app = { + project: 'myapp', + services: ['web'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub().resolves({ + Name: '/myapp-web-1', + // project_default is at index 1, not 0 + Config: {Labels: {'nerdctl/networks': JSON.stringify(['some_custom_net', 'myapp_default'])}}, + NetworkSettings: { + Networks: { + 'unknown-eth0': {IPAddress: '172.20.0.5'}, + 'unknown-eth1': {IPAddress: '10.4.0.2'}, + }, + }, + }), + }, + }; + + await hook(app, lando); + + // Should pick the IP from unknown-eth1 (index 1 = myapp_default) + const script = webMock.container.exec.firstCall.args[0].Cmd[2]; + expect(script).to.include('10.4.0.2 web.myapp.internal'); + // Should NOT use the IP from the custom network + expect(script).to.not.include('172.20.0.5'); + }); + + it('should include proxy container as target but not as alias source', async () => { + const webMock = createMockExecChain(); + const dbMock = createMockExecChain(); + const proxyMock = createMockExecChain(); + + const getContainerStub = sinon.stub(); + getContainerStub.withArgs('myapp-web-1').returns(webMock.container); + getContainerStub.withArgs('myapp-db-1').returns(dbMock.container); + getContainerStub.withArgs('landoproxy-proxy-1').returns(proxyMock.container); + const mockDockerode = {getContainer: getContainerStub}; + + const app = { + project: 'myapp', + services: ['web', 'db'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'ON', + proxyContainer: 'landoproxy_proxy_1', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + exists: sinon.stub().resolves(true), + scan: sinon.stub() + .onCall(0).resolves({ + Name: '/myapp-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }) + .onCall(1).resolves({ + Name: '/myapp-db-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.3'}}}, + }) + // Proxy container scan (third call) + .onCall(2).resolves({Name: '/landoproxy-proxy-1'}), + }, + }; + + await hook(app, lando); + + // All 3 containers (web, db, proxy) should get hosts updates + expect(mockDockerode.getContainer.calledThrice).to.equal(true); + expect(mockDockerode.getContainer.getCall(0).args[0]).to.equal('myapp-web-1'); + expect(mockDockerode.getContainer.getCall(1).args[0]).to.equal('myapp-db-1'); + expect(mockDockerode.getContainer.getCall(2).args[0]).to.equal('landoproxy-proxy-1'); + + // Proxy container should get the app aliases but should NOT contribute its own alias + const proxyScript = proxyMock.container.exec.firstCall.args[0].Cmd[2]; + expect(proxyScript).to.include('10.4.0.2 web.myapp.internal'); + expect(proxyScript).to.include('10.4.0.3 db.myapp.internal'); + }); + + it('should sanitize IPs and aliases to prevent injection in hosts entries', async () => { + const webMock = createMockExecChain(); + + const mockDockerode = { + getContainer: sinon.stub().returns(webMock.container), + }; + + const app = { + project: 'myapp', + services: ['web'], + containers: {}, + log: {debug: sinon.stub()}, + }; + const lando = { + Promise, + config: { + networkBridge: 'lando_bridge_network', + proxy: 'OFF', + proxyNet: 'landoproxy_edge', + }, + engine: { + engineBackend: 'containerd', + docker: {dockerode: mockDockerode}, + scan: sinon.stub().resolves({ + Name: '/myapp-web-1', + Config: {Labels: {'nerdctl/networks': JSON.stringify(['myapp_default'])}}, + NetworkSettings: {Networks: {'unknown-eth0': {IPAddress: '10.4.0.2'}}}, + }), + }, + }; + + await hook(app, lando); + + // The hosts echo line should contain properly sanitized IP and alias. + // The script skeleton uses $(mktemp) and "$tmp" which are expected shell + // constructs. We only verify that the user-data portion (the echo line + // with IP + alias) contains no shell metacharacters. + const script = webMock.container.exec.firstCall.args[0].Cmd[2]; + // Extract the echo lines from the script (the user-data portion) + const echoMatch = script.match(/echo '([^']+)'/g); + expect(echoMatch).to.be.an('array').that.is.not.empty; + for (const line of echoMatch) { + // Each echo line should only contain safe characters: digits, dots, + // colons, alphanumerics, hyphens, underscores, spaces, and the hash + expect(line).to.match(/^echo '[0-9.:]+\s+[a-zA-Z0-9.\-_]+\s+#\s+lando-internal-aliases'$/); + } + }); + }); +}); diff --git a/test/app-add-proxy-2-landonet.spec.js b/test/app-add-proxy-2-landonet.spec.js new file mode 100644 index 000000000..c51ef6077 --- /dev/null +++ b/test/app-add-proxy-2-landonet.spec.js @@ -0,0 +1,39 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +const sinon = require('sinon'); + +const hook = require('./../hooks/app-add-proxy-2-landonet'); + +describe('app-add-proxy-2-landonet', () => { + it('should use the scanned container id when reconnecting the proxy', async () => { + const disconnect = sinon.stub().rejects(new Error('is not connected to network')); + const connect = sinon.stub().resolves(); + const app = { + config: {proxy: [{hostname: 'docs.core.lndo.site'}]}, + log: {debug: sinon.stub()}, + }; + const lando = { + config: {proxy: 'ON', networkBridge: 'lando_bridge_network', proxyContainer: 'proxy_app_1'}, + engine: { + getNetwork: () => ({disconnect, connect}), + exists: sinon.stub().resolves(true), + scan: sinon.stub().resolves({ + Id: 'abc123', + NetworkSettings: {Networks: {lando_bridge_network: {Aliases: ['old.alias']}}}, + }), + }, + Promise: Promise, + }; + + await hook(app, lando); + + expect(disconnect.calledOnce).to.equal(true); + expect(disconnect.firstCall.args[0]).to.deep.equal({Container: 'abc123', Force: true}); + expect(connect.calledOnce).to.equal(true); + expect(connect.firstCall.args[0].Container).to.equal('abc123'); + expect(connect.firstCall.args[0].EndpointConfig.Aliases).to.include('docs.core.lndo.site'); + expect(connect.firstCall.args[0].EndpointConfig.Aliases).to.include('old.alias'); + }); +}); diff --git a/test/backend-manager.spec.js b/test/backend-manager.spec.js new file mode 100644 index 000000000..de786f2e9 --- /dev/null +++ b/test/backend-manager.spec.js @@ -0,0 +1,223 @@ +/* + * Tests for backend-manager. + * @file backend-manager.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const fs = require('fs'); +const path = require('path'); + +const BackendManager = require('./../lib/backend-manager'); + +// Minimal stubs that satisfy the BackendManager constructor +const stubConfig = (overrides = {}) => ({ + engine: 'docker', + orchestratorBin: '/usr/bin/docker-compose', + orchestratorVersion: '2.0.0', + containerdSystemBinDir: '/tmp/.lando-test/bin', + dockerBin: '/usr/bin/docker', + engineConfig: {}, + process: 'node', + userConfRoot: '/tmp/.lando-test', + ...overrides, +}); + +const stubDeps = () => ({ + cache: {}, + events: {on: sinon.stub(), emit: sinon.stub()}, + log: {debug: sinon.stub(), verbose: sinon.stub(), info: sinon.stub(), warn: sinon.stub(), error: sinon.stub(), silly: sinon.stub()}, + shell: {sh: sinon.stub().resolves('')}, +}); + +describe('backend-manager', () => { + describe('#BackendManager', () => { + it('should be a constructor', () => { + expect(BackendManager).to.be.a('function'); + }); + + it('should store config and dependencies on the instance', () => { + const config = stubConfig(); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + manager.config.should.equal(config); + manager.cache.should.equal(cache); + manager.events.should.equal(events); + manager.log.should.equal(log); + manager.shell.should.equal(shell); + }); + }); + + describe('#createEngine', () => { + it('should return an Engine when engine="docker"', () => { + const config = stubConfig({engine: 'docker'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + expect(engine).to.be.an('object'); + // Engine has these key properties + expect(engine).to.have.property('docker'); + expect(engine).to.have.property('daemon'); + expect(engine).to.have.property('compose'); + }); + + it('should return an Engine when engine="containerd"', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + expect(engine).to.be.an('object'); + expect(engine).to.have.property('docker'); + expect(engine).to.have.property('daemon'); + expect(engine).to.have.property('compose'); + }); + + it('should wire containerd compose through docker-compose (not nerdctl)', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + // Per BRIEF: compose operations use docker-compose with DOCKER_HOST, NOT nerdctl + expect(engine.daemon.compose).to.equal('/usr/bin/docker-compose'); + expect(engine.composeInstalled).to.equal(fs.existsSync('/usr/bin/docker-compose')); + }); + + it('should default to "auto" when engine is not specified', () => { + const config = stubConfig({engine: undefined}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + // auto should work without throwing + const engine = manager.createEngine('test-id'); + expect(engine).to.be.an('object'); + expect(engine).to.have.property('docker'); + expect(engine).to.have.property('daemon'); + }); + + it('should use "auto" for any unrecognized engine value', () => { + const config = stubConfig({engine: 'unknown-value'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + // The switch default falls through to auto + const engine = manager.createEngine('test-id'); + expect(engine).to.be.an('object'); + }); + }); + + describe('#_createAutoEngine', () => { + let existsSyncStub; + + afterEach(() => { + if (existsSyncStub) existsSyncStub.restore(); + }); + + it('should select containerd when all three binaries exist', () => { + existsSyncStub = sinon.stub(fs, 'existsSync'); + // Make all three binary paths return true + existsSyncStub.returns(false); // default + existsSyncStub.withArgs(path.join('/tmp/.lando-test', 'bin', 'containerd')).returns(true); + existsSyncStub.withArgs(path.join('/tmp/.lando-test', 'bin', 'nerdctl')).returns(true); + existsSyncStub.withArgs(path.join('/tmp/.lando-test', 'bin', 'buildkitd')).returns(true); + + const config = stubConfig({engine: 'auto'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + // Spy on the private methods to verify which was called + const containerdSpy = sinon.spy(manager, '_createContainerdEngine'); + const dockerSpy = sinon.spy(manager, '_createDockerEngine'); + + manager._createAutoEngine('test-id'); + + containerdSpy.calledOnce.should.be.true; + dockerSpy.called.should.be.false; + + containerdSpy.restore(); + dockerSpy.restore(); + }); + + it('should fall back to docker when no containerd binaries exist', () => { + existsSyncStub = sinon.stub(fs, 'existsSync'); + existsSyncStub.returns(false); + + const config = stubConfig({engine: 'auto'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const containerdSpy = sinon.spy(manager, '_createContainerdEngine'); + const dockerSpy = sinon.spy(manager, '_createDockerEngine'); + + manager._createAutoEngine('test-id'); + + dockerSpy.calledOnce.should.be.true; + containerdSpy.called.should.be.false; + + containerdSpy.restore(); + dockerSpy.restore(); + }); + + it('should fall back to docker when only some containerd binaries exist', () => { + existsSyncStub = sinon.stub(fs, 'existsSync'); + existsSyncStub.returns(false); + // Only containerd exists, nerdctl and buildkitd do not + existsSyncStub.withArgs(path.join('/tmp/.lando-test', 'bin', 'containerd')).returns(true); + + const config = stubConfig({engine: 'auto'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const containerdSpy = sinon.spy(manager, '_createContainerdEngine'); + const dockerSpy = sinon.spy(manager, '_createDockerEngine'); + + manager._createAutoEngine('test-id'); + + dockerSpy.calledOnce.should.be.true; + containerdSpy.called.should.be.false; + + containerdSpy.restore(); + dockerSpy.restore(); + }); + + it('should respect config override paths for binary detection', () => { + existsSyncStub = sinon.stub(fs, 'existsSync'); + existsSyncStub.returns(false); + + // Custom binary paths + const customContainerd = '/opt/custom/containerd'; + const customNerdctl = '/opt/custom/nerdctl'; + const customBuildkitd = '/opt/custom/buildkitd'; + + existsSyncStub.withArgs(customContainerd).returns(true); + existsSyncStub.withArgs(customNerdctl).returns(true); + existsSyncStub.withArgs(customBuildkitd).returns(true); + + const config = stubConfig({ + engine: 'auto', + containerdBin: customContainerd, + nerdctlBin: customNerdctl, + buildkitdBin: customBuildkitd, + }); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const containerdSpy = sinon.spy(manager, '_createContainerdEngine'); + + manager._createAutoEngine('test-id'); + + containerdSpy.calledOnce.should.be.true; + containerdSpy.restore(); + }); + }); +}); diff --git a/test/containerd-compose-start.spec.js b/test/containerd-compose-start.spec.js new file mode 100644 index 000000000..c12b830ba --- /dev/null +++ b/test/containerd-compose-start.spec.js @@ -0,0 +1,1264 @@ +/* + * Integration tests for the containerd compose start path. + * + * Tests the production compose closure created by + * BackendManager._createContainerdEngine() — the critical glue between + * docker-compose, CNI network bridging, and finch-daemon. + * + * This covers: + * - DOCKER_HOST / DOCKER_BUILDKIT / BUILDKIT_HOST env injection + * - ensureComposeCniNetworks() called only on 'start' + * - Correct shell.sh() invocation (binary + command array + options) + * - Full engine.start() → router.eventWrapper → compose('start', datum) flow + * - Bluebird Proxy wrapping on ContainerdContainer methods + * - Multiple compose commands (stop, remove, build, logs, etc.) + * + * All tests are stub-based and always run — no real containerd required. + * + * @file containerd-compose-start.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const fs = require('fs'); +const mockFs = require('mock-fs'); + +const BackendManager = require('./../lib/backend-manager'); +const BluebirdPromise = require('./../lib/promise'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** + * Minimal stub config for BackendManager with containerd engine. + * @param {Object} [overrides] - Config overrides. + * @return {Object} Stub config. + */ +const stubConfig = (overrides = {}) => ({ + engine: 'containerd', + orchestratorBin: '/usr/bin/docker-compose', + orchestratorVersion: '2.31.0', + containerdSystemBinDir: '/usr/local/lib/lando/bin', + containerdBin: '/usr/local/lib/lando/bin/containerd', + nerdctlBin: '/tmp/.lando-test/bin/nerdctl', + buildkitdBin: '/usr/local/lib/lando/bin/buildkitd', + containerdSocket: '/run/lando/containerd.sock', + buildkitSocket: '/run/lando/buildkitd.sock', + dockerBin: '/usr/bin/docker', + engineConfig: {}, + process: 'node', + userConfRoot: '/tmp/.lando-test', + ...overrides, +}); + +/** + * Minimal stub dependencies for BackendManager. + * Returns an object with cache, events, log, shell stubs. + * The shell.sh stub resolves with an empty string by default. + * events.emit returns Bluebird promises — required because + * router.eventWrapper chains .tap() which is Bluebird-only. + * @return {{cache: Object, events: Object, log: Object, shell: Object}} + */ +const stubDeps = () => ({ + cache: {get: sinon.stub().returns(undefined), set: sinon.stub()}, + events: {on: sinon.stub(), emit: sinon.stub().callsFake(() => BluebirdPromise.resolve())}, + log: { + debug: sinon.stub(), + verbose: sinon.stub(), + info: sinon.stub(), + warn: sinon.stub(), + error: sinon.stub(), + silly: sinon.stub(), + }, + shell: {sh: sinon.stub().resolves('')}, +}); + +/** + * Create a BackendManager and extract the engine's compose closure. + * Also returns the shell stub so callers can inspect shell.sh() calls. + * @param {Object} [configOverrides] - Config overrides. + * @return {{engine: Object, compose: Function, shell: Object, deps: Object}} + */ +const createContainerdEngine = (configOverrides = {}) => { + const config = stubConfig(configOverrides); + const deps = stubDeps(); + const manager = new BackendManager(config, deps.cache, deps.events, deps.log, deps.shell); + const engine = manager.createEngine('test-id'); + + return {engine, compose: engine.compose, shell: deps.shell, deps}; +}; + +// ============================================================================ +// 1. Compose closure — environment variable injection +// ============================================================================ +describe('containerd compose start: env injection', () => { + it('should inject DOCKER_HOST pointing at finch-daemon socket', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + // Two-phase start: shell.sh called twice (Phase 1: --no-start, Phase 3: --detach) + sinon.assert.calledTwice(shell.sh); + // Both calls should have DOCKER_HOST + for (const call of shell.sh.getCalls()) { + const [, opts] = call.args; + expect(opts.env).to.have.property('DOCKER_HOST'); + expect(opts.env.DOCKER_HOST).to.match(/^unix:\/\/.*finch\.sock$/); + } + }); + + it('should inject DOCKER_BUILDKIT=1', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + expect(opts.env).to.have.property('DOCKER_BUILDKIT', '1'); + }); + + it('should inject BUILDKIT_HOST pointing at buildkitd socket', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + expect(opts.env).to.have.property('BUILDKIT_HOST'); + expect(opts.env.BUILDKIT_HOST).to.match(/^unix:\/\/.*buildkitd\.sock$/); + }); + + it('should use the configured finch socket path in DOCKER_HOST', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + // The finch socket is derived from the daemon's finchDaemon.getSocketPath() + // Default path is /run/lando/finch.sock + expect(opts.env.DOCKER_HOST).to.include('/run/lando/finch.sock'); + }); + + it('should use the configured buildkit socket path in BUILDKIT_HOST', async () => { + const {compose, shell} = createContainerdEngine({ + buildkitSocket: '/custom/buildkitd.sock', + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + expect(opts.env.BUILDKIT_HOST).to.equal('unix:///custom/buildkitd.sock'); + }); + + it('should preserve process.env in the compose environment', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + // process.env.PATH should be carried through + expect(opts.env).to.have.property('PATH'); + }); + + it('should not forward datum.opts.env (compose.js does not pass env through)', async () => { + // compose.js's buildShell() returns {cmd, opts: {mode, cstdio, silent}} + // — no env property. So datum.opts.env is NOT carried through to shell.sh(). + // The only env vars in the shell opts come from process.env and the + // containerd-specific overrides (DOCKER_HOST, DOCKER_BUILDKIT, BUILDKIT_HOST). + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {env: {MY_CUSTOM_VAR: 'custom_value'}}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + // datum.opts.env is NOT forwarded — compose.js doesn't pass it through + expect(opts.env).to.not.have.property('MY_CUSTOM_VAR'); + }); + + it('should always set DOCKER_HOST to finch socket regardless of process.env', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + // The containerd compose closure always sets DOCKER_HOST to finch socket, + // which comes AFTER ...process.env in the spread, so it overrides any + // DOCKER_HOST that might be in process.env + expect(opts.env.DOCKER_HOST).to.match(/^unix:\/\/.*finch\.sock$/); + }); + + it('should inject env vars for non-start commands too', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('stop', datum); + + const [, opts] = shell.sh.firstCall.args; + expect(opts.env).to.have.property('DOCKER_HOST'); + expect(opts.env.DOCKER_HOST).to.match(/^unix:\/\/.*finch\.sock$/); + expect(opts.env).to.have.property('DOCKER_BUILDKIT', '1'); + expect(opts.env).to.have.property('BUILDKIT_HOST'); + }); +}); + +// ============================================================================ +// 2. Compose closure — shell.sh() invocation +// ============================================================================ +describe('containerd compose start: shell execution', () => { + it('should call shell.sh() with the orchestrator binary as first arg', async () => { + const {compose, shell} = createContainerdEngine({ + orchestratorBin: '/custom/docker-compose', + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + // Two-phase start: shell.sh called twice + sinon.assert.calledTwice(shell.sh); + // Both calls should use the orchestrator binary + for (const call of shell.sh.getCalls()) { + const [cmdArray] = call.args; + expect(cmdArray[0]).to.equal('/custom/docker-compose'); + } + }); + + it('should include --project-name in the command array', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'myproject', + opts: {}, + }; + + await compose('start', datum); + + const [cmdArray] = shell.sh.firstCall.args; + const projectIdx = cmdArray.indexOf('--project-name'); + expect(projectIdx).to.be.greaterThan(0); + expect(cmdArray[projectIdx + 1]).to.equal('myproject'); + }); + + it('should include --file with the compose file path', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/path/to/my-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [cmdArray] = shell.sh.firstCall.args; + const fileIdx = cmdArray.indexOf('--file'); + expect(fileIdx).to.be.greaterThan(0); + expect(cmdArray[fileIdx + 1]).to.equal('/path/to/my-compose.yml'); + }); + + it('should include "up" sub-command for start', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('up'); + }); + + it('should include --detach flag by default for start', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + // Two-phase start: Phase 1 has --no-start (no --detach), Phase 3 has --detach + sinon.assert.calledTwice(shell.sh); + const [phase1Cmd] = shell.sh.firstCall.args; + const [phase3Cmd] = shell.sh.secondCall.args; + expect(phase1Cmd).to.not.include('--detach'); + expect(phase1Cmd).to.include('--no-start'); + expect(phase3Cmd).to.include('--detach'); + }); + + it('should include --remove-orphans flag by default for start', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('--remove-orphans'); + }); + + it('should pass mode: spawn in opts', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [, opts] = shell.sh.firstCall.args; + expect(opts.mode).to.equal('spawn'); + }); + + it('should handle multiple compose files', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml', '/tmp/docker-compose.override.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const [cmdArray] = shell.sh.firstCall.args; + // Should have two --file flags + const fileIndices = cmdArray.reduce((acc, val, idx) => { + if (val === '--file') acc.push(idx); + return acc; + }, []); + expect(fileIndices).to.have.lengthOf(2); + expect(cmdArray[fileIndices[0] + 1]).to.equal('/tmp/docker-compose.yml'); + expect(cmdArray[fileIndices[1] + 1]).to.equal('/tmp/docker-compose.override.yml'); + }); +}); + +// ============================================================================ +// 3. Compose closure — CNI network bridging +// ============================================================================ +describe('containerd compose start: CNI network bridging', () => { + let ensureCniStub; + + afterEach(() => { + if (ensureCniStub) ensureCniStub.restore(); + mockFs.restore(); + }); + + it('should call ensureComposeCniNetworks on "start" command', async () => { + const {compose, shell} = createContainerdEngine(); + + // Create a mock compose file with a network definition. + // ensureCniNetwork writes to /etc/lando/cni/finch/ with names like + // nerdctl-.conflist + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine +networks: + frontend: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + // Verify shell.sh was called twice (two-phase start) + sinon.assert.calledTwice(shell.sh); + + // Verify CNI conflist files were created + // ensureComposeCniNetworks creates configs for testapp_default and testapp_frontend + const cniDir = '/etc/lando/cni/finch'; + const files = fs.readdirSync(cniDir); + expect(files).to.include('nerdctl-testapp_default.conflist'); + expect(files).to.include('nerdctl-testapp_frontend.conflist'); + }); + + it('should NOT call ensureComposeCniNetworks on "stop" command', async () => { + const {compose, shell: shStub} = createContainerdEngine(); + + // Create a mock compose file — CNI should NOT be created for stop + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine +networks: + mynet: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('stop', datum); + + // shell.sh should still be called (compose stop executes) + sinon.assert.calledOnce(shStub.sh); + + // But no CNI files should be created for stop + const cniDir = '/etc/lando/cni/finch'; + const files = fs.readdirSync(cniDir); + expect(files).to.have.lengthOf(0); + }); + + it('should NOT call ensureComposeCniNetworks on "remove" command', async () => { + const {compose, shell: shStub} = createContainerdEngine(); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {purge: true}, + }; + + await compose('remove', datum); + + sinon.assert.calledOnce(shStub.sh); + + const cniDir = '/etc/lando/cni/finch'; + const files = fs.readdirSync(cniDir); + expect(files).to.have.lengthOf(0); + }); + + it('should create CNI configs for _default and custom networks on start', async () => { + const {compose} = createContainerdEngine(); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine + api: + image: node:18 +networks: + frontend: + driver: bridge + backend: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'myapp', + opts: {}, + }; + + await compose('start', datum); + + const cniDir = '/etc/lando/cni/finch'; + const files = fs.readdirSync(cniDir); + + // Should have: myapp_default, myapp_frontend, myapp_backend + expect(files).to.include('nerdctl-myapp_default.conflist'); + expect(files).to.include('nerdctl-myapp_frontend.conflist'); + expect(files).to.include('nerdctl-myapp_backend.conflist'); + }); + + it('should skip external networks when creating CNI configs', async () => { + const {compose} = createContainerdEngine(); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine +networks: + internal: + driver: bridge + external_net: + external: true +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('start', datum); + + const cniDir = '/etc/lando/cni/finch'; + const files = fs.readdirSync(cniDir); + + expect(files).to.include('nerdctl-testapp_default.conflist'); + expect(files).to.include('nerdctl-testapp_internal.conflist'); + // External network should NOT have a conflist + expect(files).to.not.include('nerdctl-testapp_external_net.conflist'); + expect(files).to.not.include('nerdctl-external_net.conflist'); + }); +}); + +// ============================================================================ +// 4. Compose closure — all compose commands +// ============================================================================ +describe('containerd compose start: all compose commands', () => { + it('should generate "stop" sub-command for stop', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('stop', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('stop'); + }); + + it('should generate "down" sub-command for remove with purge', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {purge: true}, + }; + + await compose('remove', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('down'); + }); + + it('should generate "rm" sub-command for remove without purge', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {purge: false}, + }; + + await compose('remove', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('rm'); + }); + + it('should generate "logs" sub-command for logs', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('logs', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('logs'); + }); + + it('should generate "ps" sub-command for getId', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('getId', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('ps'); + }); + + it('should generate "kill" sub-command for kill', async () => { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await compose('kill', datum); + + sinon.assert.calledOnce(shell.sh); + const [cmdArray] = shell.sh.firstCall.args; + expect(cmdArray).to.include('kill'); + }); + + it('should inject DOCKER_HOST for every compose command type', async () => { + const commands = ['start', 'stop', 'remove', 'logs', 'getId', 'kill']; + + for (const cmd of commands) { + const {compose, shell} = createContainerdEngine(); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: cmd === 'remove' ? {purge: true} : {}, + }; + + await compose(cmd, datum); + + if (cmd === 'start') { + // Two-phase start: shell.sh called twice + sinon.assert.calledTwice(shell.sh); + } else { + sinon.assert.calledOnce(shell.sh); + } + // All calls should have DOCKER_HOST + for (const call of shell.sh.getCalls()) { + const [, opts] = call.args; + expect(opts.env.DOCKER_HOST).to.match(/^unix:\/\/.*finch\.sock$/, + `DOCKER_HOST should be set for "${cmd}" command`); + } + } + }); +}); + +// ============================================================================ +// 5. Bluebird Proxy wrapping on ContainerdContainer +// ============================================================================ +describe('containerd compose start: Bluebird Proxy wrapping', () => { + it('should wrap ContainerdContainer methods to return Bluebird promises', () => { + const {engine} = createContainerdEngine(); + const Promise = require('./../lib/promise'); + + // engine.docker is a Proxy wrapping ContainerdContainer + // Calling list() should return a Bluebird promise (has .each, .tap, .map) + const result = engine.docker.list(); + expect(result).to.be.an.instanceOf(Promise); + expect(result.each).to.be.a('function'); + expect(result.tap).to.be.a('function'); + expect(result.map).to.be.a('function'); + }); + + it('should preserve non-function properties on the proxy', () => { + const {engine} = createContainerdEngine(); + + // ContainerdContainer has an 'id' property set in constructor + expect(engine.docker.id).to.equal('test-id'); + }); +}); + +// ============================================================================ +// 6. Full engine.start() → router.eventWrapper → compose flow +// ============================================================================ +describe('containerd compose start: full engine.start() flow', () => { + it('should call daemon.up() before compose start', async () => { + const {engine, deps} = createContainerdEngine(); + + // Stub daemon.up() to track call order + const callOrder = []; + sinon.stub(engine.daemon, 'up').callsFake(() => { + callOrder.push('daemon.up'); + return BluebirdPromise.resolve(); + }); + + // The shell.sh stub records when compose is called + deps.shell.sh.callsFake(() => { + callOrder.push('compose'); + return Promise.resolve(''); + }); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await engine.start(data); + + expect(callOrder).to.include('daemon.up'); + expect(callOrder).to.include('compose'); + expect(callOrder.indexOf('daemon.up')).to.be.lessThan(callOrder.indexOf('compose')); + + engine.daemon.up.restore(); + }); + + it('should emit pre-engine-start and post-engine-start events', async () => { + const {engine, deps} = createContainerdEngine(); + + // Stub daemon.up() so it doesn't actually try to start containerd + sinon.stub(engine.daemon, 'up').callsFake(() => BluebirdPromise.resolve()); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await engine.start(data); + + // events.emit is called for various lifecycle events + const emitCalls = deps.events.emit.getCalls().map(c => c.args[0]); + expect(emitCalls).to.include('pre-engine-autostart'); + expect(emitCalls).to.include('engine-autostart'); + expect(emitCalls).to.include('pre-engine-start'); + expect(emitCalls).to.include('post-engine-start'); + + engine.daemon.up.restore(); + }); + + it('should pass data through to compose for a single datum', async () => { + const {engine, deps} = createContainerdEngine(); + + sinon.stub(engine.daemon, 'up').callsFake(() => BluebirdPromise.resolve()); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'myproject', + opts: {services: ['web']}, + }; + + await engine.start(data); + + // Two-phase start: shell.sh called twice per datum + sinon.assert.calledTwice(deps.shell.sh); + + // Check Phase 3 (second call) for the actual start command + const [cmdArray] = deps.shell.sh.secondCall.args; + + // Should include the project name + const projectIdx = cmdArray.indexOf('--project-name'); + expect(cmdArray[projectIdx + 1]).to.equal('myproject'); + + // Should include 'up' for start + expect(cmdArray).to.include('up'); + + engine.daemon.up.restore(); + }); + + it('should handle an array of data objects (multiple compose sets)', async () => { + const {engine, deps} = createContainerdEngine(); + + sinon.stub(engine.daemon, 'up').callsFake(() => BluebirdPromise.resolve()); + + const data = [ + { + compose: ['/tmp/compose-a.yml'], + project: 'project-a', + opts: {}, + }, + { + compose: ['/tmp/compose-b.yml'], + project: 'project-b', + opts: {}, + }, + ]; + + await engine.start(data); + + // Two-phase start: shell.sh called 4 times (2 phases × 2 datums) + expect(deps.shell.sh.callCount).to.equal(4); + + // Calls are: datum-a phase 1, datum-a phase 3, datum-b phase 1, datum-b phase 3 + // Phase 3 calls (second and fourth) have --detach and contain the project names + const [cmdA] = deps.shell.sh.getCall(1).args; // datum-a phase 3 + const [cmdB] = deps.shell.sh.getCall(3).args; // datum-b phase 3 + + const projectIdxA = cmdA.indexOf('--project-name'); + expect(cmdA[projectIdxA + 1]).to.equal('project-a'); + + const projectIdxB = cmdB.indexOf('--project-name'); + expect(cmdB[projectIdxB + 1]).to.equal('project-b'); + + engine.daemon.up.restore(); + }); + + it('should short-circuit when opts.services is an empty array', async () => { + const {engine, deps} = createContainerdEngine(); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {services: []}, + }; + + await engine.start(data); + + // Should NOT call shell.sh — engine.start returns early for empty services + sinon.assert.notCalled(deps.shell.sh); + }); + + it('should return a thenable (Bluebird promise) from engine.start()', () => { + const {engine} = createContainerdEngine(); + + sinon.stub(engine.daemon, 'up').callsFake(() => BluebirdPromise.resolve()); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + const result = engine.start(data); + // router.js uses Bluebird, so the return is a Bluebird promise + expect(result).to.have.property('then').that.is.a('function'); + expect(result).to.be.an.instanceOf(BluebirdPromise); + + engine.daemon.up.restore(); + }); +}); + +// ============================================================================ +// 7. Compose closure vs Docker compose closure — structural parity +// ============================================================================ +describe('containerd compose start: parity with Docker compose path', () => { + it('should use the same compose.js command builder as Docker engine', async () => { + // Both containerd and docker paths require('./compose') and call compose[cmd]() + // Verify they produce the same command structure (minus env vars) + const dockerConfig = { + engine: 'docker', + orchestratorBin: '/usr/bin/docker-compose', + orchestratorVersion: '2.31.0', + dockerBin: '/usr/bin/docker', + engineConfig: {}, + process: 'node', + userConfRoot: '/tmp/.lando-test', + }; + + const dockerDeps = stubDeps(); + const dockerManager = new BackendManager(dockerConfig, dockerDeps.cache, dockerDeps.events, dockerDeps.log, dockerDeps.shell); + const dockerEngine = dockerManager.createEngine('test-id'); + + const cdDeps = stubDeps(); + const cdConfig = stubConfig({orchestratorBin: '/usr/bin/docker-compose'}); + const cdManager = new BackendManager( + cdConfig, cdDeps.cache, cdDeps.events, cdDeps.log, cdDeps.shell, + ); + const containerdEngine = cdManager.createEngine('test-id'); + + // Mock compose file — ensureComposeCniNetworks reads it on 'start' + mockFs({ + '/tmp/docker-compose.yml': 'services:\n web:\n image: nginx:alpine\n', + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + try { + // Call start on both — await to surface any promise rejections + await dockerEngine.compose('start', datum); + await containerdEngine.compose('start', datum); + + // Docker path calls shell.sh once; containerd path calls twice (two-phase start) + sinon.assert.calledOnce(dockerDeps.shell.sh); + sinon.assert.calledTwice(cdDeps.shell.sh); + + const [dockerCmd] = dockerDeps.shell.sh.firstCall.args; + // Phase 3 (second call) is the equivalent of Docker's single start call + const [containerdCmd] = cdDeps.shell.sh.secondCall.args; + + // Both should use the same orchestrator binary + expect(dockerCmd[0]).to.equal(containerdCmd[0]); + + // Both Phase 3 and Docker's start should have the same compose sub-commands + // (project-name, file, up, --detach, --no-recreate, --remove-orphans) + expect(dockerCmd).to.deep.equal(containerdCmd); + } finally { + mockFs.restore(); + } + }); + + it('should differ only in env vars between Docker and containerd compose', async () => { + const dockerConfig = { + engine: 'docker', + orchestratorBin: '/usr/bin/docker-compose', + orchestratorVersion: '2.31.0', + dockerBin: '/usr/bin/docker', + engineConfig: {}, + process: 'node', + userConfRoot: '/tmp/.lando-test', + }; + + const dockerDeps = stubDeps(); + const dockerManager = new BackendManager(dockerConfig, dockerDeps.cache, dockerDeps.events, dockerDeps.log, dockerDeps.shell); + const dockerEngine = dockerManager.createEngine('test-id'); + + const cdDeps = stubDeps(); + const cdConfig = stubConfig({orchestratorBin: '/usr/bin/docker-compose'}); + const cdManager = new BackendManager( + cdConfig, cdDeps.cache, cdDeps.events, cdDeps.log, cdDeps.shell, + ); + const containerdEngine = cdManager.createEngine('test-id'); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'testapp', + opts: {}, + }; + + await dockerEngine.compose('stop', datum); + await containerdEngine.compose('stop', datum); + + const [, dockerOpts] = dockerDeps.shell.sh.firstCall.args; + const [, containerdOpts] = cdDeps.shell.sh.firstCall.args; + + // Docker path should NOT have DOCKER_HOST set to finch socket + expect(dockerOpts.env).to.be.undefined; + + // Containerd path MUST have DOCKER_HOST, DOCKER_BUILDKIT, BUILDKIT_HOST + expect(containerdOpts.env).to.have.property('DOCKER_HOST'); + expect(containerdOpts.env).to.have.property('DOCKER_BUILDKIT'); + expect(containerdOpts.env).to.have.property('BUILDKIT_HOST'); + }); +}); + +// ============================================================================ +// 8. Multi-container orchestration — CNI + compose integration +// ============================================================================ +describe('containerd compose start: multi-container orchestration', () => { + afterEach(() => { + mockFs.restore(); + }); + + it('should create CNI configs for all networks before starting multi-service compose', async () => { + // Simulates a typical Lando multi-service app: web + db + cache + // on a shared network plus a frontend-specific network. + const {compose, shell} = createContainerdEngine(); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine + networks: + - default + - frontend + db: + image: postgres:16 + networks: + - default + cache: + image: redis:7 + networks: + - default +networks: + frontend: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'myapp', + opts: {}, + }; + + await compose('start', datum); + + // Verify compose command was executed (two-phase start) + sinon.assert.calledTwice(shell.sh); + // Phase 3 (second call) is the actual start with --detach + const [cmdArray] = shell.sh.secondCall.args; + expect(cmdArray).to.include('up'); + expect(cmdArray).to.include('--detach'); + + // Verify CNI conflist files for both default and custom networks + const files = fs.readdirSync('/etc/lando/cni/finch'); + expect(files).to.include('nerdctl-myapp_default.conflist'); + expect(files).to.include('nerdctl-myapp_frontend.conflist'); + + // Verify conflist content is valid JSON with correct plugin chain + const defaultConflist = JSON.parse( + fs.readFileSync('/etc/lando/cni/finch/nerdctl-myapp_default.conflist', 'utf8'), + ); + expect(defaultConflist.name).to.equal('myapp_default'); + const pluginTypes = defaultConflist.plugins.map(p => p.type); + // portmap is intentionally excluded — see ensure-cni-network.js: + // CNI portmap rejects HostPort:0 (random port); finch-daemon handles port mapping instead. + expect(pluginTypes).to.deep.equal(['bridge', 'firewall', 'tuning']); + }); + + it('should merge networks from multiple compose files for multi-container apps', async () => { + // Simulates Lando's compose layering: base services + globals + proxy overrides + const {compose, shell} = createContainerdEngine(); + + mockFs({ + '/tmp/compose-base.yml': ` +services: + web: + image: nginx:alpine + db: + image: mariadb:10.11 +`, + '/tmp/compose-proxy.yml': ` +services: + web: + networks: + - lando_proxyedge +networks: + lando_proxyedge: + name: landoproxy_edge + external: true + appnet: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/compose-base.yml', '/tmp/compose-proxy.yml'], + project: 'myapp', + opts: {}, + }; + + await compose('start', datum); + + // Two-phase start: shell.sh called twice + sinon.assert.calledTwice(shell.sh); + + const files = fs.readdirSync('/etc/lando/cni/finch'); + // Should have _default and appnet (non-external) + expect(files).to.include('nerdctl-myapp_default.conflist'); + expect(files).to.include('nerdctl-myapp_appnet.conflist'); + // Should NOT have external network (proxy edge is managed elsewhere) + expect(files).to.not.include('nerdctl-landoproxy_edge.conflist'); + }); + + it('should allocate unique subnets for each network in multi-container setup', async () => { + const {compose} = createContainerdEngine(); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine + api: + image: node:20 + db: + image: postgres:16 +networks: + frontend: + driver: bridge + backend: + driver: bridge +`, + '/etc/lando/cni/finch': {}, + }); + + const datum = { + compose: ['/tmp/docker-compose.yml'], + project: 'multiapp', + opts: {}, + }; + + await compose('start', datum); + + // Read all conflist files and verify unique subnets + const files = fs.readdirSync('/etc/lando/cni/finch'); + expect(files).to.have.lengthOf(3); // default + frontend + backend + + /** @type {Set} */ + const subnets = new Set(); + for (const file of files) { + const conflist = JSON.parse( + fs.readFileSync(`/etc/lando/cni/finch/${file}`, 'utf8'), + ); + const bridgePlugin = conflist.plugins.find(p => p.type === 'bridge'); + expect(bridgePlugin).to.exist; + const subnet = bridgePlugin.ipam.ranges[0][0].subnet; + expect(subnet).to.match(/^10\.4\.\d+\.0\/24$/); + expect(subnets.has(subnet)).to.equal(false, `Duplicate subnet: ${subnet}`); + subnets.add(subnet); + } + expect(subnets.size).to.equal(3); + }); + + it('should handle engine.start with multi-service datum and CNI pre-creation', async () => { + // Full engine.start() flow: daemon.up() → CNI pre-creation → compose up + const {engine, deps} = createContainerdEngine(); + + sinon.stub(engine.daemon, 'up').callsFake(() => BluebirdPromise.resolve()); + + mockFs({ + '/tmp/docker-compose.yml': ` +services: + web: + image: nginx:alpine + db: + image: postgres:16 + cache: + image: redis:7 +`, + '/etc/lando/cni/finch': {}, + }); + + const data = { + compose: ['/tmp/docker-compose.yml'], + project: 'fullflow', + opts: {}, + }; + + await engine.start(data); + + // daemon.up() should be called before compose + sinon.assert.calledOnce(engine.daemon.up); + + // Two-phase start: shell.sh called twice + sinon.assert.calledTwice(deps.shell.sh); + + // CNI default network should be created + const files = fs.readdirSync('/etc/lando/cni/finch'); + expect(files).to.include('nerdctl-fullflow_default.conflist'); + + // DOCKER_HOST should point to finch-daemon socket on both calls + for (const call of deps.shell.sh.getCalls()) { + const [, opts] = call.args; + expect(opts.env.DOCKER_HOST).to.match(/finch\.sock$/); + } + + engine.daemon.up.restore(); + }); +}); + +// ============================================================================ +// 9. Engine construction — binary path resolution +// ============================================================================ +describe('containerd compose start: binary path resolution', () => { + it('should use config.orchestratorBin when provided', () => { + const {engine} = createContainerdEngine({ + orchestratorBin: '/opt/custom/docker-compose', + }); + + expect(engine.daemon.compose).to.equal('/opt/custom/docker-compose'); + }); + + it('should fall back to userConfRoot/bin/docker-compose-v when orchestratorBin not set', () => { + const {engine} = createContainerdEngine({ + orchestratorBin: undefined, + orchestratorVersion: '2.31.0', + userConfRoot: '/home/testuser/.lando', + }); + + expect(engine.daemon.compose).to.equal('/home/testuser/.lando/bin/docker-compose-v2.31.0'); + }); + + it('should set daemon.compose to the orchestrator binary path', () => { + const {engine} = createContainerdEngine({ + orchestratorBin: '/usr/bin/docker-compose', + }); + + // Per BRIEF: daemon.compose is set so Engine.composeInstalled resolves correctly + expect(engine.daemon.compose).to.equal('/usr/bin/docker-compose'); + }); + + it('should set engineBackend to "containerd"', () => { + const {engine} = createContainerdEngine(); + expect(engine.engineBackend).to.equal('containerd'); + }); + + it('should set dockerInstalled based on containerd binary existence', () => { + const {engine} = createContainerdEngine({ + containerdBin: '/definitely/does/not/exist/containerd', + }); + + // The containerd binary doesn't exist, so dockerInstalled should be false + expect(engine.dockerInstalled).to.equal(false); + }); +}); diff --git a/test/containerd-container.spec.js b/test/containerd-container.spec.js new file mode 100644 index 000000000..5d7abab55 --- /dev/null +++ b/test/containerd-container.spec.js @@ -0,0 +1,234 @@ +/* + * Tests for containerd-container. + * @file containerd-container.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const ContainerdContainer = require('./../lib/backends/containerd/containerd-container'); + +// We need to access the private `parseLabels` helper. +// Since it's module-scoped, we test it indirectly through the class's behavior, +// but we can also require the module file and extract it via a test-friendly approach. +// The parseLabels function is used internally by normalizeContainer and exposed +// through the list() pipeline. For direct unit testing, we'll re-extract it. + +// Helper: extract parseLabels by reading the module source and evaluating the function. +// A cleaner approach: since parseLabels is used by the module, we test it through +// the container's behavior. But we can also just copy the logic for direct testing. +// Instead, let's test through the public API where possible. + +// For parseLabels testing, we'll require the file and test normalizeContainer behavior +// through the getContainer/getNetwork proxy methods and direct label parsing. + +// Direct access: since parseLabels is a module-level const, we can test it via +// the class methods that use it. For truly direct testing, let's use a small trick: +const Module = require('module'); +const path = require('path'); + +/** + * Extract the parseLabels function from the containerd-container module. + * + * This reads the module source and evaluates just the parseLabels function + * in an isolated context. This is a common pattern for testing private helpers. + */ +function getParseLabels() { + const fs = require('fs'); + const src = fs.readFileSync( + path.join(__dirname, '..', 'lib', 'backends', 'containerd', 'containerd-container.js'), + 'utf8', + ); + + // Extract the parseLabels function body from source + const match = src.match(/const parseLabels = ([\s\S]*?)^};/m); + if (!match) throw new Error('Could not extract parseLabels from source'); + + // eslint-disable-next-line no-eval + const parseLabels = eval('(' + match[1] + '})'); + return parseLabels; +} + +let parseLabels; +try { + parseLabels = getParseLabels(); +} catch (err) { + // Fallback: if we can't extract it, we'll skip those tests + parseLabels = null; +} + +describe('containerd-container', () => { + describe('#ContainerdContainer', () => { + it('should be a constructor', () => { + expect(ContainerdContainer).to.be.a('function'); + }); + + it('should create an instance with default options', () => { + const cc = new ContainerdContainer({ + debug: () => {}, + }); + + expect(cc).to.have.property('finchSocket'); + expect(cc).to.have.property('dockerode'); + expect(cc).to.have.property('id'); + cc.id.should.equal('lando'); + }); + + it('should accept custom options', () => { + const cc = new ContainerdContainer({ + finchSocket: '/custom/socket.sock', + id: 'custom-id', + debug: () => {}, + }); + + cc.finchSocket.should.equal('/custom/socket.sock'); + cc.id.should.equal('custom-id'); + }); + }); + + describe('#parseLabels', () => { + // Skip if we couldn't extract the function + before(function() { + if (!parseLabels) this.skip(); + }); + + it('should return an empty object for null/undefined input', () => { + expect(parseLabels(null)).to.deep.equal({}); + expect(parseLabels(undefined)).to.deep.equal({}); + }); + + it('should return an empty object for empty string', () => { + expect(parseLabels('')).to.deep.equal({}); + }); + + it('should return the same object if input is already an object', () => { + const labels = {'io.lando.container': 'TRUE', 'com.docker.compose.project': 'myapp'}; + expect(parseLabels(labels)).to.equal(labels); + }); + + it('should return an empty object for non-string/non-object input', () => { + expect(parseLabels(42)).to.deep.equal({}); + expect(parseLabels(true)).to.deep.equal({}); + }); + + it('should parse simple key=value pairs separated by commas', () => { + const input = 'io.lando.container=TRUE,com.docker.compose.project=myapp'; + const result = parseLabels(input); + + result.should.deep.equal({ + 'io.lando.container': 'TRUE', + 'com.docker.compose.project': 'myapp', + }); + }); + + it('should handle values containing "=" (split on first = only)', () => { + const input = 'key1=val=ue,key2=normal'; + const result = parseLabels(input); + + result['key1'].should.equal('val=ue'); + result['key2'].should.equal('normal'); + }); + + it('should handle commas inside label values (the comma-in-value bug fix)', () => { + // This is the key test: io.lando.landofiles value contains commas + const input = 'io.lando.container=TRUE,io.lando.landofiles=.lando.yml,.lando.local.yml,com.docker.compose.project=myapp'; + const result = parseLabels(input); + + result['io.lando.container'].should.equal('TRUE'); + // The comma-separated filenames should be preserved as a single value + result['io.lando.landofiles'].should.equal('.lando.yml,.lando.local.yml'); + result['com.docker.compose.project'].should.equal('myapp'); + }); + + it('should handle a single key=value pair with no commas', () => { + const input = 'io.lando.container=TRUE'; + const result = parseLabels(input); + + result.should.deep.equal({'io.lando.container': 'TRUE'}); + }); + + it('should trim whitespace from keys', () => { + const input = ' key1 =value1, key2 =value2'; + const result = parseLabels(input); + + expect(result).to.have.property('key1'); + expect(result).to.have.property('key2'); + }); + }); + + describe('#getContainer', () => { + it('should return a proxy object with id, inspect, remove, and stop', () => { + const cc = new ContainerdContainer({debug: () => {}}); + const container = cc.getContainer('abc123'); + + expect(container).to.be.an('object'); + container.id.should.equal('abc123'); + expect(container.inspect).to.be.a('function'); + expect(container.remove).to.be.a('function'); + expect(container.stop).to.be.a('function'); + }); + + it('should store the correct container id', () => { + const cc = new ContainerdContainer({debug: () => {}}); + const container = cc.getContainer('my-container-id'); + + container.id.should.equal('my-container-id'); + }); + }); + + describe('#getNetwork', () => { + it('should return a proxy object with id, inspect, and remove', () => { + const cc = new ContainerdContainer({debug: () => {}}); + const network = cc.getNetwork('my-network'); + + expect(network).to.be.an('object'); + network.id.should.equal('my-network'); + expect(network.inspect).to.be.a('function'); + expect(network.remove).to.be.a('function'); + }); + + it('should store the correct network id', () => { + const cc = new ContainerdContainer({debug: () => {}}); + const network = cc.getNetwork('lando_bridge_network'); + + network.id.should.equal('lando_bridge_network'); + }); + }); + + describe('#_isNotFoundError', () => { + it('should return true for "no such container" errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(new Error('no such container: abc123')).should.be.true; + }); + + it('should return true for "not found" errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(new Error('container not found')).should.be.true; + }); + + it('should return true for "no such network" errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(new Error('no such network: my-net')).should.be.true; + }); + + it('should return true for "no such object" errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(new Error('no such object')).should.be.true; + }); + + it('should return false for other errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(new Error('permission denied')).should.be.false; + }); + + it('should return false for null/empty errors', () => { + const cc = new ContainerdContainer({debug: () => {}}); + cc._isNotFoundError(null).should.be.false; + cc._isNotFoundError({}).should.be.false; + }); + }); +}); diff --git a/test/containerd-integration.spec.js b/test/containerd-integration.spec.js new file mode 100644 index 000000000..8f3e47145 --- /dev/null +++ b/test/containerd-integration.spec.js @@ -0,0 +1,615 @@ +/* + * Integration tests for the containerd backend. + * + * Tests that require a real containerd installation are gated behind + * `describeIfContainerd` and will be skipped when containerd is not present. + * The NerdctlCompose command-generation tests are pure unit tests and always run. + * + * @file containerd-integration.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const BackendManager = require('./../lib/backend-manager'); +const {ContainerdDaemon, ContainerdContainer} = require('./../lib/backends/containerd'); +// NerdctlCompose is deprecated (not used in production — docker-compose + DOCKER_HOST is used instead) +// but we still test its command generation as a regression safeguard. +const NerdctlCompose = require('./../lib/backends/containerd/nerdctl-compose'); + +// --------------------------------------------------------------------------- +// Detect containerd availability +// --------------------------------------------------------------------------- +const hasContainerd = fs.existsSync('/usr/bin/containerd') + || fs.existsSync(path.join(os.homedir(), '.lando/bin/containerd')); + +const describeIfContainerd = hasContainerd ? describe : describe.skip; + +// --------------------------------------------------------------------------- +// Helpers shared by stub-based and live tests +// --------------------------------------------------------------------------- + +/** Minimal stub config for BackendManager */ +const stubConfig = (overrides = {}) => ({ + engine: 'containerd', + orchestratorBin: '/usr/bin/docker-compose', + orchestratorVersion: '2.0.0', + dockerBin: '/usr/bin/docker', + engineConfig: {}, + process: 'node', + userConfRoot: path.join(os.homedir(), '.lando'), + ...overrides, +}); + +/** Minimal stub dependencies for BackendManager */ +const stubDeps = () => ({ + cache: {get: sinon.stub().returns(undefined), set: sinon.stub()}, + events: {on: sinon.stub(), emit: sinon.stub().resolves()}, + log: { + debug: sinon.stub(), + verbose: sinon.stub(), + info: sinon.stub(), + warn: sinon.stub(), + error: sinon.stub(), + silly: sinon.stub(), + }, + shell: {sh: sinon.stub().resolves('')}, +}); + +// ============================================================================ +// 1. BackendManager integration — engine="containerd" +// ============================================================================ +describe('containerd integration: BackendManager', () => { + it('should create an engine with the containerd backend', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + expect(engine).to.be.an('object'); + expect(engine).to.have.property('daemon'); + expect(engine).to.have.property('docker'); + expect(engine).to.have.property('compose'); + }); + + it('should set engineBackend to "containerd"', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + expect(engine.engineBackend).to.equal('containerd'); + }); + + it('should use ContainerdDaemon as the daemon backend', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + // Verify it's an instance of ContainerdDaemon + expect(engine.daemon).to.be.an.instanceOf(ContainerdDaemon); + }); + + it('should use ContainerdContainer as the docker/container backend', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + // Verify it's an instance of ContainerdContainer + expect(engine.docker).to.be.an.instanceOf(ContainerdContainer); + }); + + it('should expose daemon methods: up, down, isUp, getVersions', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + expect(engine.daemon.up).to.be.a('function'); + expect(engine.daemon.down).to.be.a('function'); + expect(engine.daemon.isUp).to.be.a('function'); + expect(engine.daemon.getVersions).to.be.a('function'); + }); + + it('should set composeInstalled based on orchestrator binary existence', () => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + // composeInstalled is a boolean derived from fs.existsSync(orchestratorBin) + expect(engine.composeInstalled).to.be.a('boolean'); + }); + + it('should mark dockerInstalled false when the containerd binary is missing', () => { + const config = stubConfig({ + engine: 'containerd', + containerdBin: '/definitely/missing/containerd', + }); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + + const engine = manager.createEngine('test-id'); + + expect(engine.dockerInstalled).to.equal(false); + }); +}); + +// ============================================================================ +// 2. ContainerdDaemon lifecycle (requires real containerd) +// ============================================================================ +describeIfContainerd('containerd integration: ContainerdDaemon lifecycle', function() { + // These tests may take a while to start/stop daemons + this.timeout(60000); + + let daemon; + + before(() => { + daemon = new ContainerdDaemon({ + userConfRoot: path.join(os.homedir(), '.lando'), + }); + }); + + it('should return version strings from getVersions()', async () => { + const versions = await daemon.getVersions(); + + expect(versions).to.be.an('object'); + expect(versions).to.have.property('containerd'); + expect(versions).to.have.property('buildkit'); + expect(versions).to.have.property('nerdctl'); + + // At least containerd should have a version if the binary exists + if (versions.containerd) { + expect(versions.containerd).to.match(/\d+\.\d+\.\d+/); + } + }); + + it('should return a boolean from isUp()', async () => { + const result = await daemon.isUp(); + expect(result).to.be.a('boolean'); + }); + + it('should start containerd with up() if not already running', async function() { + const wasBefore = await daemon.isUp(); + + if (!wasBefore) { + // Attempt to start — may require permissions; skip if it fails due to EACCES + try { + await daemon.up(); + } catch (err) { + if (err.message && (err.message.includes('EACCES') || err.message.includes('permission'))) { + this.skip(); + return; + } + throw err; + } + } + + const isUpNow = await daemon.isUp(); + expect(isUpNow).to.equal(true); + }); + + it('should complete down() without error (no-op on Linux per BRIEF)', async function() { + const isUpBefore = await daemon.isUp(); + + if (!isUpBefore) { + this.skip(); + return; + } + + try { + await daemon.down(); + } catch (err) { + if (err.message && (err.message.includes('EACCES') || err.message.includes('permission'))) { + this.skip(); + return; + } + throw err; + } + + // Per BRIEF: "ContainerdDaemon.down() is a no-op on Linux/WSL. The service + // keeps running for fast restart." The daemon should still be up. + const isUpAfter = await daemon.isUp(); + if (process.platform === 'linux') { + expect(isUpAfter).to.equal(true); + } else { + // macOS: Lima VM actually stops + expect(isUpAfter).to.equal(false); + } + }); +}); + +// ============================================================================ +// 3. ContainerdContainer operations (requires running containerd) +// ============================================================================ +describeIfContainerd('containerd integration: ContainerdContainer operations', function() { + this.timeout(30000); + + let container; + const testNetworkName = 'lando-test-net-' + Date.now(); + + before(() => { + container = new ContainerdContainer({ + finchSocket: '/run/lando/finch.sock', + id: 'lando', + }); + }); + + after(async () => { + // Clean up test network if it still exists + try { + const handle = container.getNetwork(testNetworkName); + await handle.remove(); + } catch { + // Network may already be removed — that's fine + } + }); + + it('should return an array from list()', async function() { + try { + const result = await container.list(); + expect(result).to.be.an('array'); + } catch (err) { + // If containerd isn't actually running, skip + if (err.message && err.message.includes('nerdctl')) { + this.skip(); + return; + } + throw err; + } + }); + + it('should create a network with createNet()', async function() { + try { + const result = await container.createNet(testNetworkName); + expect(result).to.be.an('object'); + expect(result).to.have.property('Name', testNetworkName); + } catch (err) { + if (err.message && (err.message.includes('nerdctl') || err.message.includes('EACCES'))) { + this.skip(); + return; + } + throw err; + } + }); + + it('should include the created network in listNetworks()', async function() { + try { + const networks = await container.listNetworks(); + expect(networks).to.be.an('array'); + + const found = networks.find(n => (n.Name || n.name) === testNetworkName); + expect(found, `expected to find network "${testNetworkName}"`).to.exist; + } catch (err) { + if (err.message && err.message.includes('nerdctl')) { + this.skip(); + return; + } + throw err; + } + }); + + it('should remove the network via getNetwork().remove()', async function() { + try { + const handle = container.getNetwork(testNetworkName); + expect(handle).to.have.property('remove').that.is.a('function'); + + await handle.remove(); + + // Verify it's gone + const networks = await container.listNetworks(); + const found = networks.find(n => (n.Name || n.name) === testNetworkName); + expect(found, `expected network "${testNetworkName}" to be removed`).to.not.exist; + } catch (err) { + if (err.message && err.message.includes('nerdctl')) { + this.skip(); + return; + } + throw err; + } + }); +}); + +// ============================================================================ +// 4. NerdctlCompose command generation (unit-level, always runs) +// ============================================================================ +describe('containerd integration: NerdctlCompose command generation', () => { + let nerdctlCompose; + const socketPath = '/run/lando/containerd.sock'; + + before(() => { + nerdctlCompose = new NerdctlCompose({socketPath}); + }); + + it('should be a valid NerdctlCompose instance', () => { + expect(nerdctlCompose).to.be.an.instanceOf(NerdctlCompose); + expect(nerdctlCompose.socketPath).to.equal(socketPath); + }); + + describe('#start (compose up)', () => { + it('should generate a compose up command with nerdctl --address prefix', () => { + const result = nerdctlCompose.start( + ['/tmp/docker-compose.yml'], + 'testproject', + {services: ['web']}, + ); + + expect(result).to.have.property('cmd').that.is.an('array'); + expect(result).to.have.property('opts').that.is.an('object'); + + // Should start with --address --namespace compose + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + + // Should contain 'up' somewhere in the command + expect(result.cmd).to.include('up'); + + // Should contain --detach (default background: true) + expect(result.cmd).to.include('--detach'); + }); + + it('should include the compose file via --file flag', () => { + const composeFile = '/my/project/docker-compose.yml'; + const result = nerdctlCompose.start( + [composeFile], + 'testproject', + {}, + ); + + // The compose file should appear after --file + const fileIdx = result.cmd.indexOf('--file'); + expect(fileIdx).to.be.greaterThan(-1); + expect(result.cmd[fileIdx + 1]).to.equal(composeFile); + }); + + it('should include --remove-orphans by default', () => { + const result = nerdctlCompose.start( + ['/tmp/docker-compose.yml'], + 'testproject', + {}, + ); + + expect(result.cmd).to.include('--remove-orphans'); + }); + }); + + describe('#stop (compose stop)', () => { + it('should generate a compose stop command with nerdctl prefix', () => { + const result = nerdctlCompose.stop( + ['/tmp/docker-compose.yml'], + 'testproject', + {services: ['web']}, + ); + + expect(result).to.have.property('cmd').that.is.an('array'); + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + expect(result.cmd).to.include('stop'); + }); + }); + + describe('#remove (compose down / rm)', () => { + it('should generate a compose down command when purge is true', () => { + const result = nerdctlCompose.remove( + ['/tmp/docker-compose.yml'], + 'testproject', + {purge: true}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + + // purge = true → uses 'down' + expect(result.cmd).to.include('down'); + }); + + it('should generate a compose rm command when purge is false', () => { + const result = nerdctlCompose.remove( + ['/tmp/docker-compose.yml'], + 'testproject', + {purge: false}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + + // purge = false → uses 'rm' + expect(result.cmd).to.include('rm'); + }); + + it('should include volume removal flags by default', () => { + const result = nerdctlCompose.remove( + ['/tmp/docker-compose.yml'], + 'testproject', + {purge: true}, + ); + + expect(result.cmd).to.include('-v'); + }); + + it('should include --remove-orphans for purge/down', () => { + const result = nerdctlCompose.remove( + ['/tmp/docker-compose.yml'], + 'testproject', + {purge: true}, + ); + + expect(result.cmd).to.include('--remove-orphans'); + }); + }); + + describe('#build', () => { + it('should generate a compose build command', () => { + const result = nerdctlCompose.build( + ['/tmp/docker-compose.yml'], + 'testproject', + {services: ['web'], local: ['web']}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + expect(result.cmd).to.include('build'); + }); + }); + + describe('#run (compose exec)', () => { + it('should generate a compose exec/run command', () => { + const result = nerdctlCompose.run( + ['/tmp/docker-compose.yml'], + 'testproject', + {cmd: ['echo', 'hello'], services: ['web']}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + }); + }); + + describe('#logs', () => { + it('should generate a compose logs command', () => { + const result = nerdctlCompose.logs( + ['/tmp/docker-compose.yml'], + 'testproject', + {services: ['web']}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + expect(result.cmd).to.include('logs'); + }); + }); + + describe('#pull', () => { + it('should generate a compose pull command', () => { + const result = nerdctlCompose.pull( + ['/tmp/docker-compose.yml'], + 'testproject', + {services: ['web'], pullable: ['web']}, + ); + + expect(result.cmd[0]).to.equal('--address'); + expect(result.cmd[1]).to.equal(socketPath); + expect(result.cmd[2]).to.equal('--namespace'); + expect(result.cmd[3]).to.equal('default'); + expect(result.cmd[4]).to.equal('compose'); + expect(result.cmd).to.include('pull'); + }); + }); +}); + +// ============================================================================ +// 5. Full engine lifecycle (requires real containerd) +// ============================================================================ +describeIfContainerd('containerd integration: full engine lifecycle', function() { + this.timeout(90000); + + let engine; + + before(() => { + const config = stubConfig({engine: 'containerd'}); + const {cache, events, log, shell} = stubDeps(); + const manager = new BackendManager(config, cache, events, log, shell); + engine = manager.createEngine('integration-test'); + }); + + it('should have a daemon that can be started', async function() { + try { + await engine.daemon.up(); + } catch (err) { + if (err.message && (err.message.includes('EACCES') || err.message.includes('permission'))) { + this.skip(); + return; + } + throw err; + } + }); + + it('should report daemon as up after start', async function() { + const isUp = await engine.daemon.isUp(); + + if (!isUp) { + // If we can't bring it up (permissions etc), skip + this.skip(); + return; + } + + expect(isUp).to.equal(true); + }); + + it('should return an array from engine.docker.list()', async function() { + const isUp = await engine.daemon.isUp(); + + if (!isUp) { + this.skip(); + return; + } + + const containers = await engine.docker.list(); + expect(containers).to.be.an('array'); + }); + + it('should complete down() without error (no-op on Linux per BRIEF)', async function() { + const isUp = await engine.daemon.isUp(); + + if (!isUp) { + this.skip(); + return; + } + + try { + await engine.daemon.down(); + } catch (err) { + if (err.message && (err.message.includes('EACCES') || err.message.includes('permission'))) { + this.skip(); + return; + } + throw err; + } + + // Per BRIEF: "ContainerdDaemon.down() is a no-op on Linux/WSL. The service + // keeps running for fast restart." + const isUpAfter = await engine.daemon.isUp(); + if (process.platform === 'linux') { + expect(isUpAfter).to.equal(true); + } else { + expect(isUpAfter).to.equal(false); + } + }); +}); diff --git a/test/containerd-messages.spec.js b/test/containerd-messages.spec.js new file mode 100644 index 000000000..9de0552a6 --- /dev/null +++ b/test/containerd-messages.spec.js @@ -0,0 +1,100 @@ +'use strict'; + +const {expect} = require('chai'); // eslint-disable-line + +const validTypes = ['error', 'warning', 'tip']; + +// Messages that take no arguments +const noArgMessages = [ + {name: 'containerd-not-running', file: '../messages/containerd-not-running'}, + {name: 'buildkitd-not-running', file: '../messages/buildkitd-not-running'}, + {name: 'containerd-binaries-not-found', file: '../messages/containerd-binaries-not-found'}, + {name: 'lima-not-installed', file: '../messages/lima-not-installed'}, + {name: 'lima-vm-not-running', file: '../messages/lima-vm-not-running'}, + {name: 'containerd-permission-denied', file: '../messages/containerd-permission-denied'}, + {name: 'containerd-socket-conflict', file: '../messages/containerd-socket-conflict'}, + {name: 'finch-daemon-not-running', file: '../messages/finch-daemon-not-running'}, +]; + +// Messages that take a string argument +const paramMessages = [ + {name: 'compose-failed-containerd', file: '../messages/compose-failed-containerd'}, +]; + +describe('containerd error/warning messages', () => { + describe('no-argument messages', () => { + for (const {name, file} of noArgMessages) { + describe(name, () => { + let result; + + before(() => { + const messageFn = require(file); + result = messageFn(); + }); + + it('should return an object with title, type, detail, and url', () => { + expect(result).to.have.property('title').that.is.a('string').and.is.not.empty; + expect(result).to.have.property('type').that.is.a('string'); + expect(result).to.have.property('detail').that.is.an('array'); + expect(result).to.have.property('url').that.is.a('string').and.is.not.empty; + }); + + it('should have a valid type', () => { + expect(validTypes).to.include(result.type); + }); + + it('should have detail as an array of strings', () => { + expect(result.detail).to.be.an('array').that.is.not.empty; + for (const line of result.detail) { + expect(line).to.be.a('string'); + } + }); + + it('should have a url starting with https://', () => { + expect(result.url).to.match(/^https:\/\//); + }); + }); + } + }); + + describe('parameterized messages', () => { + for (const {name, file} of paramMessages) { + describe(name, () => { + const testMessage = 'Something went wrong during compose up'; + let result; + + before(() => { + const messageFn = require(file); + result = messageFn(testMessage); + }); + + it('should return an object with title, type, detail, and url', () => { + expect(result).to.have.property('title').that.is.a('string').and.is.not.empty; + expect(result).to.have.property('type').that.is.a('string'); + expect(result).to.have.property('detail').that.is.an('array'); + expect(result).to.have.property('url').that.is.a('string').and.is.not.empty; + }); + + it('should have a valid type', () => { + expect(validTypes).to.include(result.type); + }); + + it('should have detail as an array of strings', () => { + expect(result.detail).to.be.an('array').that.is.not.empty; + for (const line of result.detail) { + expect(line).to.be.a('string'); + } + }); + + it('should include the parameter in detail', () => { + const detailText = result.detail.join(' '); + expect(detailText).to.include(testMessage); + }); + + it('should have a url starting with https://', () => { + expect(result.url).to.match(/^https:\/\//); + }); + }); + } + }); +}); diff --git a/test/containerd-networking.spec.js b/test/containerd-networking.spec.js new file mode 100644 index 000000000..61bd8ba47 --- /dev/null +++ b/test/containerd-networking.spec.js @@ -0,0 +1,345 @@ +/* + * Tests for containerd networking (createNet, getNetwork, listNetworks). + * @file containerd-networking.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const ContainerdContainer = require('./../lib/backends/containerd/containerd-container'); + +/** + * Create a ContainerdContainer instance with mocked Docker API methods. + * + * The mock captures network API calls so we can verify the containerd backend + * routes network operations through the finch-daemon Docker API. + * + * @param {Object} [overrides={}] - Per-test overrides. + * @param {Array} [overrides.networks=[]] - Network list returned by Docker API. + * @param {Object} [overrides.inspectData] - Inspect result for getNetwork().inspect(). + * @param {Error} [overrides.disconnectError=null] - Error thrown by network disconnect. + * @return {{cc: ContainerdContainer, calls: Array}} + */ +function createMockedInstance(overrides = {}) { + const calls = []; + const cc = new ContainerdContainer({debug: () => {}}); + + cc.dockerode = { + createNetwork: async opts => { + calls.push({method: 'createNetwork', opts}); + }, + listNetworks: async () => overrides.networks || [], + getNetwork: () => ({ + inspect: async () => overrides.inspectData || {Name: 'my-net', Id: 'abc123'}, + remove: async () => { + calls.push({method: 'remove'}); + }, + connect: async opts => { + calls.push({method: 'connect', opts}); + }, + disconnect: async opts => { + if (overrides.disconnectError) throw overrides.disconnectError; + calls.push({method: 'disconnect', opts}); + }, + }), + }; + + return {cc, calls}; +} + +describe('containerd-networking', () => { + // =========================================================================== + // createNet + // =========================================================================== + describe('#createNet', () => { + it('should create a Docker API network with the lando label', async () => { + const {cc, calls} = createMockedInstance(); + + await cc.createNet('my-net'); + + calls[0].method.should.equal('createNetwork'); + calls[0].opts.Name.should.equal('my-net'); + calls[0].opts.Labels.should.deep.equal({'io.lando.container': 'TRUE'}); + calls[0].opts.Attachable.should.equal(true); + }); + + it('should not include --internal even when Internal option is not set', async () => { + const {cc, calls} = createMockedInstance(); + + await cc.createNet('my-net', {Internal: false}); + + expect(calls[0].opts).to.not.have.property('Internal'); + }); + + it('should include extra labels from opts.Labels', async () => { + const {cc, calls} = createMockedInstance(); + + await cc.createNet('my-net', { + Labels: { + 'com.example.env': 'production', + 'com.example.version': '2.0', + }, + }); + + calls[0].opts.Labels.should.deep.equal({ + 'io.lando.container': 'TRUE', + 'com.example.env': 'production', + 'com.example.version': '2.0', + }); + }); + + it('should call network inspect after creation and return parsed data', async () => { + const inspectData = {Name: 'my-net', Id: 'abc123', Driver: 'bridge'}; + const {cc, calls} = createMockedInstance({inspectData}); + + const result = await cc.createNet('my-net'); + + calls.length.should.equal(1); + + result.should.deep.equal(inspectData); + }); + }); + + // =========================================================================== + // getNetwork().connect + // =========================================================================== + describe('#getNetwork().connect', () => { + it('should return a proxy with connect and disconnect methods', () => { + const {cc} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + expect(network.connect).to.be.a('function'); + expect(network.disconnect).to.be.a('function'); + }); + + it('should proxy network connect through dockerode', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.connect({Container: 'my-container-id'}); + + calls.length.should.equal(1); + calls[0].should.deep.equal({method: 'connect', opts: {Container: 'my-container-id'}}); + }); + + it('should preserve aliases on dockerode network connect', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.connect({ + Container: 'my-container-id', + EndpointConfig: { + Aliases: ['web.myapp.internal', 'web'], + }, + }); + + calls.length.should.equal(1); + calls[0].should.deep.equal({ + method: 'connect', + opts: { + Container: 'my-container-id', + EndpointConfig: { + Aliases: ['web.myapp.internal', 'web'], + }, + }, + }); + }); + + it('should throw if Container is not provided', async () => { + const {cc} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + try { + await network.connect({}); + throw new Error('should have thrown'); + } catch (err) { + err.message.should.include('Container is required'); + } + }); + + it('should throw if connect is called with no arguments', async () => { + const {cc} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + try { + await network.connect(); + throw new Error('should have thrown'); + } catch (err) { + err.message.should.include('Container is required'); + } + }); + + it('should handle EndpointConfig without Aliases gracefully', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.connect({ + Container: 'cid-123', + EndpointConfig: {}, + }); + + calls.length.should.equal(1); + calls[0].should.deep.equal({method: 'connect', opts: {Container: 'cid-123', EndpointConfig: {}}}); + }); + }); + + // =========================================================================== + // getNetwork().disconnect + // =========================================================================== + describe('#getNetwork().disconnect', () => { + it('should proxy network disconnect through dockerode', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.disconnect({Container: 'my-container-id'}); + + calls.length.should.equal(1); + calls[0].should.deep.equal({method: 'disconnect', opts: {Container: 'my-container-id'}}); + }); + + it('should ignore Force when nerdctl does not support it', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.disconnect({Container: 'my-container-id', Force: true}); + + calls.length.should.equal(1); + calls[0].should.deep.equal({method: 'disconnect', opts: {Container: 'my-container-id', Force: true}}); + }); + + it('should not include --force flag when Force is false', async () => { + const {cc, calls} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + await network.disconnect({Container: 'my-container-id', Force: false}); + + calls.length.should.equal(1); + calls[0].should.deep.equal({method: 'disconnect', opts: {Container: 'my-container-id', Force: false}}); + }); + + it('should throw if Container is not provided', async () => { + const {cc} = createMockedInstance(); + const network = cc.getNetwork('landonet'); + + try { + await network.disconnect({Force: true}); + throw new Error('should have thrown'); + } catch (err) { + err.message.should.include('Container is required'); + } + }); + + it('should silently ignore "is not connected" errors (Docker parity)', async () => { + const {cc} = createMockedInstance({disconnectError: new Error('container abc123 is not connected to network landonet')}); + const network = cc.getNetwork('landonet'); + + // Should NOT throw + await network.disconnect({Container: 'abc123'}); + }); + + it('should re-throw non "is not connected" errors', async () => { + const {cc} = createMockedInstance({disconnectError: new Error('permission denied')}); + const network = cc.getNetwork('landonet'); + + try { + await network.disconnect({Container: 'abc123'}); + throw new Error('should have thrown'); + } catch (err) { + err.message.should.equal('permission denied'); + } + }); + }); + + // =========================================================================== + // listNetworks + // =========================================================================== + describe('#listNetworks', () => { + it('should filter networks by name', async () => { + const {cc} = createMockedInstance({ + networks: [ + JSON.stringify({Name: 'lando_bridge_network', ID: 'abc123', Labels: ''}), + JSON.stringify({Name: 'other-network', ID: 'def456', Labels: ''}), + JSON.stringify({Name: 'lando_custom_net', ID: 'ghi789', Labels: ''}), + ].map(JSON.parse), + }); + + const result = await cc.listNetworks({filters: {name: ['lando']}}); + result.length.should.equal(2); + result[0].Name.should.equal('lando_bridge_network'); + result[1].Name.should.equal('lando_custom_net'); + }); + + it('should filter networks by id (prefix match)', async () => { + const {cc} = createMockedInstance({ + networks: [ + JSON.stringify({Name: 'net1', ID: 'abc123def', Labels: ''}), + JSON.stringify({Name: 'net2', ID: 'xyz789ghi', Labels: ''}), + ].map(JSON.parse), + }); + + const result = await cc.listNetworks({filters: {id: ['abc']}}); + result.length.should.equal(1); + result[0].Name.should.equal('net1'); + }); + + it('should filter networks by label', async () => { + const {cc} = createMockedInstance({ + networks: [ + JSON.stringify({Name: 'net1', ID: 'abc', Labels: 'io.lando.container=TRUE'}), + JSON.stringify({Name: 'net2', ID: 'def', Labels: 'other=label'}), + ].map(JSON.parse), + }); + + const result = await cc.listNetworks({filters: {label: ['io.lando.container=TRUE']}}); + result.length.should.equal(1); + result[0].Name.should.equal('net1'); + }); + + it('should return all networks when no filters are specified', async () => { + const {cc} = createMockedInstance({ + networks: [ + JSON.stringify({Name: 'net1', ID: 'abc'}), + JSON.stringify({Name: 'net2', ID: 'def'}), + JSON.stringify({Name: 'net3', ID: 'ghi'}), + ].map(JSON.parse), + }); + + const result = await cc.listNetworks(); + result.length.should.equal(3); + }); + + it('should return empty array when nerdctl fails', async () => { + const {cc} = createMockedInstance(); + cc.dockerode.listNetworks = async () => { throw new Error('containerd not running'); }; + + const result = await cc.listNetworks(); + result.should.deep.equal([]); + }); + + it('should return empty array when nerdctl returns empty output', async () => { + const {cc} = createMockedInstance({networks: []}); + + const result = await cc.listNetworks(); + result.should.deep.equal([]); + }); + + it('should handle multiple name filters (match any)', async () => { + const {cc} = createMockedInstance({ + networks: [ + JSON.stringify({Name: 'alpha-net', ID: 'a1'}), + JSON.stringify({Name: 'beta-net', ID: 'b1'}), + JSON.stringify({Name: 'gamma-net', ID: 'c1'}), + ].map(JSON.parse), + }); + + const result = await cc.listNetworks({filters: {name: ['alpha', 'gamma']}}); + result.length.should.equal(2); + result[0].Name.should.equal('alpha-net'); + result[1].Name.should.equal('gamma-net'); + }); + }); +}); diff --git a/test/containerd-proxy-adapter.spec.js b/test/containerd-proxy-adapter.spec.js new file mode 100644 index 000000000..01a79611c --- /dev/null +++ b/test/containerd-proxy-adapter.spec.js @@ -0,0 +1,237 @@ +'use strict'; + +const {expect} = require('chai'); +const sinon = require('sinon'); +const mockFs = require('mock-fs'); + +describe('ContainerdProxyAdapter', () => { + let ContainerdProxyAdapter; + + before(() => { + ContainerdProxyAdapter = require('../lib/backends/containerd/proxy-adapter'); + }); + + afterEach(() => { + mockFs.restore(); + }); + + describe('constructor', () => { + it('should use default finch socket path from get-containerd-paths', () => { + const adapter = new ContainerdProxyAdapter({}); + expect(adapter.finchSocket).to.equal('/run/lando/finch.sock'); + }); + + it('should accept a custom finch socket path', () => { + const adapter = new ContainerdProxyAdapter({finchSocket: '/custom/finch.sock'}); + expect(adapter.finchSocket).to.equal('/custom/finch.sock'); + }); + + it('should accept config to derive paths', () => { + const adapter = new ContainerdProxyAdapter({ + config: {finchDaemonSocket: '/my/custom/finch.sock'}, + }); + expect(adapter.finchSocket).to.equal('/my/custom/finch.sock'); + }); + + it('should use provided debug function', () => { + const debugFn = sinon.spy(); + const adapter = new ContainerdProxyAdapter({debug: debugFn}); + expect(adapter.debug).to.equal(debugFn); + }); + }); + + describe('#ensureProxyNetworks', () => { + it('should ensure CNI configs for both edge and default networks', () => { + const cniDir = '/tmp/test-cni'; + mockFs({ + [cniDir]: {}, + }); + + const adapter = new ContainerdProxyAdapter({}); + const results = adapter.ensureProxyNetworks('landoproxyhyperion5000gandalfedition', { + cniNetconfPath: cniDir, + }); + + expect(results).to.have.property('landoproxyhyperion5000gandalfedition_edge'); + expect(results).to.have.property('landoproxyhyperion5000gandalfedition_default'); + }); + + it('should return true for networks that were newly created', () => { + const cniDir = '/tmp/test-cni-new'; + mockFs({ + [cniDir]: {}, + }); + + const adapter = new ContainerdProxyAdapter({}); + const results = adapter.ensureProxyNetworks('myproxy', { + cniNetconfPath: cniDir, + }); + + expect(results['myproxy_edge']).to.equal(true); + expect(results['myproxy_default']).to.equal(true); + }); + + it('should return false for networks that already have CNI configs', () => { + const cniDir = '/tmp/test-cni-existing'; + // Use the expected plugin chain so the conflist is treated as up-to-date + // (empty plugins: [] would trigger migration and return true) + const validPlugins = [ + {type: 'bridge', bridge: 'br-aaaaaaaaaaaa', isGateway: true, ipMasq: true, hairpinMode: true, + ipam: {ranges: [[{gateway: '10.4.1.1', subnet: '10.4.1.0/24'}]], routes: [{dst: '0.0.0.0/0'}], type: 'host-local'}}, + {type: 'firewall'}, + {type: 'tuning'}, + ]; + const validPlugins2 = [ + {type: 'bridge', bridge: 'br-bbbbbbbbbbbb', isGateway: true, ipMasq: true, hairpinMode: true, + ipam: {ranges: [[{gateway: '10.4.2.1', subnet: '10.4.2.0/24'}]], routes: [{dst: '0.0.0.0/0'}], type: 'host-local'}}, + {type: 'firewall'}, + {type: 'tuning'}, + ]; + mockFs({ + [cniDir]: { + 'nerdctl-myproxy_edge.conflist': JSON.stringify({cniVersion: '1.0.0', name: 'myproxy_edge', plugins: validPlugins}), + 'nerdctl-myproxy_default.conflist': JSON.stringify({cniVersion: '1.0.0', name: 'myproxy_default', plugins: validPlugins2}), + }, + }); + + const adapter = new ContainerdProxyAdapter({}); + const results = adapter.ensureProxyNetworks('myproxy', { + cniNetconfPath: cniDir, + }); + + expect(results['myproxy_edge']).to.equal(false); + expect(results['myproxy_default']).to.equal(false); + }); + + it('should pass debug function to ensureCniNetwork', () => { + const cniDir = '/tmp/test-cni-debug'; + mockFs({ + [cniDir]: {}, + }); + + const debugFn = sinon.spy(); + const adapter = new ContainerdProxyAdapter({debug: debugFn}); + adapter.ensureProxyNetworks('myproxy', { + cniNetconfPath: cniDir, + }); + + expect(debugFn.called).to.equal(true); + }); + }); + + describe('#ensureAppProxyNetwork', () => { + it('should ensure CNI config for the specified proxy network', () => { + const cniDir = '/tmp/test-cni-app'; + mockFs({ + [cniDir]: {}, + }); + + const adapter = new ContainerdProxyAdapter({}); + const result = adapter.ensureAppProxyNetwork('landoproxyhyperion5000gandalfedition_edge', { + cniNetconfPath: cniDir, + }); + + expect(result).to.equal(true); + }); + + it('should return false if config already exists', () => { + const cniDir = '/tmp/test-cni-app-existing'; + const networkName = 'landoproxyhyperion5000gandalfedition_edge'; + // Use the expected plugin chain so the conflist is treated as up-to-date + const validPlugins = [ + {type: 'bridge', bridge: 'br-aaaaaaaaaaaa', isGateway: true, ipMasq: true, hairpinMode: true, + ipam: {ranges: [[{gateway: '10.4.1.1', subnet: '10.4.1.0/24'}]], routes: [{dst: '0.0.0.0/0'}], type: 'host-local'}}, + {type: 'firewall'}, + {type: 'tuning'}, + ]; + mockFs({ + [cniDir]: { + [`nerdctl-${networkName}.conflist`]: JSON.stringify({ + cniVersion: '1.0.0', + name: networkName, + plugins: validPlugins, + }), + }, + }); + + const adapter = new ContainerdProxyAdapter({}); + const result = adapter.ensureAppProxyNetwork(networkName, { + cniNetconfPath: cniDir, + }); + + expect(result).to.equal(false); + }); + }); +}); + +describe('app-add-proxy-2-landonet hook (containerd compat)', () => { + let hook; + // Pre-require modules that use fs so mock-fs doesn't intercept their loading + const bluebird = require('bluebird'); + + before(() => { + // Pre-require the hook (and its transitive deps) before any mock-fs calls + hook = require('../hooks/app-add-proxy-2-landonet'); + }); + + afterEach(() => { + mockFs.restore(); + }); + + it('should not bail early for containerd backend', async () => { + // The hook should attempt to find the proxy container even with containerd. + // It will bail because the container doesn't exist, but it should NOT + // return immediately due to engineBackend === 'containerd'. + // + // Mock the CNI directory so ensureCniNetwork() can write conflist files + // without requiring real root-owned /etc/lando/cni/finch permissions. + mockFs({'/etc/lando/cni/finch': {}}); + + const mockApp = { + config: {proxy: []}, + log: {debug: sinon.spy()}, + }; + const existsSpy = sinon.stub().resolves(false); + const mockLando = { + config: { + proxy: 'ON', + networkBridge: 'lando_bridgenet_test', + proxyContainer: 'test-proxy-container', + }, + engine: { + engineBackend: 'containerd', + getNetwork: sinon.stub().returns({ + disconnect: sinon.stub().resolves(), + connect: sinon.stub().resolves(), + }), + exists: existsSpy, + }, + log: {debug: sinon.spy()}, + Promise: bluebird, + }; + + await hook(mockApp, mockLando); + + // The key assertion: engine.exists was called, meaning we did NOT bail + // early due to containerd backend check + expect(existsSpy.calledOnce).to.equal(true); + expect(existsSpy.calledWith({id: 'test-proxy-container'})).to.equal(true); + }); + + it('should still bail if proxy is not ON', async () => { + const mockApp = {config: {proxy: []}}; + const existsSpy = sinon.stub().resolves(false); + const mockLando = { + config: {proxy: 'OFF'}, + engine: { + engineBackend: 'containerd', + exists: existsSpy, + }, + }; + + await hook(mockApp, mockLando); + + // engine.exists should NOT have been called — bailed because proxy is OFF + expect(existsSpy.called).to.equal(false); + }); +}); diff --git a/test/docker-engine.spec.js b/test/docker-engine.spec.js new file mode 100644 index 000000000..2b3e8961e --- /dev/null +++ b/test/docker-engine.spec.js @@ -0,0 +1,88 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +const sinon = require('sinon'); + +const DockerEngine = require('./../components/docker-engine'); + +describe('docker-engine', () => { + describe('#build', () => { + it('should delegate containerd builds to buildx/buildctl', () => { + const engine = new DockerEngine({ + containerdMode: true, + userConfRoot: '/tmp/.lando-test', + }); + const stub = sinon.stub(engine, 'buildx').returns('delegated'); + + const result = engine.build('/tmp/Dockerfile', {tag: 'example/test:latest'}); + + expect(result).to.equal('delegated'); + sinon.assert.calledOnce(stub); + sinon.assert.calledWithMatch(stub, '/tmp/Dockerfile', {tag: 'example/test:latest'}); + }); + }); + + describe('#_getContainerdBuildctlCommand', () => { + it('should generate a buildctl command for containerd image builds', () => { + const engine = new DockerEngine({ + containerdMode: true, + buildctlBin: '/usr/local/lib/lando/bin/buildctl', + buildkitHost: 'unix:///run/lando/buildkitd.sock', + userConfRoot: '/tmp/.lando-test', + }); + + const result = engine._getContainerdBuildctlCommand({ + buildArgs: {FOO: 'bar', BAZ: 'qux'}, + context: '/tmp/build-context', + dockerfile: '/tmp/build-context/Dockerfile', + outputPath: '/tmp/build-context/image.tar', + tag: 'example/test:latest', + }); + + expect(result.command).to.equal('/usr/local/lib/lando/bin/buildctl'); + expect(result.args).to.deep.equal([ + '--addr', 'unix:///run/lando/buildkitd.sock', + 'build', + '--frontend', 'dockerfile.v0', + '--local', 'context=/tmp/build-context', + '--local', 'dockerfile=/tmp/build-context', + '--opt', 'filename=Dockerfile', + '--opt', `platform=${process.arch === 'arm64' ? 'linux/arm64' : 'linux/amd64'}`, + '--output', 'type=docker,name=example/test:latest,dest=/tmp/build-context/image.tar', + '--progress=plain', + '--opt', 'build-arg:FOO=bar', + '--opt', 'build-arg:BAZ=qux', + ]); + }); + }); + + describe('#_loadContainerdImageIntoFinch', () => { + it('should exist as a method for loading images via Dockerode/finch-daemon', () => { + // Per BRIEF: image loading uses Dockerode.loadImage() via finch-daemon, + // NOT sudo nerdctl load. The old _getContainerdNerdctlLoadCommand was + // never implemented because it would violate "never shell out to nerdctl". + const engine = new DockerEngine({ + containerdMode: true, + userConfRoot: '/tmp/.lando-test', + }); + + expect(engine._loadContainerdImageIntoFinch).to.be.a('function'); + expect(engine._loadContainerdImage).to.be.a('function'); + }); + + it('should delegate _loadContainerdImage to _loadContainerdImageIntoFinch', () => { + const engine = new DockerEngine({ + containerdMode: true, + userConfRoot: '/tmp/.lando-test', + }); + + const stub = sinon.stub(engine, '_loadContainerdImageIntoFinch').resolves('loaded'); + const result = engine._loadContainerdImage('/tmp/image.tar', 'test:latest'); + + sinon.assert.calledOnce(stub); + sinon.assert.calledWith(stub, '/tmp/image.tar', 'test:latest'); + return result.then(r => expect(r).to.equal('loaded')); + }); + }); +}); diff --git a/test/ensure-cni-network.spec.js b/test/ensure-cni-network.spec.js new file mode 100644 index 000000000..29432e61f --- /dev/null +++ b/test/ensure-cni-network.spec.js @@ -0,0 +1,667 @@ +'use strict'; + +const {expect} = require('chai'); +const sinon = require('sinon'); +const mockFs = require('mock-fs'); +const fs = require('fs'); +const path = require('path'); + +const ensureCniNetwork = require('../utils/ensure-cni-network'); + +/** + * Helper to create a mock CNI conflist file for testing subnet allocation. + * + * @param {string} dir - CNI config directory. + * @param {string} networkName - Network name. + * @param {number} subnetOctet - Third octet of the 10.4.x.0/24 subnet. + * @return {string} Path to the created conflist file. + */ +const writeConflist = (dir, networkName, subnetOctet) => { + const filePath = path.join(dir, `nerdctl-${networkName}.conflist`); + const content = { + cniVersion: '1.0.0', + name: networkName, + plugins: [{ + type: 'bridge', + ipam: { + type: 'host-local', + ranges: [[{ + gateway: `10.4.${subnetOctet}.1`, + subnet: `10.4.${subnetOctet}.0/24`, + }]], + }, + }], + }; + fs.writeFileSync(filePath, JSON.stringify(content, null, 2), 'utf8'); + return filePath; +}; + +describe('ensure-cni-network', () => { + const cniDir = '/tmp/test-cni'; + + afterEach(() => { + mockFs.restore(); + }); + + describe('conflist creation', () => { + it('should create a new conflist when none exists', () => { + mockFs({[cniDir]: {}}); + + const result = ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + expect(result).to.be.true; + expect(fs.existsSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'))).to.be.true; + }); + + it('should return false when conflist already exists', () => { + mockFs({[cniDir]: {}}); + + // Create first + ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + // Second call should be a no-op + const result = ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + expect(result).to.be.false; + }); + + it('should create valid JSON conflist content', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const conflistPath = path.join(cniDir, 'nerdctl-testnet.conflist'); + const content = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + + expect(content).to.have.property('cniVersion', '1.0.0'); + expect(content).to.have.property('name', 'testnet'); + expect(content).to.have.property('nerdctlID').that.is.a('string'); + expect(content.nerdctlID).to.have.lengthOf(64); // 32 bytes hex + expect(content).to.have.property('nerdctlLabels').that.deep.equals({}); + expect(content).to.have.property('plugins').that.is.an('array'); + }); + + it('should include bridge, firewall, and tuning plugins', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const conflistPath = path.join(cniDir, 'nerdctl-testnet.conflist'); + const content = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + const pluginTypes = content.plugins.map(p => p.type); + + expect(pluginTypes).to.deep.equal(['bridge', 'firewall', 'tuning']); + }); + + it('should NOT include portmap plugin', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const conflistPath = path.join(cniDir, 'nerdctl-testnet.conflist'); + const content = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + const pluginTypes = content.plugins.map(p => p.type); + + expect(pluginTypes).to.not.include('portmap'); + }); + + it('should NOT include tc-redirect-tap plugin', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const conflistPath = path.join(cniDir, 'nerdctl-testnet.conflist'); + const content = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + const pluginTypes = content.plugins.map(p => p.type); + + expect(pluginTypes).to.not.include('tc-redirect-tap'); + }); + + it('should configure the bridge plugin with correct properties', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const conflistPath = path.join(cniDir, 'nerdctl-testnet.conflist'); + const content = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + const bridge = content.plugins[0]; + + expect(bridge.isGateway).to.be.true; + expect(bridge.ipMasq).to.be.true; + expect(bridge.hairpinMode).to.be.true; + expect(bridge.bridge).to.match(/^br-[a-f0-9]{12}$/); + }); + + it('should generate unique nerdctlID for each conflist', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('net1', {cniNetconfPath: cniDir}); + ensureCniNetwork('net2', {cniNetconfPath: cniDir}); + + const c1 = JSON.parse(fs.readFileSync(path.join(cniDir, 'nerdctl-net1.conflist'), 'utf8')); + const c2 = JSON.parse(fs.readFileSync(path.join(cniDir, 'nerdctl-net2.conflist'), 'utf8')); + + expect(c1.nerdctlID).to.not.equal(c2.nerdctlID); + }); + }); + + describe('subnet allocation', () => { + it('should allocate subnet 10.4.1.0/24 when no existing configs', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('first_net', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-first_net.conflist'), 'utf8'), + ); + const subnet = content.plugins[0].ipam.ranges[0][0].subnet; + const gateway = content.plugins[0].ipam.ranges[0][0].gateway; + + expect(subnet).to.equal('10.4.1.0/24'); + expect(gateway).to.equal('10.4.1.1'); + }); + + it('should increment subnet past existing configs', () => { + mockFs({[cniDir]: {}}); + + // Pre-populate with subnet 10.4.3.0/24 + writeConflist(cniDir, 'existing', 3); + + ensureCniNetwork('newnet', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-newnet.conflist'), 'utf8'), + ); + const subnet = content.plugins[0].ipam.ranges[0][0].subnet; + + expect(subnet).to.equal('10.4.4.0/24'); + }); + + it('should find the max subnet across multiple existing configs', () => { + mockFs({[cniDir]: {}}); + + writeConflist(cniDir, 'net_a', 1); + writeConflist(cniDir, 'net_b', 5); + writeConflist(cniDir, 'net_c', 3); + + ensureCniNetwork('newnet', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-newnet.conflist'), 'utf8'), + ); + const subnet = content.plugins[0].ipam.ranges[0][0].subnet; + + // Should be 5 + 1 = 6 + expect(subnet).to.equal('10.4.6.0/24'); + }); + + it('should allocate sequential subnets for multiple new networks', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('net1', {cniNetconfPath: cniDir}); + ensureCniNetwork('net2', {cniNetconfPath: cniDir}); + ensureCniNetwork('net3', {cniNetconfPath: cniDir}); + + const subnets = ['net1', 'net2', 'net3'].map(name => { + const c = JSON.parse(fs.readFileSync(path.join(cniDir, `nerdctl-${name}.conflist`), 'utf8')); + return c.plugins[0].ipam.ranges[0][0].subnet; + }); + + expect(subnets).to.deep.equal([ + '10.4.1.0/24', + '10.4.2.0/24', + '10.4.3.0/24', + ]); + }); + + it('should return false when all 255 subnets are exhausted', () => { + // Build a directory with existing configs using subnets 1-255 + const dirContents = {}; + for (let i = 1; i <= 255; i++) { + const name = `net_${i}`; + dirContents[`nerdctl-${name}.conflist`] = JSON.stringify({ + plugins: [{ + type: 'bridge', + ipam: {ranges: [[{subnet: `10.4.${i}.0/24`}]]}, + }], + }); + } + + mockFs({[cniDir]: dirContents}); + + const result = ensureCniNetwork('one_too_many', {cniNetconfPath: cniDir}); + + expect(result).to.be.false; + }); + + it('should skip invalid JSON files when scanning for max subnet', () => { + mockFs({ + [cniDir]: { + 'nerdctl-valid.conflist': JSON.stringify({ + plugins: [{ + type: 'bridge', + ipam: {ranges: [[{subnet: '10.4.2.0/24'}]]}, + }], + }), + 'nerdctl-broken.conflist': '{{ not json', + }, + }); + + ensureCniNetwork('newnet', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-newnet.conflist'), 'utf8'), + ); + // Should still find max from valid file (2) and use 3 + expect(content.plugins[0].ipam.ranges[0][0].subnet).to.equal('10.4.3.0/24'); + }); + + it('should skip conflist files with non-matching subnet patterns', () => { + mockFs({ + [cniDir]: { + 'nerdctl-other.conflist': JSON.stringify({ + plugins: [{ + type: 'bridge', + ipam: {ranges: [[{subnet: '192.168.1.0/24'}]]}, + }], + }), + }, + }); + + ensureCniNetwork('newnet', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-newnet.conflist'), 'utf8'), + ); + // 192.168 doesn't match 10.4.x pattern, so maxSubnet stays 0, new gets 1 + expect(content.plugins[0].ipam.ranges[0][0].subnet).to.equal('10.4.1.0/24'); + }); + }); + + describe('IPAM routes', () => { + it('should include a default route', () => { + mockFs({[cniDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + const content = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-testnet.conflist'), 'utf8'), + ); + const routes = content.plugins[0].ipam.routes; + + expect(routes).to.deep.equal([{dst: '0.0.0.0/0'}]); + }); + }); + + describe('error handling', () => { + it('should throw on EACCES permission error with helpful message', () => { + mockFs({[cniDir]: {}}); + + // Stub writeFileSync to simulate EACCES after mock-fs is set up + const eaccesErr = new Error('EACCES: permission denied'); + eaccesErr.code = 'EACCES'; + const writeStub = sinon.stub(fs, 'writeFileSync').throws(eaccesErr); + + try { + let thrown; + try { + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + } catch (err) { + thrown = err; + } + expect(thrown).to.be.an.instanceOf(Error); + expect(thrown.message).to.match(/Permission denied/); + expect(thrown.message).to.include('lando setup'); + } finally { + writeStub.restore(); + } + }); + + it('should return false for non-permission write errors', () => { + // Use a path where the parent directory doesn't exist + // mock-fs won't auto-create parents, so rename will fail + const badDir = '/tmp/nonexistent-parent/cni'; + mockFs({}); + + const result = ensureCniNetwork('testnet', {cniNetconfPath: badDir}); + expect(result).to.be.false; + }); + + it('should handle non-existent CNI directory gracefully when scanning', () => { + // Directory doesn't exist at all — scanning should not throw + mockFs({}); + + // Will fail on write but the scan part should not throw + const result = ensureCniNetwork('testnet', {cniNetconfPath: '/nonexistent/dir'}); + expect(result).to.be.false; + }); + }); + + describe('debug logging', () => { + it('should call debug when conflist already exists', () => { + mockFs({[cniDir]: {}}); + + const messages = []; + const debug = (...args) => messages.push(args); + + // Create first, then check debug on second call + ensureCniNetwork('testnet', {cniNetconfPath: cniDir, debug}); + ensureCniNetwork('testnet', {cniNetconfPath: cniDir, debug}); + + const existsMsg = messages.find(m => m[0].includes('already exists')); + expect(existsMsg).to.exist; + }); + + it('should call debug with subnet info on successful creation', () => { + mockFs({[cniDir]: {}}); + + const messages = []; + const debug = (...args) => messages.push(args); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir, debug}); + + const createdMsg = messages.find(m => m[0].includes('created CNI conflist')); + expect(createdMsg).to.exist; + }); + + it('should call debug when subnets are exhausted', () => { + const dirContents = {}; + for (let i = 1; i <= 255; i++) { + dirContents[`nerdctl-net${i}.conflist`] = JSON.stringify({ + plugins: [{type: 'bridge', ipam: {ranges: [[{subnet: `10.4.${i}.0/24`}]]}}], + }); + } + mockFs({[cniDir]: dirContents}); + + const messages = []; + const debug = (...args) => messages.push(args); + + ensureCniNetwork('overflow', {cniNetconfPath: cniDir, debug}); + + const exhaustedMsg = messages.find(m => m[0].includes('no available subnets')); + expect(exhaustedMsg).to.exist; + }); + }); + + describe('options', () => { + it('should use default cniNetconfPath when not provided', () => { + // We can't test the actual default path (/etc/lando/cni/finch) without root, + // but we can verify the conflist path construction + mockFs({'/etc/lando/cni/finch': {}}); + + const result = ensureCniNetwork('testnet'); + + expect(result).to.be.true; + expect(fs.existsSync('/etc/lando/cni/finch/nerdctl-testnet.conflist')).to.be.true; + }); + + it('should use custom cniNetconfPath from opts', () => { + const customDir = '/custom/cni/path'; + mockFs({[customDir]: {}}); + + ensureCniNetwork('testnet', {cniNetconfPath: customDir}); + + expect(fs.existsSync(path.join(customDir, 'nerdctl-testnet.conflist'))).to.be.true; + }); + + it('should work with no opts argument at all', () => { + mockFs({'/etc/lando/cni/finch': {}}); + + // Should not throw + const result = ensureCniNetwork('testnet'); + expect(result).to.be.true; + }); + }); + + describe('conflist migration', () => { + it('should migrate old conflist with tc-redirect-tap to new plugin chain', () => { + const oldConflist = { + cniVersion: '1.0.0', + name: 'myapp_default', + nerdctlID: 'a'.repeat(64), + nerdctlLabels: {}, + plugins: [ + { + type: 'bridge', + bridge: 'br-aaaaaaaaaaaa', + isGateway: true, + ipMasq: true, + hairpinMode: true, + ipam: { + ranges: [[{gateway: '10.4.3.1', subnet: '10.4.3.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + {type: 'tc-redirect-tap'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-myapp_default.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + const result = ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + expect(result).to.be.true; + + const updated = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'), 'utf8'), + ); + const pluginTypes = updated.plugins.map(p => p.type); + expect(pluginTypes).to.deep.equal(['bridge', 'firewall', 'tuning']); + }); + + it('should preserve subnet during migration', () => { + const oldConflist = { + cniVersion: '1.0.0', + name: 'myapp_default', + nerdctlID: 'b'.repeat(64), + nerdctlLabels: {}, + plugins: [ + { + type: 'bridge', + bridge: 'br-bbbbbbbbbbbb', + isGateway: true, + ipMasq: true, + hairpinMode: true, + ipam: { + ranges: [[{gateway: '10.4.7.1', subnet: '10.4.7.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + {type: 'tc-redirect-tap'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-myapp_default.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + const updated = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'), 'utf8'), + ); + const subnet = updated.plugins[0].ipam.ranges[0][0].subnet; + const gateway = updated.plugins[0].ipam.ranges[0][0].gateway; + + expect(subnet).to.equal('10.4.7.0/24'); + expect(gateway).to.equal('10.4.7.1'); + }); + + it('should preserve bridge name during migration', () => { + const oldConflist = { + cniVersion: '1.0.0', + name: 'myapp_default', + nerdctlID: 'c'.repeat(64), + nerdctlLabels: {}, + plugins: [ + { + type: 'bridge', + bridge: 'br-cccccccccccc', + isGateway: true, + ipMasq: true, + ipam: { + ranges: [[{gateway: '10.4.2.1', subnet: '10.4.2.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + {type: 'tc-redirect-tap'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-myapp_default.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + const updated = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'), 'utf8'), + ); + + expect(updated.plugins[0].bridge).to.equal('br-cccccccccccc'); + }); + + it('should preserve nerdctlID during migration', () => { + const nerdctlID = 'd'.repeat(64); + const oldConflist = { + cniVersion: '1.0.0', + name: 'myapp_default', + nerdctlID, + nerdctlLabels: {foo: 'bar'}, + plugins: [ + { + type: 'bridge', + bridge: 'br-dddddddddddd', + isGateway: true, + ipMasq: true, + ipam: { + ranges: [[{gateway: '10.4.1.1', subnet: '10.4.1.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + {type: 'tc-redirect-tap'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-myapp_default.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + const updated = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'), 'utf8'), + ); + + expect(updated.nerdctlID).to.equal(nerdctlID); + expect(updated.nerdctlLabels).to.deep.equal({foo: 'bar'}); + }); + + it('should return false for conflist with correct plugin chain', () => { + mockFs({[cniDir]: {}}); + + // First call creates with correct plugins + ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + // Second call should detect correct plugins and skip + const result = ensureCniNetwork('testnet', {cniNetconfPath: cniDir}); + + expect(result).to.be.false; + }); + + it('should migrate conflist missing tuning plugin', () => { + const oldConflist = { + cniVersion: '1.0.0', + name: 'myapp_default', + nerdctlID: 'e'.repeat(64), + nerdctlLabels: {}, + plugins: [ + { + type: 'bridge', + bridge: 'br-eeeeeeeeeeee', + isGateway: true, + ipMasq: true, + ipam: { + ranges: [[{gateway: '10.4.5.1', subnet: '10.4.5.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-myapp_default.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + const result = ensureCniNetwork('myapp_default', {cniNetconfPath: cniDir}); + + expect(result).to.be.true; + + const updated = JSON.parse( + fs.readFileSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'), 'utf8'), + ); + const pluginTypes = updated.plugins.map(p => p.type); + expect(pluginTypes).to.deep.equal(['bridge', 'firewall', 'tuning']); + }); + + it('should log debug message during migration', () => { + const oldConflist = { + cniVersion: '1.0.0', + name: 'testnet', + nerdctlID: 'f'.repeat(64), + nerdctlLabels: {}, + plugins: [ + { + type: 'bridge', + bridge: 'br-ffffffffffff', + isGateway: true, + ipMasq: true, + ipam: { + ranges: [[{gateway: '10.4.1.1', subnet: '10.4.1.0/24'}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + {type: 'firewall'}, + {type: 'tc-redirect-tap'}, + ], + }; + + mockFs({ + [cniDir]: { + 'nerdctl-testnet.conflist': JSON.stringify(oldConflist, null, 2), + }, + }); + + const messages = []; + const debug = (...args) => messages.push(args); + + ensureCniNetwork('testnet', {cniNetconfPath: cniDir, debug}); + + const migrateMsg = messages.find(m => m[0].includes('stale plugin chain')); + expect(migrateMsg).to.exist; + const doneMsg = messages.find(m => m[0].includes('migrated CNI conflist')); + expect(doneMsg).to.exist; + }); + }); +}); diff --git a/test/ensure-compose-cni-networks.spec.js b/test/ensure-compose-cni-networks.spec.js new file mode 100644 index 000000000..4810bf413 --- /dev/null +++ b/test/ensure-compose-cni-networks.spec.js @@ -0,0 +1,402 @@ +'use strict'; + +const {expect} = require('chai'); +const mockFs = require('mock-fs'); +const path = require('path'); +const yaml = require('js-yaml'); +const fs = require('fs'); + +const ensureComposeCniNetworks = require('../utils/ensure-compose-cni-networks'); + +/** + * Helper to create a mock compose file on the mock filesystem. + * + * @param {string} filePath - Path to write the compose file. + * @param {Object} content - Compose file content as a JS object. + */ +const writeComposeFile = (filePath, content) => { + fs.mkdirSync(path.dirname(filePath), {recursive: true}); + fs.writeFileSync(filePath, yaml.dump(content), 'utf8'); +}; + +describe('ensure-compose-cni-networks', () => { + const cniDir = '/tmp/test-cni'; + const composeDir = '/tmp/test-compose'; + + afterEach(() => { + mockFs.restore(); + }); + + describe('default network handling', () => { + it('should always ensure the _default network', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'test.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + expect(result).to.include('myapp_default'); + expect(fs.existsSync(path.join(cniDir, 'nerdctl-myapp_default.conflist'))).to.be.true; + }); + + it('should ensure _default even when compose file has no networks section', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'minimal.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + }); + + const result = ensureComposeCniNetworks([composeFile], 'testproj', {cniNetconfPath: cniDir}); + expect(result).to.have.lengthOf(1); + expect(result[0]).to.equal('testproj_default'); + }); + + it('should ensure _default even when compose files array is empty', () => { + mockFs({ + [cniDir]: {}, + }); + + const result = ensureComposeCniNetworks([], 'emptyproj', {cniNetconfPath: cniDir}); + expect(result).to.include('emptyproj_default'); + }); + }); + + describe('custom network extraction', () => { + it('should ensure CNI configs for explicitly defined networks', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'custom-nets.yml'); + writeComposeFile(composeFile, { + services: { + web: {image: 'nginx', networks: ['frontend']}, + db: {image: 'postgres', networks: ['backend']}, + }, + networks: { + frontend: {driver: 'bridge'}, + backend: {driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + expect(result).to.include('myapp_default'); + expect(result).to.include('myapp_frontend'); + expect(result).to.include('myapp_backend'); + expect(result).to.have.lengthOf(3); + + expect(fs.existsSync(path.join(cniDir, 'nerdctl-myapp_frontend.conflist'))).to.be.true; + expect(fs.existsSync(path.join(cniDir, 'nerdctl-myapp_backend.conflist'))).to.be.true; + }); + + it('should use explicit name when network has name: property', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'named-net.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + mynet: {name: 'custom-global-network', driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + expect(result).to.include('custom-global-network'); + expect(result).not.to.include('myapp_mynet'); + expect(fs.existsSync(path.join(cniDir, 'nerdctl-custom-global-network.conflist'))).to.be.true; + }); + + it('should handle networks with null/empty config', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'null-config.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + mynet: null, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + expect(result).to.include('myapp_mynet'); + }); + }); + + describe('external network handling', () => { + it('should skip networks with external: true', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'external-net.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + landonet: {external: true}, + internal: {driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + expect(result).not.to.include('myapp_landonet'); + expect(result).not.to.include('landonet'); + expect(result).to.include('myapp_internal'); + }); + + it('should skip networks with external as object (compose v2 syntax)', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'external-obj.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + landonet: {external: {name: 'some_external_net'}}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + expect(result).not.to.include('myapp_landonet'); + expect(result).not.to.include('some_external_net'); + }); + }); + + describe('multiple compose files', () => { + it('should merge networks from multiple compose files', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const file1 = path.join(composeDir, 'base.yml'); + const file2 = path.join(composeDir, 'override.yml'); + + writeComposeFile(file1, { + services: {web: {image: 'nginx'}}, + networks: { + frontend: {driver: 'bridge'}, + }, + }); + + writeComposeFile(file2, { + services: {api: {image: 'node'}}, + networks: { + backend: {driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([file1, file2], 'myapp', {cniNetconfPath: cniDir}); + + expect(result).to.include('myapp_default'); + expect(result).to.include('myapp_frontend'); + expect(result).to.include('myapp_backend'); + expect(result).to.have.lengthOf(3); + }); + + it('should let later files override network config from earlier files', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const file1 = path.join(composeDir, 'base.yml'); + const file2 = path.join(composeDir, 'override.yml'); + + writeComposeFile(file1, { + networks: { + mynet: {driver: 'bridge'}, + }, + }); + + // Later file changes name — should use the overridden name + writeComposeFile(file2, { + networks: { + mynet: {name: 'overridden-name', driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([file1, file2], 'myapp', {cniNetconfPath: cniDir}); + + expect(result).to.include('overridden-name'); + expect(result).not.to.include('myapp_mynet'); + }); + }); + + describe('proxy network scenario', () => { + it('should handle the proxy compose pattern (edge network)', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + // This replicates _proxy.js builder output + const composeFile = path.join(composeDir, 'proxy.yml'); + writeComposeFile(composeFile, { + services: { + proxy: { + image: 'traefik:2.11.31', + networks: ['edge'], + }, + }, + networks: { + edge: {driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], '_lando_', {cniNetconfPath: cniDir}); + + expect(result).to.include('_lando__default'); + expect(result).to.include('_lando__edge'); + }); + }); + + describe('deduplication', () => { + it('should not duplicate _default if also explicitly defined', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'explicit-default.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + default: {driver: 'bridge'}, + }, + }); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + // Should appear only once + const defaultCount = result.filter(n => n === 'myapp_default').length; + expect(defaultCount).to.equal(1); + }); + }); + + describe('error handling', () => { + it('should gracefully handle missing compose files', () => { + mockFs({ + [cniDir]: {}, + }); + + // Non-existent file should not crash — just ensure _default + const result = ensureComposeCniNetworks(['/nonexistent/compose.yml'], 'myapp', {cniNetconfPath: cniDir}); + expect(result).to.include('myapp_default'); + }); + + it('should gracefully handle invalid YAML in compose files', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'invalid.yml'); + fs.mkdirSync(composeDir, {recursive: true}); + fs.writeFileSync(composeFile, '{{ invalid yaml {{', 'utf8'); + + const result = ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + expect(result).to.include('myapp_default'); + }); + + it('should call debug on parse errors', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'bad.yml'); + fs.mkdirSync(composeDir, {recursive: true}); + fs.writeFileSync(composeFile, '{{ bad {{', 'utf8'); + + let debugCalled = false; + const debug = () => { debugCalled = true; }; + + ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir, debug}); + expect(debugCalled).to.be.true; + }); + }); + + describe('CNI conflist content', () => { + it('should create valid CNI conflist JSON for each network', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'content-test.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + custom: {driver: 'bridge'}, + }, + }); + + ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + // Validate the conflist for the custom network + const conflistPath = path.join(cniDir, 'nerdctl-myapp_custom.conflist'); + expect(fs.existsSync(conflistPath)).to.be.true; + + const conflist = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + expect(conflist).to.have.property('cniVersion', '1.0.0'); + expect(conflist).to.have.property('name', 'myapp_custom'); + expect(conflist).to.have.property('plugins').that.is.an('array'); + expect(conflist.plugins[0]).to.have.property('type', 'bridge'); + expect(conflist.plugins[0].ipam).to.have.property('type', 'host-local'); + }); + + it('should allocate unique subnets for each network', () => { + mockFs({ + [cniDir]: {}, + [composeDir]: {}, + }); + + const composeFile = path.join(composeDir, 'multi.yml'); + writeComposeFile(composeFile, { + services: {web: {image: 'nginx'}}, + networks: { + net1: {}, + net2: {}, + net3: {}, + }, + }); + + ensureComposeCniNetworks([composeFile], 'myapp', {cniNetconfPath: cniDir}); + + // Read all conflist files and extract subnets + const subnets = new Set(); + const files = fs.readdirSync(cniDir).filter(f => f.endsWith('.conflist')); + for (const file of files) { + const conflist = JSON.parse(fs.readFileSync(path.join(cniDir, file), 'utf8')); + const subnet = conflist.plugins[0].ipam.ranges[0][0].subnet; + expect(subnets.has(subnet)).to.be.false; + subnets.add(subnet); + } + + // 4 networks: default + net1 + net2 + net3 + expect(subnets.size).to.equal(4); + }); + }); +}); diff --git a/test/finch-daemon-manager.spec.js b/test/finch-daemon-manager.spec.js new file mode 100644 index 000000000..98555a633 --- /dev/null +++ b/test/finch-daemon-manager.spec.js @@ -0,0 +1,530 @@ +/* + * Tests for finch-daemon-manager. + * @file finch-daemon-manager.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const mockFs = require('mock-fs'); +const fs = require('fs'); +const path = require('path'); +const os = require('os'); +const FinchDaemonManager = require('./../lib/backends/containerd/finch-daemon-manager'); + +// Provide a noop debug function so tests don't need a real Lando Log instance +const noopDebug = () => {}; + +describe('finch-daemon-manager', () => { + describe('#constructor defaults', () => { + it('should set correct default bin path', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + const expected = path.join(os.homedir(), '.lando', 'bin', 'finch-daemon'); + mgr.finchDaemonBin.should.equal(expected); + }); + + it('should set correct default socket path', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + mgr.socketPath.should.equal('/run/lando/finch.sock'); + }); + + it('should set correct default containerd socket', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + mgr.containerdSocket.should.equal('/run/lando/containerd.sock'); + }); + + it('should set correct default pid file', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + const expected = path.join(os.homedir(), '.lando', 'run', 'finch-daemon.pid'); + mgr.pidFile.should.equal(expected); + }); + + it('should set correct default CNI plugin path', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + mgr.cniPath.should.equal('/usr/local/lib/lando/cni/bin'); + }); + }); + + describe('#constructor custom options', () => { + it('should accept custom userConfRoot', () => { + const mgr = new FinchDaemonManager({userConfRoot: '/custom/root', debug: noopDebug}); + mgr.finchDaemonBin.should.equal(path.join('/custom/root', 'bin', 'finch-daemon')); + // socketPath and containerdSocket now default to /run/lando/ (not userConfRoot) + mgr.socketPath.should.equal('/run/lando/finch.sock'); + mgr.containerdSocket.should.equal('/run/lando/containerd.sock'); + mgr.pidFile.should.equal(path.join('/custom/root', 'run', 'finch-daemon.pid')); + }); + + it('should accept custom finchDaemonBin', () => { + const mgr = new FinchDaemonManager({finchDaemonBin: '/usr/local/bin/finch-daemon', debug: noopDebug}); + mgr.finchDaemonBin.should.equal('/usr/local/bin/finch-daemon'); + }); + + it('should accept custom socketPath', () => { + const mgr = new FinchDaemonManager({socketPath: '/tmp/finch.sock', debug: noopDebug}); + mgr.socketPath.should.equal('/tmp/finch.sock'); + }); + + it('should accept custom containerdSocket', () => { + const mgr = new FinchDaemonManager({containerdSocket: '/tmp/containerd.sock', debug: noopDebug}); + mgr.containerdSocket.should.equal('/tmp/containerd.sock'); + }); + + it('should accept custom logDir', () => { + const mgr = new FinchDaemonManager({logDir: '/tmp/logs', debug: noopDebug}); + mgr.logDir.should.equal('/tmp/logs'); + }); + }); + + describe('#getSocketPath', () => { + it('should return the configured socket path', () => { + const mgr = new FinchDaemonManager({socketPath: '/var/run/finch.sock', debug: noopDebug}); + mgr.getSocketPath().should.equal('/var/run/finch.sock'); + }); + + it('should return default socket path when no custom path given', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + mgr.getSocketPath().should.equal('/run/lando/finch.sock'); + }); + }); + + describe('#getStartArgs', () => { + it('should return correct args array', () => { + const mgr = new FinchDaemonManager({ + socketPath: '/tmp/finch.sock', + containerdSocket: '/tmp/containerd.sock', + debug: noopDebug, + }); + const args = mgr.getStartArgs(); + expect(args).to.be.an('array'); + args.length.should.equal(13); + }); + + it('should include --socket-addr with plain socket path', () => { + const mgr = new FinchDaemonManager({socketPath: '/tmp/finch.sock', debug: noopDebug}); + const args = mgr.getStartArgs(); + const idx = args.indexOf('--socket-addr'); + expect(idx).to.not.equal(-1); + args[idx + 1].should.equal('/tmp/finch.sock'); + }); + + it('should include --config-file for the finch-daemon config', () => { + const mgr = new FinchDaemonManager({containerdSocket: '/tmp/containerd.sock', debug: noopDebug}); + const args = mgr.getStartArgs(); + const idx = args.indexOf('--config-file'); + expect(idx).to.not.equal(-1); + args[idx + 1].should.match(/finch-daemon\.toml$/); + }); + + it('should include credential socket args', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + const args = mgr.getStartArgs(); + expect(args).to.include('--credential-socket-addr'); + expect(args).to.include('--credential-socket-owner'); + }); + + it('should include --socket-owner', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + const args = mgr.getStartArgs(); + const idx = args.indexOf('--socket-owner'); + expect(idx).to.not.equal(-1); + const owner = args[idx + 1]; + expect(owner).to.be.a('string'); + parseInt(owner, 10).should.be.a('number'); + }); + + it('should include --debug flag', () => { + const mgr = new FinchDaemonManager({debug: noopDebug}); + const args = mgr.getStartArgs(); + expect(args).to.include('--debug'); + }); + }); + + // --- Lifecycle tests requiring mock-fs and sinon --- + + describe('#_isProcessRunning', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + const testConfRoot = '/tmp/test-finch-mgr'; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + }); + + it('should return false when no PID file exists', () => { + mockFs({[testConfRoot]: {}}); + + const mgr = new FinchDaemonManager({userConfRoot: testConfRoot, debug: noopDebug}); + expect(mgr._isProcessRunning()).to.be.false; + }); + + it('should return false when PID file contains non-numeric data', () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': 'not-a-number\n', + }, + }); + + const mgr = new FinchDaemonManager({userConfRoot: testConfRoot, debug: noopDebug}); + expect(mgr._isProcessRunning()).to.be.false; + }); + + it('should return true when process.kill(pid, 0) succeeds', () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + }); + + sandbox.stub(process, 'kill').returns(true); + + const mgr = new FinchDaemonManager({userConfRoot: testConfRoot, debug: noopDebug}); + expect(mgr._isProcessRunning()).to.be.true; + }); + + it('should return false when process.kill(pid, 0) throws ESRCH', () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '99999', + }, + }); + + const esrchErr = new Error('ESRCH'); + esrchErr.code = 'ESRCH'; + sandbox.stub(process, 'kill').throws(esrchErr); + + const mgr = new FinchDaemonManager({userConfRoot: testConfRoot, debug: noopDebug}); + expect(mgr._isProcessRunning()).to.be.false; + }); + + it('should return true when process.kill throws EPERM (running as different user)', () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + }); + + const epermErr = new Error('EPERM'); + epermErr.code = 'EPERM'; + sandbox.stub(process, 'kill').throws(epermErr); + + const mgr = new FinchDaemonManager({userConfRoot: testConfRoot, debug: noopDebug}); + expect(mgr._isProcessRunning()).to.be.true; + }); + }); + + describe('#start', () => { + // NOTE: start() uses `const {spawn} = require('child_process')` which captures + // the reference at import time. Without proxyquire/rewire, we cannot intercept + // the spawn call via sinon. Tests here cover the early-return path and + // pre-spawn setup behavior that can be verified without mocking spawn. + + /** @type {sinon.SinonSandbox} */ + let sandbox; + const testConfRoot = '/tmp/test-finch-start'; + const testSocketDir = '/tmp/test-finch-sockets'; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + }); + + it('should return early without side effects if already running', async () => { + mockFs({ + [testSocketDir]: {}, + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + }); + + sandbox.stub(process, 'kill').returns(true); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + await mgr.start(); + + // Config file should NOT have been written (early return before any setup) + expect(fs.existsSync(mgr.configPath)).to.be.false; + }); + + it('should generate correct start args including all required flags', () => { + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + const args = mgr.getStartArgs(); + + // Verify all critical args are present and paired correctly + const flagPairs = {}; + for (let i = 0; i < args.length; i++) { + if (args[i].startsWith('--') && i + 1 < args.length && !args[i + 1].startsWith('--')) { + flagPairs[args[i]] = args[i + 1]; + } + } + + expect(flagPairs['--socket-addr']).to.equal(path.join(testSocketDir, 'finch.sock')); + expect(flagPairs['--pidfile']).to.equal(path.join(testConfRoot, 'run', 'finch-daemon.pid')); + expect(flagPairs['--config-file']).to.equal(path.join(testConfRoot, 'config', 'finch-daemon.toml')); + expect(flagPairs['--credential-socket-addr']).to.equal( + path.join(testSocketDir, 'finch-credential.sock'), + ); + expect(args).to.include('--debug'); + }); + }); + + describe('#stop', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + const testConfRoot = '/tmp/test-finch-stop'; + const testSocketDir = '/tmp/test-finch-stop-sockets'; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + }); + + it('should do nothing if no PID file exists', async () => { + mockFs({[testConfRoot]: {}}); + + const killStub = sandbox.stub(process, 'kill'); + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + await mgr.stop(); + + expect(killStub.called).to.be.false; + }); + + it('should clean up if PID file has invalid content', async () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': 'garbage', + }, + }); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + await mgr.stop(); + + expect(fs.existsSync(mgr.pidFile)).to.be.false; + }); + + it('should clean up if process is already gone', async () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '99999', + }, + }); + + const esrchErr = new Error('ESRCH'); + esrchErr.code = 'ESRCH'; + sandbox.stub(process, 'kill').throws(esrchErr); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + await mgr.stop(); + + expect(fs.existsSync(mgr.pidFile)).to.be.false; + }); + + it('should send SIGTERM to running process and clean up', async () => { + const clock = sandbox.useFakeTimers(); + + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + [testSocketDir]: { + 'finch.sock': '', + 'finch-credential.sock': '', + }, + }); + + const esrchErr = new Error('ESRCH'); + esrchErr.code = 'ESRCH'; + let sigTermSent = false; + const killStub = sandbox.stub(process, 'kill').callsFake((pid, signal) => { + if (signal === 'SIGTERM') { + sigTermSent = true; + return true; + } + // signal 0 = existence check: succeed before SIGTERM, throw ESRCH after + if (signal === 0) { + if (sigTermSent) throw esrchErr; + return true; + } + return true; + }); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + + const stopPromise = mgr.stop(); + clock.tick(1000); + await stopPromise; + + const sigtermCall = killStub.getCalls().find(c => c.args[1] === 'SIGTERM'); + expect(sigtermCall).to.exist; + expect(sigtermCall.args[0]).to.equal(12345); + + // Verify cleanup was performed after graceful shutdown + expect(fs.existsSync(mgr.pidFile)).to.be.false; + expect(fs.existsSync(mgr.socketPath)).to.be.false; + expect(fs.existsSync(mgr.credentialSocketPath)).to.be.false; + }); + }); + + describe('#isRunning', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + const testConfRoot = '/tmp/test-finch-running'; + const testSocketDir = '/tmp/test-finch-running-sockets'; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + }); + + it('should return false when process is not running', async () => { + mockFs({[testConfRoot]: {}}); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should return false when process runs but socket is missing', async () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + }); + + sandbox.stub(process, 'kill').returns(true); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should return true when process runs and socket exists', async () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + [testSocketDir]: { + 'finch.sock': '', + }, + }); + + sandbox.stub(process, 'kill').returns(true); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + const result = await mgr.isRunning(); + expect(result).to.be.true; + }); + }); + + describe('#_cleanup', () => { + const testConfRoot = '/tmp/test-finch-cleanup'; + const testSocketDir = '/tmp/test-finch-cleanup-sockets'; + + afterEach(() => { + mockFs.restore(); + }); + + it('should remove PID file, socket, and credential socket', () => { + mockFs({ + [path.join(testConfRoot, 'run')]: { + 'finch-daemon.pid': '12345', + }, + [testSocketDir]: { + 'finch.sock': '', + 'finch-credential.sock': '', + }, + }); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + mgr._cleanup(); + + expect(fs.existsSync(mgr.pidFile)).to.be.false; + expect(fs.existsSync(mgr.socketPath)).to.be.false; + expect(fs.existsSync(mgr.credentialSocketPath)).to.be.false; + }); + + it('should not throw when files do not exist', () => { + mockFs({[testConfRoot]: {}}); + + const mgr = new FinchDaemonManager({ + userConfRoot: testConfRoot, + socketPath: path.join(testSocketDir, 'finch.sock'), + credentialSocketPath: path.join(testSocketDir, 'finch-credential.sock'), + debug: noopDebug, + }); + expect(() => mgr._cleanup()).to.not.throw(); + }); + }); +}); diff --git a/test/get-buildkit-config.spec.js b/test/get-buildkit-config.spec.js new file mode 100644 index 000000000..8d8b277ff --- /dev/null +++ b/test/get-buildkit-config.spec.js @@ -0,0 +1,157 @@ +/* + * Tests for get-buildkit-config. + * @file get-buildkit-config.spec.js + */ + +'use strict'; + +const os = require('os'); +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const getBuildkitConfig = require('./../utils/get-buildkit-config'); + +describe('get-buildkit-config', () => { + describe('#defaults', () => { + it('should return a string with correct TOML structure', () => { + const config = getBuildkitConfig(); + config.should.be.a('string'); + config.should.include('[worker.oci]'); + config.should.include('[worker.containerd]'); + }); + + it('should have worker.containerd enabled and worker.oci disabled', () => { + const config = getBuildkitConfig(); + // OCI worker disabled + config.should.include('[worker.oci]'); + config.should.include('enabled = false'); + // Containerd worker enabled + config.should.include('[worker.containerd]'); + config.should.include('enabled = true'); + }); + + it('should include auto-generated header comments', () => { + const config = getBuildkitConfig(); + config.should.include('# Lando BuildKit configuration'); + config.should.include('# Auto-generated'); + }); + + it('should use default containerdSocket', () => { + const config = getBuildkitConfig(); + config.should.include('address = "/run/lando/containerd.sock"'); + }); + + it('should include GC policy section', () => { + const config = getBuildkitConfig(); + config.should.include('[[worker.containerd.gcpolicy]]'); + config.should.include('keepDuration = 604800'); + config.should.include('all = true'); + }); + + it('should include platform support', () => { + const config = getBuildkitConfig(); + config.should.include('platforms = ["linux/amd64", "linux/arm64"]'); + }); + }); + + describe('#custom containerdSocket', () => { + it('should reflect custom containerdSocket in output', () => { + const config = getBuildkitConfig({containerdSocket: '/tmp/test.sock'}); + config.should.include('address = "/tmp/test.sock"'); + }); + + it('should not include default socket when custom is provided', () => { + const config = getBuildkitConfig({containerdSocket: '/custom/containerd.sock'}); + config.should.include('address = "/custom/containerd.sock"'); + config.should.not.include('/run/lando/containerd.sock'); + }); + }); + + describe('#GC policy', () => { + it('should use default gcMaxBytes (10GB)', () => { + const config = getBuildkitConfig(); + const defaultBytes = 10 * 1024 * 1024 * 1024; // 10GB + config.should.include(`reservedSpace = ${defaultBytes}`); + }); + + it('should use provided gcMaxBytes', () => { + const customBytes = 5 * 1024 * 1024 * 1024; // 5GB + const config = getBuildkitConfig({gcMaxBytes: customBytes}); + config.should.include(`reservedSpace = ${customBytes}`); + }); + + it('should use small gcMaxBytes value', () => { + const config = getBuildkitConfig({gcMaxBytes: 1024}); + config.should.include('reservedSpace = 1024'); + }); + }); + + describe('#parallelism', () => { + it('should default to CPU count', () => { + const config = getBuildkitConfig(); + const expectedParallelism = Math.max(1, os.cpus().length); + config.should.include(`max-parallelism = ${expectedParallelism}`); + }); + + it('should use custom parallelism when provided', () => { + const config = getBuildkitConfig({parallelism: 8}); + config.should.include('max-parallelism = 8'); + }); + + it('should accept parallelism of 1', () => { + const config = getBuildkitConfig({parallelism: 1}); + config.should.include('max-parallelism = 1'); + }); + }); + + describe('#debug', () => { + it('should not include debug flag by default', () => { + const config = getBuildkitConfig(); + config.should.not.include('debug = true'); + }); + + it('should add debug flag when debug is true', () => { + const config = getBuildkitConfig({debug: true}); + config.should.include('debug = true'); + }); + + it('should not add debug flag when debug is false', () => { + const config = getBuildkitConfig({debug: false}); + config.should.not.include('debug = true'); + }); + }); + + describe('#registry mirrors', () => { + it('should not include registry sections by default', () => { + const config = getBuildkitConfig(); + config.should.not.include('[registry.'); + }); + + it('should not include registry sections when empty object is passed', () => { + const config = getBuildkitConfig({registryMirrors: {}}); + config.should.not.include('[registry.'); + }); + + it('should include registry mirrors when configured', () => { + const config = getBuildkitConfig({ + registryMirrors: {'docker.io': 'https://mirror.example.com'}, + }); + config.should.include('[registry."docker.io"]'); + config.should.include('mirrors = ["https://mirror.example.com"]'); + }); + + it('should include multiple registry mirrors', () => { + const config = getBuildkitConfig({ + registryMirrors: { + 'docker.io': 'https://mirror1.example.com', + 'ghcr.io': 'https://mirror2.example.com', + }, + }); + config.should.include('[registry."docker.io"]'); + config.should.include('mirrors = ["https://mirror1.example.com"]'); + config.should.include('[registry."ghcr.io"]'); + config.should.include('mirrors = ["https://mirror2.example.com"]'); + }); + }); +}); diff --git a/test/get-containerd-config.spec.js b/test/get-containerd-config.spec.js new file mode 100644 index 000000000..19d2a50b9 --- /dev/null +++ b/test/get-containerd-config.spec.js @@ -0,0 +1,127 @@ +/* + * Tests for get-containerd-config. + * @file get-containerd-config.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const getContainerdConfig = require('./../utils/get-containerd-config'); + +describe('get-containerd-config', () => { + describe('#defaults', () => { + it('should return a string with correct TOML structure', () => { + const config = getContainerdConfig(); + config.should.be.a('string'); + config.should.include('version = 3'); + config.should.include('[grpc]'); + config.should.include('state = '); + config.should.include('root = '); + config.should.include('[plugins]'); + }); + + it('should use default socketPath, stateDir, and rootDir', () => { + const config = getContainerdConfig(); + config.should.include('address = "/run/lando/containerd.sock"'); + config.should.include('state = "/run/lando/containerd"'); + config.should.include('root = "/var/lib/lando/containerd/root"'); + }); + + it('should include auto-generated header comments', () => { + const config = getContainerdConfig(); + config.should.include('# Lando containerd configuration'); + config.should.include('# Auto-generated'); + }); + + it('should use overlayfs snapshotter by default', () => { + const config = getContainerdConfig(); + config.should.include('io.containerd.snapshotter.v1.overlayfs'); + }); + }); + + describe('#custom paths', () => { + it('should reflect custom socketPath in output', () => { + const config = getContainerdConfig({socketPath: '/tmp/test.sock'}); + config.should.include('address = "/tmp/test.sock"'); + }); + + it('should reflect custom stateDir in output', () => { + const config = getContainerdConfig({stateDir: '/custom/state'}); + config.should.include('state = "/custom/state"'); + }); + + it('should reflect custom rootDir in output', () => { + const config = getContainerdConfig({rootDir: '/custom/root'}); + config.should.include('root = "/custom/root"'); + config.should.include('root_path = "/custom/root/snapshots"'); + }); + + it('should reflect all custom paths together', () => { + const config = getContainerdConfig({ + socketPath: '/my/sock', + stateDir: '/my/state', + rootDir: '/my/root', + }); + config.should.include('address = "/my/sock"'); + config.should.include('state = "/my/state"'); + config.should.include('root = "/my/root"'); + }); + }); + + describe('#debug', () => { + it('should not include [debug] section by default', () => { + const config = getContainerdConfig(); + config.should.not.include('[debug]'); + config.should.not.include('level = "debug"'); + }); + + it('should add [debug] section when debug is true', () => { + const config = getContainerdConfig({debug: true}); + config.should.include('[debug]'); + config.should.include('level = "debug"'); + }); + + it('should not add [debug] section when debug is false', () => { + const config = getContainerdConfig({debug: false}); + config.should.not.include('[debug]'); + }); + }); + + describe('#CRI plugin', () => { + it('should disable CRI plugin by default', () => { + const config = getContainerdConfig(); + config.should.include('disabled_plugins = ["io.containerd.grpc.v1.cri"]'); + }); + + it('should enable CRI plugin when disableCri is false', () => { + const config = getContainerdConfig({disableCri: false}); + config.should.not.include('disabled_plugins'); + }); + + it('should disable CRI plugin when disableCri is true', () => { + const config = getContainerdConfig({disableCri: true}); + config.should.include('disabled_plugins = ["io.containerd.grpc.v1.cri"]'); + }); + }); + + describe('#snapshotter', () => { + it('should use overlayfs snapshotter by default', () => { + const config = getContainerdConfig(); + config.should.include('io.containerd.snapshotter.v1.overlayfs'); + }); + + it('should use custom snapshotter when specified', () => { + const config = getContainerdConfig({snapshotter: 'native'}); + config.should.include('io.containerd.snapshotter.v1.native'); + config.should.not.include('io.containerd.snapshotter.v1.overlayfs'); + }); + + it('should set snapshots root_path under rootDir', () => { + const config = getContainerdConfig({rootDir: '/data/containerd'}); + config.should.include('root_path = "/data/containerd/snapshots"'); + }); + }); +}); diff --git a/test/get-containerd-download-url.spec.js b/test/get-containerd-download-url.spec.js new file mode 100644 index 000000000..02a52d7ae --- /dev/null +++ b/test/get-containerd-download-url.spec.js @@ -0,0 +1,165 @@ +/* + * Tests for get-containerd-download-url. + * @file get-containerd-download-url.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const getUrl = require('./../utils/get-containerd-download-url'); + +describe('get-containerd-download-url', () => { + describe('#containerd', () => { + it('should return a valid GitHub URL for containerd on linux/amd64', () => { + const url = getUrl('containerd', {platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for containerd on linux/arm64', () => { + const url = getUrl('containerd', {platform: 'linux', arch: 'arm64'}); + url.should.equal( + 'https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-arm64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for containerd on darwin/amd64', () => { + const url = getUrl('containerd', {platform: 'darwin', arch: 'amd64'}); + url.should.equal( + 'https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-darwin-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for containerd on darwin/arm64', () => { + const url = getUrl('containerd', {platform: 'darwin', arch: 'arm64'}); + url.should.equal( + 'https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-darwin-arm64.tar.gz', + ); + }); + + it('should accept a custom version', () => { + const url = getUrl('containerd', {version: '1.7.0', platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/containerd/containerd/releases/download/v1.7.0/containerd-1.7.0-linux-amd64.tar.gz', + ); + }); + }); + + describe('#nerdctl', () => { + it('should return a valid GitHub URL for nerdctl on linux/amd64', () => { + const url = getUrl('nerdctl', {platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/containerd/nerdctl/releases/download/v2.0.5/nerdctl-2.0.5-linux-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for nerdctl on linux/arm64', () => { + const url = getUrl('nerdctl', {platform: 'linux', arch: 'arm64'}); + url.should.equal( + 'https://github.com/containerd/nerdctl/releases/download/v2.0.5/nerdctl-2.0.5-linux-arm64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for nerdctl on darwin/arm64', () => { + const url = getUrl('nerdctl', {platform: 'darwin', arch: 'arm64'}); + url.should.equal( + 'https://github.com/containerd/nerdctl/releases/download/v2.0.5/nerdctl-2.0.5-darwin-arm64.tar.gz', + ); + }); + + it('should accept a custom version', () => { + const url = getUrl('nerdctl', {version: '1.5.0', platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/containerd/nerdctl/releases/download/v1.5.0/nerdctl-1.5.0-linux-amd64.tar.gz', + ); + }); + }); + + describe('#buildkit', () => { + it('should return a valid GitHub URL for buildkit on linux/amd64', () => { + const url = getUrl('buildkit', {platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/moby/buildkit/releases/download/v0.18.2/buildkit-v0.18.2.linux-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for buildkit on linux/arm64', () => { + const url = getUrl('buildkit', {platform: 'linux', arch: 'arm64'}); + url.should.equal( + 'https://github.com/moby/buildkit/releases/download/v0.18.2/buildkit-v0.18.2.linux-arm64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for buildkit on darwin/amd64', () => { + const url = getUrl('buildkit', {platform: 'darwin', arch: 'amd64'}); + url.should.equal( + 'https://github.com/moby/buildkit/releases/download/v0.18.2/buildkit-v0.18.2.darwin-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for buildkit on darwin/arm64', () => { + const url = getUrl('buildkit', {platform: 'darwin', arch: 'arm64'}); + url.should.equal( + 'https://github.com/moby/buildkit/releases/download/v0.18.2/buildkit-v0.18.2.darwin-arm64.tar.gz', + ); + }); + + it('should accept a custom version', () => { + const url = getUrl('buildkit', {version: '0.12.0', platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/moby/buildkit/releases/download/v0.12.0/buildkit-v0.12.0.linux-amd64.tar.gz', + ); + }); + + it('should use a dot separator between version and platform (not dash)', () => { + const url = getUrl('buildkit', {platform: 'linux', arch: 'amd64'}); + // buildkit uses: buildkit-v{V}.{OS}-{ARCH} (dot between version and OS) + url.should.match(/buildkit-v[\d.]+\.linux-amd64/); + }); + }); + + describe('#error handling', () => { + it('should throw for an unknown binary name', () => { + expect(() => getUrl('podman', {platform: 'linux', arch: 'amd64'})) + .to.throw(/Unknown binary/); + }); + + it('should throw for unsupported platform/arch', () => { + expect(() => getUrl('containerd', {platform: 'windows', arch: 'amd64'})) + .to.throw(/Unsupported platform/); + }); + + it('should throw for unsupported arch', () => { + expect(() => getUrl('containerd', {platform: 'linux', arch: 'mips'})) + .to.throw(/Unsupported platform/); + }); + + it('should normalize win32 to windows before validation', () => { + // win32 gets mapped to windows, which is unsupported + expect(() => getUrl('containerd', {platform: 'win32', arch: 'amd64'})) + .to.throw(/Unsupported platform/); + }); + }); + + describe('#platform auto-detection', () => { + it('should work without explicit platform/arch (uses process defaults)', () => { + // This should not throw on supported platforms + const currentPlatform = process.platform; + const currentArch = process.arch === 'x64' ? 'amd64' : process.arch; + const key = `${currentPlatform}-${currentArch}`; + const supported = ['linux-amd64', 'linux-arm64', 'darwin-amd64', 'darwin-arm64']; + + if (supported.includes(key)) { + const url = getUrl('containerd'); + expect(url).to.be.a('string'); + url.should.include('github.com'); + url.should.include('containerd'); + } + }); + }); +}); diff --git a/test/get-finch-daemon-download-url.spec.js b/test/get-finch-daemon-download-url.spec.js new file mode 100644 index 000000000..c7ec0fe26 --- /dev/null +++ b/test/get-finch-daemon-download-url.spec.js @@ -0,0 +1,117 @@ +/* + * Tests for get-finch-daemon-download-url. + * @file get-finch-daemon-download-url.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const getUrl = require('./../utils/get-finch-daemon-download-url'); + +describe('get-finch-daemon-download-url', () => { + describe('#linux', () => { + it('should return a valid GitHub URL for linux/amd64', () => { + const url = getUrl({platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/runfinch/finch-daemon/releases/download/v0.22.0/finch-daemon-0.22.0-linux-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for linux/arm64', () => { + const url = getUrl({platform: 'linux', arch: 'arm64'}); + url.should.equal( + 'https://github.com/runfinch/finch-daemon/releases/download/v0.22.0/finch-daemon-0.22.0-linux-arm64.tar.gz', + ); + }); + }); + + describe('#darwin', () => { + it('should return a valid GitHub URL for darwin/amd64', () => { + const url = getUrl({platform: 'darwin', arch: 'amd64'}); + url.should.equal( + 'https://github.com/runfinch/finch-daemon/releases/download/v0.22.0/finch-daemon-0.22.0-darwin-amd64.tar.gz', + ); + }); + + it('should return a valid GitHub URL for darwin/arm64', () => { + const url = getUrl({platform: 'darwin', arch: 'arm64'}); + url.should.equal( + 'https://github.com/runfinch/finch-daemon/releases/download/v0.22.0/finch-daemon-0.22.0-darwin-arm64.tar.gz', + ); + }); + }); + + describe('#custom version', () => { + it('should accept a custom version', () => { + const url = getUrl({version: '0.21.0', platform: 'linux', arch: 'amd64'}); + url.should.equal( + 'https://github.com/runfinch/finch-daemon/releases/download/v0.21.0/finch-daemon-0.21.0-linux-amd64.tar.gz', + ); + }); + + it('should use default version when none specified', () => { + const url = getUrl({platform: 'linux', arch: 'amd64'}); + url.should.include('0.22.0'); + }); + }); + + describe('#error handling', () => { + it('should throw for unsupported platform/arch', () => { + expect(() => getUrl({platform: 'windows', arch: 'amd64'})) + .to.throw(/Unsupported platform/); + }); + + it('should throw for unsupported arch', () => { + expect(() => getUrl({platform: 'linux', arch: 'mips'})) + .to.throw(/Unsupported platform/); + }); + + it('should normalize win32 to windows before validation', () => { + // win32 gets mapped to windows, which is unsupported + expect(() => getUrl({platform: 'win32', arch: 'amd64'})) + .to.throw(/Unsupported platform/); + }); + }); + + describe('#url format', () => { + it('should point to the runfinch/finch-daemon GitHub repo', () => { + const url = getUrl({platform: 'linux', arch: 'amd64'}); + url.should.include('github.com/runfinch/finch-daemon'); + }); + + it('should use .tar.gz extension', () => { + const url = getUrl({platform: 'linux', arch: 'amd64'}); + url.should.match(/\.tar\.gz$/); + }); + + it('should include the version with v prefix in the tag path', () => { + const url = getUrl({version: '0.22.0', platform: 'linux', arch: 'amd64'}); + url.should.include('/download/v0.22.0/'); + }); + + it('should include version without v prefix in the filename', () => { + const url = getUrl({version: '0.22.0', platform: 'linux', arch: 'amd64'}); + url.should.include('finch-daemon-0.22.0-'); + }); + }); + + describe('#platform auto-detection', () => { + it('should work without explicit platform/arch (uses process defaults)', () => { + const currentPlatform = process.platform; + const currentArch = process.arch === 'x64' ? 'amd64' : process.arch; + const key = `${currentPlatform}-${currentArch}`; + const supported = ['linux-amd64', 'linux-arm64', 'darwin-amd64', 'darwin-arm64']; + + if (supported.includes(key)) { + const url = getUrl(); + expect(url).to.be.a('string'); + url.should.include('github.com'); + url.should.include('finch-daemon'); + } + }); + }); +}); diff --git a/test/get-nerdctl-config.spec.js b/test/get-nerdctl-config.spec.js new file mode 100644 index 000000000..adc179bd0 --- /dev/null +++ b/test/get-nerdctl-config.spec.js @@ -0,0 +1,70 @@ +/* + * Tests for get-nerdctl-config. + * + * The nerdctl config controls how nerdctl's OCI hooks resolve CNI paths. + * If the config has wrong paths, OCI hooks fall back to /etc/cni/net.d/ and + * self-deadlock on the system .nerdctl.lock file. These tests ensure the + * config always points to Lando's isolated CNI directories. + * + * @file get-nerdctl-config.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; + +const getNerdctlConfig = require('./../utils/get-nerdctl-config'); + +describe('get-nerdctl-config', () => { + it('should default CNI path to /usr/local/lib/lando/cni/bin', () => { + const config = getNerdctlConfig(); + expect(config).to.include('cni_netconfpath = "/etc/lando/cni"'); + expect(config).to.include('cni_path = "/usr/local/lib/lando/cni/bin"'); + }); + + it('should allow overriding CNI path', () => { + const config = getNerdctlConfig({cniPath: '/custom/cni'}); + expect(config).to.include('cni_path = "/custom/cni"'); + }); + + it('should use /etc/lando/cni as cni_netconfpath (NOT /etc/cni/net.d/)', () => { + // CRITICAL: If cni_netconfpath falls back to /etc/cni/net.d/ (the system + // default), nerdctl OCI hooks will self-deadlock on /etc/cni/net.d/.nerdctl.lock. + // The config MUST point to Lando's isolated CNI directory. + const config = getNerdctlConfig(); + expect(config).to.not.include('/etc/cni/net.d'); + expect(config).to.include('cni_netconfpath = "/etc/lando/cni"'); + }); + + it('should strip "finch" from cni_netconfpath when provided', () => { + // nerdctl internally appends the namespace (e.g. "finch") as a subdirectory + // to cni_netconfpath, so we must provide the parent directory. + const config = getNerdctlConfig({cniNetconfPath: '/etc/lando/cni/finch'}); + expect(config).to.include('cni_netconfpath = "/etc/lando/cni"'); + expect(config).to.not.include('cni_netconfpath = "/etc/lando/cni/finch"'); + }); + + it('should include the containerd socket address for OCI hook connectivity', () => { + const config = getNerdctlConfig({containerdSocket: '/run/lando/containerd.sock'}); + expect(config).to.include('address = "/run/lando/containerd.sock"'); + }); + + it('should include namespace for OCI hook context', () => { + const config = getNerdctlConfig(); + expect(config).to.include('namespace = "default"'); + }); + + it('should allow custom namespace override', () => { + const config = getNerdctlConfig({namespace: 'finch'}); + expect(config).to.include('namespace = "finch"'); + }); + + it('should use Lando-specific CNI binary path (NOT /opt/cni/bin)', () => { + // /opt/cni/bin is the system default. Lando MUST use its own CNI binaries + // to avoid conflicts with system containerd/Docker/Podman. + const config = getNerdctlConfig(); + expect(config).to.not.include('/opt/cni/bin'); + expect(config).to.include('/usr/local/lib/lando/cni/bin'); + }); +}); diff --git a/test/get-setup-engine.spec.js b/test/get-setup-engine.spec.js new file mode 100644 index 000000000..aeb2202cf --- /dev/null +++ b/test/get-setup-engine.spec.js @@ -0,0 +1,30 @@ +/* + * Tests for get-setup-engine. + */ + +'use strict'; + +const chai = require('chai'); +chai.should(); + +const getSetupEngine = require('../utils/get-setup-engine'); + +describe('get-setup-engine', () => { + it('prefers explicit config engine', () => { + const lando = { + cache: {get: () => 'docker'}, + config: {engine: 'containerd'}, + }; + + getSetupEngine(lando).should.equal('containerd'); + }); + + it('falls back to cached engine selection', () => { + const lando = { + cache: {get: () => 'containerd'}, + config: {engine: 'auto'}, + }; + + getSetupEngine(lando).should.equal('containerd'); + }); +}); diff --git a/test/lando-autostart-engine.spec.js b/test/lando-autostart-engine.spec.js new file mode 100644 index 000000000..6d2765473 --- /dev/null +++ b/test/lando-autostart-engine.spec.js @@ -0,0 +1,30 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +const sinon = require('sinon'); + +const autostartEngine = require('./../hooks/lando-autostart-engine'); + +describe('lando-autostart-engine', () => { + it('should skip Docker autostart logic for containerd backends', async () => { + const isUp = sinon.stub().resolves(false); + const runTasks = sinon.stub().resolves(); + const lando = { + _bootstrapLevel: 3, + config: {engine: 'containerd'}, + engine: { + engineBackend: 'containerd', + daemon: {isUp}, + }, + log: {debug: () => {}}, + runTasks, + shell: {sh: sinon.stub().resolves()}, + }; + + await autostartEngine(lando); + + expect(isUp.called).to.equal(false); + expect(runTasks.called).to.equal(false); + }); +}); diff --git a/test/lando-doctor-containerd.spec.js b/test/lando-doctor-containerd.spec.js new file mode 100644 index 000000000..f41f311f4 --- /dev/null +++ b/test/lando-doctor-containerd.spec.js @@ -0,0 +1,122 @@ +"use strict"; + +const chai = require("chai"); +const expect = chai.expect; + +const runChecks = require("../hooks/lando-doctor-containerd"); + +describe("lando-doctor-containerd", () => { + const mockLando = (overrides = {}) => ({ + config: { + userConfRoot: "/tmp/test-lando-doctor", + containerdBin: null, + buildkitdBin: null, + finchDaemonBin: null, + orchestratorBin: null, + containerdSocket: null, + finchDaemonSocket: null, + ...overrides, + }, + }); + + describe("#runChecks", () => { + it("should return an array of check results", async () => { + const checks = await runChecks(mockLando()); + expect(checks).to.be.an("array"); + expect(checks.length).to.be.greaterThan(0); + }); + + it("should include binary checks for all required binaries", async () => { + const checks = await runChecks(mockLando()); + const binaryChecks = checks.filter(c => c.title.includes("binary")); + // containerd, buildkitd, finch-daemon, docker-compose + expect(binaryChecks).to.have.lengthOf(4); + const names = binaryChecks.map(c => c.title); + expect(names).to.include("containerd binary"); + expect(names).to.include("buildkitd binary"); + expect(names).to.include("finch-daemon binary"); + expect(names).to.include("docker-compose binary"); + }); + + it("should NOT include nerdctl binary check (per BRIEF)", async () => { + const checks = await runChecks(mockLando()); + const nerdctlCheck = checks.find(c => c.title === "nerdctl binary"); + expect(nerdctlCheck).to.be.undefined; + }); + + it("should include daemon checks for all required daemons", async () => { + const checks = await runChecks(mockLando()); + const daemonChecks = checks.filter(c => c.title.endsWith("daemon")); + expect(daemonChecks).to.have.lengthOf(3); + const names = daemonChecks.map(c => c.title); + expect(names).to.include("containerd daemon"); + expect(names).to.include("buildkitd daemon"); + expect(names).to.include("finch-daemon daemon"); + }); + + it("should include finch-daemon connectivity check", async () => { + const checks = await runChecks(mockLando()); + const connCheck = checks.find(c => c.title === "finch-daemon connectivity"); + expect(connCheck).to.exist; + }); + + it("each check should have title, status, and message", async () => { + const checks = await runChecks(mockLando()); + for (const check of checks) { + expect(check).to.have.property("title").that.is.a("string"); + expect(check).to.have.property("status").that.is.oneOf(["ok", "warning", "error"]); + expect(check).to.have.property("message").that.is.a("string"); + } + }); + + it("should report error for missing binaries", async () => { + const checks = await runChecks(mockLando({ + containerdBin: "/nonexistent/containerd", + })); + const containerdCheck = checks.find(c => c.title === "containerd binary"); + expect(containerdCheck.status).to.equal("error"); + expect(containerdCheck.message).to.include("Not found"); + }); + + it("should report warning for missing daemon sockets", async () => { + const checks = await runChecks(mockLando({ + containerdSocket: "/nonexistent/containerd.sock", + })); + const daemonCheck = checks.find(c => c.title === "containerd daemon"); + expect(daemonCheck.status).to.equal("warning"); + expect(daemonCheck.message).to.include("not found"); + }); + + it("should use custom paths when provided in config", async () => { + const checks = await runChecks(mockLando({ + containerdBin: "/custom/path/containerd", + })); + const check = checks.find(c => c.title === "containerd binary"); + expect(check.message).to.include("/custom/path/containerd"); + }); + + it("should include CNI directory permissions check on Linux", async () => { + const checks = await runChecks(mockLando()); + const cniCheck = checks.find(c => c.title === "CNI directory permissions"); + if (process.platform === "linux") { + expect(cniCheck).to.exist; + expect(cniCheck).to.have.property("status").that.is.oneOf(["ok", "error"]); + expect(cniCheck).to.have.property("message").that.is.a("string"); + } else { + expect(cniCheck).to.not.exist; + } + }); + + it("should report error when CNI directory does not exist on Linux", async () => { + if (process.platform !== "linux") return; + // On CI / dev machines, /etc/lando/cni/finch likely doesn't exist, + // so the check should report an error with setup guidance. + const checks = await runChecks(mockLando()); + const cniCheck = checks.find(c => c.title === "CNI directory permissions"); + // If the dir doesn't exist, status should be error + if (cniCheck.status === "error") { + expect(cniCheck.message).to.match(/lando setup/i); + } + }); + }); +}); diff --git a/test/lima-manager.spec.js b/test/lima-manager.spec.js new file mode 100644 index 000000000..20e69c1e3 --- /dev/null +++ b/test/lima-manager.spec.js @@ -0,0 +1,683 @@ +/* + * Tests for LimaManager. + * @file lima-manager.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const os = require('os'); +const path = require('path'); +const LimaManager = require('./../lib/backends/containerd/lima-manager'); + +// Provide a noop debug function so tests don't need a real Lando Log instance +const noopDebug = () => {}; + +describe('lima-manager', () => { + // ========================================================================= + // Constructor + // ========================================================================= + describe('#constructor defaults', () => { + it('should set default limactl binary path', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.limactl.should.equal('limactl'); + }); + + it('should set default VM name to "lando"', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.vmName.should.equal('lando'); + }); + + it('should set default cpus to 4', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.cpus.should.equal(4); + }); + + it('should set default memory to 4 (GB)', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.memory.should.equal(4); + }); + + it('should set default disk to 60 (GB)', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.disk.should.equal(60); + }); + + it('should set debug to a noop function when not provided', () => { + const mgr = new LimaManager(); + expect(mgr.debug).to.be.a('function'); + // Should not throw + mgr.debug('test message'); + }); + }); + + describe('#constructor custom options', () => { + it('should accept custom limactl path', () => { + const mgr = new LimaManager({limactl: '/usr/local/bin/limactl', debug: noopDebug}); + mgr.limactl.should.equal('/usr/local/bin/limactl'); + }); + + it('should accept custom VM name', () => { + const mgr = new LimaManager({vmName: 'my-vm', debug: noopDebug}); + mgr.vmName.should.equal('my-vm'); + }); + + it('should accept custom cpus', () => { + const mgr = new LimaManager({cpus: 8, debug: noopDebug}); + mgr.cpus.should.equal(8); + }); + + it('should accept custom memory', () => { + const mgr = new LimaManager({memory: 16, debug: noopDebug}); + mgr.memory.should.equal(16); + }); + + it('should accept custom disk', () => { + const mgr = new LimaManager({disk: 120, debug: noopDebug}); + mgr.disk.should.equal(120); + }); + + it('should accept custom debug function', () => { + const customDebug = sinon.stub(); + const mgr = new LimaManager({debug: customDebug}); + mgr.debug.should.equal(customDebug); + }); + }); + + // ========================================================================= + // getSocketPath + // ========================================================================= + describe('#getSocketPath', () => { + it('should return path under ~/.lima//sock/', () => { + const mgr = new LimaManager({debug: noopDebug}); + const expected = path.join(os.homedir(), '.lima', 'lando', 'sock', 'containerd.sock'); + mgr.getSocketPath().should.equal(expected); + }); + + it('should use custom vmName in the socket path', () => { + const mgr = new LimaManager({vmName: 'custom-vm', debug: noopDebug}); + const expected = path.join(os.homedir(), '.lima', 'custom-vm', 'sock', 'containerd.sock'); + mgr.getSocketPath().should.equal(expected); + }); + + it('should always end with containerd.sock', () => { + const mgr = new LimaManager({debug: noopDebug}); + mgr.getSocketPath().should.match(/containerd\.sock$/); + }); + }); + + // ========================================================================= + // _parseListOutput (private but critical logic) + // ========================================================================= + describe('#_parseListOutput', () => { + /** @type {LimaManager} */ + let mgr; + + beforeEach(() => { + mgr = new LimaManager({debug: noopDebug}); + }); + + it('should return empty array for empty string', () => { + const result = mgr._parseListOutput(''); + expect(result).to.be.an('array').that.is.empty; + }); + + it('should return empty array for null input', () => { + const result = mgr._parseListOutput(null); + expect(result).to.be.an('array').that.is.empty; + }); + + it('should return empty array for undefined input', () => { + const result = mgr._parseListOutput(undefined); + expect(result).to.be.an('array').that.is.empty; + }); + + it('should return empty array for whitespace-only string', () => { + const result = mgr._parseListOutput(' \n \n '); + expect(result).to.be.an('array').that.is.empty; + }); + + it('should parse a single NDJSON line', () => { + const line = JSON.stringify({name: 'lando', status: 'Running'}); + const result = mgr._parseListOutput(line); + expect(result).to.have.lengthOf(1); + result[0].name.should.equal('lando'); + result[0].status.should.equal('Running'); + }); + + it('should parse multiple NDJSON lines', () => { + const lines = [ + JSON.stringify({name: 'lando', status: 'Running'}), + JSON.stringify({name: 'other-vm', status: 'Stopped'}), + ].join('\n'); + + const result = mgr._parseListOutput(lines); + expect(result).to.have.lengthOf(2); + result[0].name.should.equal('lando'); + result[1].name.should.equal('other-vm'); + }); + + it('should skip blank lines between valid JSON', () => { + const lines = [ + JSON.stringify({name: 'lando', status: 'Running'}), + '', + ' ', + JSON.stringify({name: 'other-vm', status: 'Stopped'}), + ].join('\n'); + + const result = mgr._parseListOutput(lines); + expect(result).to.have.lengthOf(2); + }); + + it('should skip invalid JSON lines gracefully', () => { + const lines = [ + JSON.stringify({name: 'lando', status: 'Running'}), + 'this is not json', + JSON.stringify({name: 'other-vm', status: 'Stopped'}), + ].join('\n'); + + const result = mgr._parseListOutput(lines); + expect(result).to.have.lengthOf(2); + result[0].name.should.equal('lando'); + result[1].name.should.equal('other-vm'); + }); + + it('should call debug when encountering invalid JSON', () => { + const debugStub = sinon.stub(); + const debugMgr = new LimaManager({debug: debugStub}); + + debugMgr._parseListOutput('not-json'); + expect(debugStub.calledWith('failed to parse limactl JSON line: %s', 'not-json')).to.be.true; + }); + + it('should handle trailing newline', () => { + const line = JSON.stringify({name: 'lando', status: 'Running'}) + '\n'; + const result = mgr._parseListOutput(line); + expect(result).to.have.lengthOf(1); + result[0].name.should.equal('lando'); + }); + + it('should convert Buffer input via toString()', () => { + const buf = Buffer.from(JSON.stringify({name: 'lando', status: 'Running'})); + const result = mgr._parseListOutput(buf); + expect(result).to.have.lengthOf(1); + result[0].name.should.equal('lando'); + }); + }); + + // ========================================================================= + // vmExists + // ========================================================================= + describe('#vmExists', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should return true when VM with matching name exists', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = JSON.stringify({name: 'lando', status: 'Stopped'}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.vmExists(); + expect(result).to.be.true; + }); + + it('should return false when no VM with matching name exists', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = JSON.stringify({name: 'other-vm', status: 'Running'}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.vmExists(); + expect(result).to.be.false; + }); + + it('should return false when limactl list returns empty output', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + const result = await mgr.vmExists(); + expect(result).to.be.false; + }); + + it('should return false when _run throws an error', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').rejects(new Error('command not found')); + + const result = await mgr.vmExists(); + expect(result).to.be.false; + }); + + it('should call _run with correct arguments', async () => { + const mgr = new LimaManager({debug: noopDebug}); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.vmExists(); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal(['list', '--json']); + }); + + it('should find VM among multiple VMs', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = [ + JSON.stringify({name: 'other-vm', status: 'Stopped'}), + JSON.stringify({name: 'lando', status: 'Running'}), + JSON.stringify({name: 'test-vm', status: 'Stopped'}), + ].join('\n'); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.vmExists(); + expect(result).to.be.true; + }); + }); + + // ========================================================================= + // isRunning + // ========================================================================= + describe('#isRunning', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should return true when VM status is "Running"', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = JSON.stringify({name: 'lando', status: 'Running'}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.isRunning(); + expect(result).to.be.true; + }); + + it('should return false when VM status is "Stopped"', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = JSON.stringify({name: 'lando', status: 'Stopped'}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should return false when VM does not exist', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const ndjson = JSON.stringify({name: 'other-vm', status: 'Running'}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should return false when _run throws an error', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').rejects(new Error('limactl not found')); + + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should return false for empty output', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + const result = await mgr.isRunning(); + expect(result).to.be.false; + }); + + it('should distinguish Running from other statuses', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + + // Test each non-Running status + for (const status of ['Stopped', 'Starting', 'Broken', '']) { + const ndjson = JSON.stringify({name: 'lando', status}); + sandbox.stub(mgr, '_run').resolves({stdout: ndjson, stderr: '', code: 0}); + const result = await mgr.isRunning(); + expect(result).to.be.false; + sandbox.restore(); + sandbox = sinon.createSandbox(); + } + }); + }); + + // ========================================================================= + // createVM + // ========================================================================= + describe('#createVM', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should skip creation when VM already exists', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + sandbox.stub(mgr, 'vmExists').resolves(true); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.createVM(); + expect(runStub.called).to.be.false; + }); + + it('should call _run with correct create arguments when VM does not exist', async () => { + const mgr = new LimaManager({ + vmName: 'lando', + cpus: 4, + memory: 4, + disk: 60, + debug: noopDebug, + }); + sandbox.stub(mgr, 'vmExists').resolves(false); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.createVM(); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal([ + 'create', + '--name=lando', + '--containerd=system', + '--cpus=4', + '--memory=4', + '--disk=60', + '--tty=false', + 'template:default', + ]); + }); + + it('should use custom resource values in create arguments', async () => { + const mgr = new LimaManager({ + vmName: 'my-vm', + cpus: 8, + memory: 16, + disk: 120, + debug: noopDebug, + }); + sandbox.stub(mgr, 'vmExists').resolves(false); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.createVM(); + const args = runStub.firstCall.args[0]; + expect(args).to.include('--name=my-vm'); + expect(args).to.include('--cpus=8'); + expect(args).to.include('--memory=16'); + expect(args).to.include('--disk=120'); + }); + + it('should propagate error if _run fails during creation', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, 'vmExists').resolves(false); + sandbox.stub(mgr, '_run').rejects(new Error('creation failed')); + + try { + await mgr.createVM(); + expect.fail('should have thrown'); + } catch (err) { + err.message.should.equal('creation failed'); + } + }); + }); + + // ========================================================================= + // startVM + // ========================================================================= + describe('#startVM', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should skip start when VM is already running', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(true); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.startVM(); + expect(runStub.called).to.be.false; + }); + + it('should call _run with correct start arguments', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(false); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.startVM(); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal(['start', 'lando']); + }); + + it('should use custom vmName in start arguments', async () => { + const mgr = new LimaManager({vmName: 'custom-vm', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(false); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.startVM(); + expect(runStub.firstCall.args[0]).to.deep.equal(['start', 'custom-vm']); + }); + + it('should propagate error if _run fails during start', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(false); + sandbox.stub(mgr, '_run').rejects(new Error('start failed')); + + try { + await mgr.startVM(); + expect.fail('should have thrown'); + } catch (err) { + err.message.should.equal('start failed'); + } + }); + }); + + // ========================================================================= + // stopVM + // ========================================================================= + describe('#stopVM', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should skip stop when VM is not running', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(false); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.stopVM(); + expect(runStub.called).to.be.false; + }); + + it('should call _run with correct stop arguments', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(true); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.stopVM(); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal(['stop', 'lando']); + }); + + it('should use custom vmName in stop arguments', async () => { + const mgr = new LimaManager({vmName: 'custom-vm', debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(true); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.stopVM(); + expect(runStub.firstCall.args[0]).to.deep.equal(['stop', 'custom-vm']); + }); + + it('should propagate error if _run fails during stop', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, 'isRunning').resolves(true); + sandbox.stub(mgr, '_run').rejects(new Error('stop failed')); + + try { + await mgr.stopVM(); + expect.fail('should have thrown'); + } catch (err) { + err.message.should.equal('stop failed'); + } + }); + }); + + // ========================================================================= + // exec + // ========================================================================= + describe('#exec', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should call _run with shell, vmName, --, and provided args', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: 'output', stderr: '', code: 0}); + + await mgr.exec(['ls', '-la']); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal(['shell', 'lando', '--', 'ls', '-la']); + }); + + it('should use custom vmName in exec arguments', async () => { + const mgr = new LimaManager({vmName: 'custom-vm', debug: noopDebug}); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.exec(['cat', '/etc/hosts']); + expect(runStub.firstCall.args[0]).to.deep.equal(['shell', 'custom-vm', '--', 'cat', '/etc/hosts']); + }); + + it('should return the _run result', async () => { + const mgr = new LimaManager({debug: noopDebug}); + const expected = {stdout: 'hello', stderr: '', code: 0}; + sandbox.stub(mgr, '_run').resolves(expected); + + const result = await mgr.exec(['echo', 'hello']); + expect(result).to.deep.equal(expected); + }); + + it('should propagate error from _run', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').rejects(new Error('exec failed')); + + try { + await mgr.exec(['bad-command']); + expect.fail('should have thrown'); + } catch (err) { + err.message.should.equal('exec failed'); + } + }); + }); + + // ========================================================================= + // nerdctl + // ========================================================================= + describe('#nerdctl', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should call _run with shell, vmName, --, sudo, nerdctl, and provided args', async () => { + const mgr = new LimaManager({vmName: 'lando', debug: noopDebug}); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.nerdctl(['ps', '-a']); + expect(runStub.calledOnce).to.be.true; + expect(runStub.firstCall.args[0]).to.deep.equal([ + 'shell', 'lando', '--', 'sudo', 'nerdctl', 'ps', '-a', + ]); + }); + + it('should use custom vmName in nerdctl arguments', async () => { + const mgr = new LimaManager({vmName: 'custom-vm', debug: noopDebug}); + const runStub = sandbox.stub(mgr, '_run').resolves({stdout: '', stderr: '', code: 0}); + + await mgr.nerdctl(['images']); + expect(runStub.firstCall.args[0]).to.deep.equal([ + 'shell', 'custom-vm', '--', 'sudo', 'nerdctl', 'images', + ]); + }); + + it('should return the _run result', async () => { + const mgr = new LimaManager({debug: noopDebug}); + const expected = {stdout: 'image-list', stderr: '', code: 0}; + sandbox.stub(mgr, '_run').resolves(expected); + + const result = await mgr.nerdctl(['images']); + expect(result).to.deep.equal(expected); + }); + + it('should propagate error from _run', async () => { + const mgr = new LimaManager({debug: noopDebug}); + sandbox.stub(mgr, '_run').rejects(new Error('nerdctl failed')); + + try { + await mgr.nerdctl(['bad-command']); + expect.fail('should have thrown'); + } catch (err) { + err.message.should.equal('nerdctl failed'); + } + }); + }); + + // ========================================================================= + // _run (integration with run-command — argument forwarding only) + // ========================================================================= + describe('#_run', () => { + it('should pass limactl binary and args to the underlying run-command', async () => { + const mgr = new LimaManager({limactl: '/custom/limactl', debug: noopDebug}); + // Stub _run at the instance level to verify arg forwarding + // We cannot stub the require('run-command') without proxyquire, + // so we verify the method exists and accepts args correctly. + expect(mgr._run).to.be.a('function'); + }); + + it('should use the configured limactl path', () => { + const mgr = new LimaManager({limactl: '/opt/bin/limactl', debug: noopDebug}); + mgr.limactl.should.equal('/opt/bin/limactl'); + }); + }); +}); diff --git a/test/nerdctl-compose.spec.js b/test/nerdctl-compose.spec.js new file mode 100644 index 000000000..bf38b51e6 --- /dev/null +++ b/test/nerdctl-compose.spec.js @@ -0,0 +1,243 @@ +/* + * Tests for nerdctl-compose. + * @file nerdctl-compose.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const NerdctlCompose = require('./../lib/backends/containerd/nerdctl-compose'); + +const defaultSocketPath = '/run/lando/containerd.sock'; +const customSocketPath = '/tmp/lando/run/containerd.sock'; + +const composeFiles = ['docker-compose.yml', 'docker-compose.override.yml']; +const project = 'myproject'; + +describe('nerdctl-compose', () => { + describe('#NerdctlCompose', () => { + it('should be a constructor', () => { + expect(NerdctlCompose).to.be.a('function'); + }); + + it('should set the default socket path', () => { + const nc = new NerdctlCompose(); + nc.socketPath.should.equal(defaultSocketPath); + }); + + it('should accept a custom socket path', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + nc.socketPath.should.equal(customSocketPath); + }); + }); + + describe('#_transform', () => { + it('should prepend connection flags and compose to cmd', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc._transform({cmd: ['up', '--detach'], opts: {mode: 'attach'}}); + + result.cmd.should.deep.equal([ + '--address', customSocketPath, '--namespace', 'default', 'compose', + 'up', '--detach', + ]); + result.opts.mode.should.equal('attach'); + }); + + it('should preserve existing opts while merging auth env when needed', () => { + const nc = new NerdctlCompose(); + const originalOpts = {cwd: '/tmp', env: {FOO: 'bar'}}; + const result = nc._transform({cmd: ['ps'], opts: originalOpts}); + + result.opts.cwd.should.equal('/tmp'); + result.opts.env.FOO.should.equal('bar'); + result.opts.env.CONTAINERD_ADDRESS.should.equal(defaultSocketPath); + result.opts.env.CONTAINERD_NAMESPACE.should.equal('default'); + originalOpts.env.FOO.should.equal('bar'); + }); + }); + + describe('#start', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc.start(composeFiles, project, {}); + + expect(result).to.be.an('object'); + expect(result).to.have.property('cmd').that.is.an('array'); + expect(result).to.have.property('opts').that.is.an('object'); + }); + + it('should include connection flags and compose in cmd', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc.start(composeFiles, project, {}); + + result.cmd[0].should.equal('--address'); + result.cmd[1].should.equal(customSocketPath); + result.cmd[2].should.equal('--namespace'); + result.cmd[3].should.equal('default'); + result.cmd[4].should.equal('compose'); + }); + + it('should include project name in cmd', () => { + const nc = new NerdctlCompose(); + const result = nc.start(composeFiles, project, {}); + + result.cmd.should.include('--project-name'); + result.cmd.should.include(project); + }); + }); + + describe('#build', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.build(composeFiles, project, {services: ['web'], local: ['web']}); + + expect(result).to.be.an('object'); + expect(result).to.have.property('cmd').that.is.an('array'); + expect(result).to.have.property('opts'); + }); + + it('should include compose prefix with connection flags', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc.build(composeFiles, project, {services: ['web'], local: ['web']}); + + result.cmd[0].should.equal('--address'); + result.cmd[1].should.equal(customSocketPath); + result.cmd[2].should.equal('--namespace'); + result.cmd[3].should.equal('default'); + result.cmd[4].should.equal('compose'); + }); + + it('should include build subcommand when local services match', () => { + const nc = new NerdctlCompose(); + const result = nc.build(composeFiles, project, {services: ['web'], local: ['web']}); + + // After the compose prefix, should have file flags, project, and 'build' + result.cmd.should.include('build'); + }); + + it('should fall back to ps when no local services match', () => { + const nc = new NerdctlCompose(); + // services are specified but local is empty — nothing to build + const result = nc.build(composeFiles, project, {services: ['web'], local: []}); + + // compose.build falls back to 'ps' when there's nothing to build + result.cmd.should.include('ps'); + }); + }); + + describe('#remove', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.remove(composeFiles, project, {}); + + expect(result).to.be.an('object'); + expect(result).to.have.property('cmd'); + expect(result).to.have.property('opts'); + }); + + it('should use down when purge is true', () => { + const nc = new NerdctlCompose(); + const result = nc.remove(composeFiles, project, {purge: true}); + + result.cmd.should.include('down'); + }); + + it('should use rm when purge is false', () => { + const nc = new NerdctlCompose(); + const result = nc.remove(composeFiles, project, {purge: false}); + + result.cmd.should.include('rm'); + }); + }); + + describe('#run', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.run(composeFiles, project, { + cmd: ['drush', 'cr'], + services: ['appserver'], + }); + + expect(result).to.be.an('object'); + expect(result).to.have.property('cmd'); + expect(result).to.have.property('opts'); + }); + + it('should include the compose prefix', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc.run(composeFiles, project, { + cmd: ['ls'], + services: ['web'], + }); + + result.cmd[0].should.equal('--address'); + result.cmd[1].should.equal(customSocketPath); + result.cmd[2].should.equal('--namespace'); + result.cmd[3].should.equal('default'); + result.cmd[4].should.equal('compose'); + }); + }); + + describe('#stop', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.stop(composeFiles, project, {}); + + expect(result).to.be.an('object'); + result.cmd.should.include('stop'); + }); + + it('should include compose prefix', () => { + const nc = new NerdctlCompose({socketPath: customSocketPath}); + const result = nc.stop(composeFiles, project, {}); + + result.cmd.slice(0, 5).should.deep.equal([ + '--address', customSocketPath, '--namespace', 'default', 'compose', + ]); + }); + }); + + describe('#logs', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.logs(composeFiles, project, {}); + + expect(result).to.be.an('object'); + result.cmd.should.include('logs'); + }); + }); + + describe('#pull', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.pull(composeFiles, project, {}); + + expect(result).to.be.an('object'); + expect(result).to.have.property('cmd'); + }); + }); + + describe('#getId', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.getId(composeFiles, project, {}); + + expect(result).to.be.an('object'); + result.cmd.should.include('ps'); + }); + }); + + describe('#kill', () => { + it('should return an object with cmd and opts', () => { + const nc = new NerdctlCompose(); + const result = nc.kill(composeFiles, project, {}); + + expect(result).to.be.an('object'); + result.cmd.should.include('kill'); + }); + }); +}); diff --git a/test/perf-timer.spec.js b/test/perf-timer.spec.js new file mode 100644 index 000000000..fa17a6ef6 --- /dev/null +++ b/test/perf-timer.spec.js @@ -0,0 +1,115 @@ +/* + * Tests for perf-timer. + * @file perf-timer.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const perfTimer = require('./../utils/perf-timer'); + +describe('perf-timer', () => { + describe('#return value', () => { + it('should return an object with label and stop function', () => { + const timer = perfTimer('test'); + expect(timer).to.be.an('object'); + expect(timer).to.have.property('label'); + expect(timer).to.have.property('stop'); + timer.stop.should.be.a('function'); + }); + + it('should have label matching what was passed in', () => { + const timer = perfTimer('my-operation'); + timer.label.should.equal('my-operation'); + }); + + it('should preserve empty string label', () => { + const timer = perfTimer(''); + timer.label.should.equal(''); + }); + }); + + describe('#stop()', () => { + it('should return a number (milliseconds)', () => { + const timer = perfTimer('test'); + const elapsed = timer.stop(); + elapsed.should.be.a('number'); + }); + + it('should return elapsed time >= 0', () => { + const timer = perfTimer('test'); + const elapsed = timer.stop(); + elapsed.should.be.at.least(0); + }); + + it('should measure real elapsed time', function(done) { + this.timeout(5000); + const timer = perfTimer('sleep-test'); + setTimeout(() => { + const elapsed = timer.stop(); + elapsed.should.be.at.least(50); + done(); + }, 55); // sleep slightly over 50ms to account for timer granularity + }); + + it('should be callable multiple times returning increasing values', function(done) { + this.timeout(5000); + const timer = perfTimer('multi-stop'); + const first = timer.stop(); + setTimeout(() => { + const second = timer.stop(); + second.should.be.at.least(first); + done(); + }, 20); + }); + }); + + describe('#multiple timers', () => { + it('should not interfere with each other', function(done) { + this.timeout(5000); + const timerA = perfTimer('timer-a'); + + setTimeout(() => { + const timerB = perfTimer('timer-b'); + + setTimeout(() => { + const elapsedA = timerA.stop(); + const elapsedB = timerB.stop(); + + // timerA was started ~60ms before timerB, so it should show more elapsed time + elapsedA.should.be.at.least(50); + elapsedB.should.be.at.least(25); + elapsedA.should.be.greaterThan(elapsedB); + + // Labels should remain independent + timerA.label.should.equal('timer-a'); + timerB.label.should.equal('timer-b'); + + done(); + }, 30); + }, 30); + }); + + it('should track separate start times', () => { + const timers = []; + for (let i = 0; i < 5; i++) { + timers.push(perfTimer(`timer-${i}`)); + } + + // All timers should return non-negative elapsed times + const results = timers.map(t => t.stop()); + results.forEach(elapsed => { + elapsed.should.be.a('number'); + elapsed.should.be.at.least(0); + }); + + // Labels should be correct + timers.forEach((t, i) => { + t.label.should.equal(`timer-${i}`); + }); + }); + }); +}); diff --git a/test/resolve-containerd-mount.spec.js b/test/resolve-containerd-mount.spec.js new file mode 100644 index 000000000..8f363a6c3 --- /dev/null +++ b/test/resolve-containerd-mount.spec.js @@ -0,0 +1,163 @@ +/* + * Tests for resolve-containerd-mount. + * @file resolve-containerd-mount.spec.js + */ + +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; +const path = require('path'); +chai.should(); + +const {resolveContainerdMount, isPathAccessible} = require('./../utils/resolve-containerd-mount'); + +describe('resolve-containerd-mount', () => { + describe('#resolveContainerdMount', () => { + describe('Linux platform', () => { + it('should mark all paths as accessible', () => { + const result = resolveContainerdMount('/tmp/myproject', {platform: 'linux'}); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + }); + + it('should leave paths unchanged', () => { + const result = resolveContainerdMount('/opt/code', {platform: 'linux'}); + result.resolvedPath.should.equal('/opt/code'); + }); + + it('should not produce warnings for any path', () => { + const paths = ['/tmp/myproject', '/opt/code', '/var/data', '/home/user/app']; + paths.forEach(p => { + const result = resolveContainerdMount(p, {platform: 'linux'}); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + }); + }); + }); + + describe('macOS/darwin platform', () => { + const darwinOpts = {platform: 'darwin', homedir: '/Users/me'}; + + it('should mark paths under homedir as accessible', () => { + const result = resolveContainerdMount('/Users/me/code', darwinOpts); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + result.resolvedPath.should.equal('/Users/me/code'); + }); + + it('should mark paths outside homedir as inaccessible with warning', () => { + const result = resolveContainerdMount('/tmp/myproject', darwinOpts); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + result.warning.should.include('/tmp/myproject'); + result.warning.should.include('Lima'); + }); + + it('should mark /opt/code as inaccessible', () => { + const result = resolveContainerdMount('/opt/code', darwinOpts); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + }); + + it('should expand tilde to homedir and mark as accessible', () => { + const result = resolveContainerdMount('~/code', darwinOpts); + result.accessible.should.be.true; + result.resolvedPath.should.equal('/Users/me/code'); + expect(result.warning).to.be.null; + }); + + it('should expand tilde for nested paths', () => { + const result = resolveContainerdMount('~/projects/app/src', darwinOpts); + result.accessible.should.be.true; + result.resolvedPath.should.equal('/Users/me/projects/app/src'); + }); + + it('should use custom limaMounts to allow paths outside homedir', () => { + const opts = { + ...darwinOpts, + limaMounts: [{location: '/data'}], + }; + const result = resolveContainerdMount('/data/app', opts); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + }); + + it('should reject paths not matching custom limaMounts', () => { + const opts = { + ...darwinOpts, + limaMounts: [{location: '/data'}], + }; + const result = resolveContainerdMount('/tmp/other', opts); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + }); + }); + + describe('WSL/win32 platform', () => { + it('should mark all paths as accessible', () => { + const result = resolveContainerdMount('/mnt/c/Users/me/project', {platform: 'win32'}); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + }); + + it('should mark arbitrary paths as accessible', () => { + const result = resolveContainerdMount('/tmp/data', {platform: 'win32'}); + result.accessible.should.be.true; + expect(result.warning).to.be.null; + }); + }); + + describe('edge cases', () => { + it('should return accessible=false with warning for empty string', () => { + const result = resolveContainerdMount('', {platform: 'linux'}); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + }); + + it('should return accessible=false with warning for null', () => { + const result = resolveContainerdMount(null, {platform: 'linux'}); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + }); + + it('should return accessible=false with warning for undefined', () => { + const result = resolveContainerdMount(undefined, {platform: 'linux'}); + result.accessible.should.be.false; + result.warning.should.be.a('string'); + }); + + it('should resolve relative paths to absolute', () => { + const result = resolveContainerdMount('src/app', {platform: 'linux'}); + result.accessible.should.be.true; + path.isAbsolute(result.resolvedPath).should.be.true; + result.resolvedPath.should.equal(path.resolve('src/app')); + }); + }); + }); + + describe('#isPathAccessible', () => { + it('should return true for accessible paths', () => { + isPathAccessible('/home/user/code', {platform: 'linux'}).should.be.true; + }); + + it('should return false for inaccessible paths', () => { + isPathAccessible('/tmp/myproject', {platform: 'darwin', homedir: '/Users/me'}).should.be.false; + }); + + it('should match resolveContainerdMount result', () => { + const testCases = [ + {path: '/Users/me/code', opts: {platform: 'darwin', homedir: '/Users/me'}}, + {path: '/tmp/outside', opts: {platform: 'darwin', homedir: '/Users/me'}}, + {path: '/opt/data', opts: {platform: 'linux'}}, + {path: '~/projects', opts: {platform: 'darwin', homedir: '/Users/me'}}, + ]; + + testCases.forEach(tc => { + const full = resolveContainerdMount(tc.path, tc.opts); + const quick = isPathAccessible(tc.path, tc.opts); + expect(quick).to.equal(full.accessible); + }); + }); + }); +}); diff --git a/test/run-powershell-script.spec.js b/test/run-powershell-script.spec.js new file mode 100644 index 000000000..489a728de --- /dev/null +++ b/test/run-powershell-script.spec.js @@ -0,0 +1,21 @@ +'use strict'; + +const chai = require('chai'); +const expect = chai.expect; + +const runPowerShellScript = require('./../utils/run-powershell-script'); + +describe('run-powershell-script', () => { + describe('WSL interop errors', () => { + it('should detect UtilAcceptVsock failures', () => { + expect(runPowerShellScript._isWSLInteropError('<3>WSL (1 - ) ERROR: UtilAcceptVsock:271: accept4 failed 110\n')).to.equal(true); + expect(runPowerShellScript._isWSLInteropError('some other error')).to.equal(false); + }); + + it('should format a friendly restart recommendation', () => { + const message = runPowerShellScript._formatWSLInteropError('<3>WSL (1 - ) ERROR: UtilAcceptVsock:271: accept4 failed 110\n'); + + expect(message).to.equal('Windows interop is unavailable from WSL; restart WSL with `wsl --shutdown` and try again.'); + }); + }); +}); diff --git a/test/setup-containerd-auth.spec.js b/test/setup-containerd-auth.spec.js new file mode 100644 index 000000000..d77e8515d --- /dev/null +++ b/test/setup-containerd-auth.spec.js @@ -0,0 +1,273 @@ +/* + * Tests for setup-containerd-auth. + * @file setup-containerd-auth.spec.js + */ + +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const {getContainerdAuthConfig, getDockerConfigPath} = require('./../utils/setup-containerd-auth'); + +describe('setup-containerd-auth', () => { + describe('#getDockerConfigPath', () => { + it('should return default ~/.docker when no options provided', () => { + const result = getDockerConfigPath({env: {}}); + result.should.equal(path.join(os.homedir(), '.docker')); + }); + + it('should respect explicit configPath option', () => { + const result = getDockerConfigPath({configPath: '/custom/docker-config'}); + result.should.equal(path.resolve('/custom/docker-config')); + }); + + it('should respect DOCKER_CONFIG env var', () => { + const result = getDockerConfigPath({env: {DOCKER_CONFIG: '/env/docker-config'}}); + result.should.equal(path.resolve('/env/docker-config')); + }); + + it('should prefer configPath over DOCKER_CONFIG env var', () => { + const result = getDockerConfigPath({ + configPath: '/explicit/path', + env: {DOCKER_CONFIG: '/env/path'}, + }); + result.should.equal(path.resolve('/explicit/path')); + }); + + it('should return an absolute path for relative configPath', () => { + const result = getDockerConfigPath({configPath: 'relative/docker'}); + path.isAbsolute(result).should.be.true; + }); + }); + + describe('#getContainerdAuthConfig', () => { + describe('with default config path', () => { + it('should return an object with dockerConfig, env, configExists, and credentialHelpers', () => { + const result = getContainerdAuthConfig({env: {}}); + + expect(result).to.be.an('object'); + expect(result).to.have.property('dockerConfig').that.is.a('string'); + expect(result).to.have.property('env').that.is.an('object'); + expect(result).to.have.property('configExists').that.is.a('boolean'); + expect(result).to.have.property('credentialHelpers').that.is.an('array'); + }); + + it('should use ~/.docker as dockerConfig by default', () => { + const result = getContainerdAuthConfig({env: {}}); + result.dockerConfig.should.equal(path.join(os.homedir(), '.docker')); + }); + + it('should return empty env when config has no credsStore', () => { + // Use a temp dir with a config.json that has NO credsStore. + // The real ~/.docker/config.json may have credsStore which triggers + // sanitization and sets DOCKER_CONFIG — that's correct behavior. + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'lando-auth-default-')); + fs.writeFileSync(path.join(tmpDir, 'config.json'), JSON.stringify({ + auths: {'https://index.docker.io/v1/': {}}, + })); + + try { + const result = getContainerdAuthConfig({configPath: tmpDir}); + // Non-standard path → DOCKER_CONFIG is set, but that's the path override. + // The key assertion: no *additional* sanitization redirect happened. + result.env.should.have.property('DOCKER_CONFIG', tmpDir); + } finally { + fs.unlinkSync(path.join(tmpDir, 'config.json')); + fs.rmdirSync(tmpDir); + } + }); + + it('should sanitize credsStore and redirect DOCKER_CONFIG', () => { + // When config.json has credsStore, the implementation strips it and + // writes a sanitized copy to ~/.lando/docker-config/ because + // finch-daemon treats credential helper errors as fatal. + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'lando-auth-creds-')); + fs.writeFileSync(path.join(tmpDir, 'config.json'), JSON.stringify({ + credsStore: 'desktop', + auths: {}, + })); + + try { + const result = getContainerdAuthConfig({configPath: tmpDir}); + // Should redirect to sanitized config dir + result.env.should.have.property('DOCKER_CONFIG'); + result.env.DOCKER_CONFIG.should.include('docker-config'); + result.credentialHelpers.should.include('docker-credential-desktop'); + } finally { + fs.unlinkSync(path.join(tmpDir, 'config.json')); + fs.rmdirSync(tmpDir); + } + }); + }); + + describe('with custom configPath (registryAuth override)', () => { + it('should set DOCKER_CONFIG in env when configPath is non-standard', () => { + const result = getContainerdAuthConfig({configPath: '/custom/docker'}); + result.env.should.have.property('DOCKER_CONFIG'); + result.env.DOCKER_CONFIG.should.equal('/custom/docker'); + }); + + it('should set dockerConfig to the custom path', () => { + const result = getContainerdAuthConfig({configPath: '/my/config'}); + result.dockerConfig.should.equal(path.resolve('/my/config')); + }); + + it('should not redirect DOCKER_CONFIG when config has no credsStore', () => { + // Use a temp dir at a path that resolves to the default ~/.docker. + // But since we can't guarantee the real ~/.docker has no credsStore, + // test with a controlled temp dir that has no credsStore. + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'lando-auth-nocreds-')); + fs.writeFileSync(path.join(tmpDir, 'config.json'), JSON.stringify({ + auths: {'https://index.docker.io/v1/': {}}, + })); + + try { + const result = getContainerdAuthConfig({configPath: tmpDir}); + // DOCKER_CONFIG should be set to tmpDir (because it's non-standard) + // but NOT redirected to ~/.lando/docker-config/ (no credsStore to sanitize) + result.env.should.deep.equal({DOCKER_CONFIG: tmpDir}); + } finally { + fs.unlinkSync(path.join(tmpDir, 'config.json')); + fs.rmdirSync(tmpDir); + } + }); + }); + + describe('with DOCKER_CONFIG env var', () => { + it('should set DOCKER_CONFIG in env when env var points to non-standard path', () => { + const result = getContainerdAuthConfig({env: {DOCKER_CONFIG: '/env/docker'}}); + result.env.should.have.property('DOCKER_CONFIG'); + result.env.DOCKER_CONFIG.should.equal('/env/docker'); + }); + }); + + describe('when no docker config exists', () => { + it('should set configExists to false for a non-existent path', () => { + const result = getContainerdAuthConfig({configPath: '/nonexistent/path/that/does/not/exist'}); + result.configExists.should.be.false; + }); + + it('should return empty credentialHelpers when config does not exist', () => { + const result = getContainerdAuthConfig({configPath: '/nonexistent/path'}); + result.credentialHelpers.should.be.an('array').that.is.empty; + }); + + it('should still return valid env even when config does not exist', () => { + const result = getContainerdAuthConfig({configPath: '/nonexistent/path'}); + result.env.should.have.property('DOCKER_CONFIG'); + result.env.DOCKER_CONFIG.should.equal('/nonexistent/path'); + }); + }); + + describe('credential helper detection', () => { + let tmpDir; + let configFile; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'lando-auth-test-')); + configFile = path.join(tmpDir, 'config.json'); + }); + + afterEach(() => { + try { + if (fs.existsSync(configFile)) fs.unlinkSync(configFile); + fs.rmdirSync(tmpDir); + } catch { + // cleanup best-effort + } + }); + + it('should detect credsStore helper', () => { + fs.writeFileSync(configFile, JSON.stringify({ + credsStore: 'osxkeychain', + })); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.true; + result.credentialHelpers.should.include('docker-credential-osxkeychain'); + }); + + it('should detect credHelpers per-registry helpers', () => { + fs.writeFileSync(configFile, JSON.stringify({ + credHelpers: { + 'gcr.io': 'gcloud', + '123456.dkr.ecr.us-east-1.amazonaws.com': 'ecr-login', + }, + })); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.true; + result.credentialHelpers.should.include('docker-credential-gcloud'); + result.credentialHelpers.should.include('docker-credential-ecr-login'); + }); + + it('should detect both credsStore and credHelpers together', () => { + fs.writeFileSync(configFile, JSON.stringify({ + credsStore: 'desktop', + credHelpers: { + 'gcr.io': 'gcloud', + }, + })); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.credentialHelpers.should.include('docker-credential-desktop'); + result.credentialHelpers.should.include('docker-credential-gcloud'); + }); + + it('should return empty credentialHelpers when config has no cred fields', () => { + fs.writeFileSync(configFile, JSON.stringify({ + auths: { + 'https://index.docker.io/v1/': {}, + }, + })); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.true; + result.credentialHelpers.should.be.an('array').that.is.empty; + }); + + it('should deduplicate credential helpers', () => { + fs.writeFileSync(configFile, JSON.stringify({ + credsStore: 'desktop', + credHelpers: { + 'docker.io': 'desktop', + 'gcr.io': 'desktop', + }, + })); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + const desktopCount = result.credentialHelpers + .filter(h => h === 'docker-credential-desktop').length; + desktopCount.should.equal(1); + }); + + it('should handle malformed config.json gracefully', () => { + fs.writeFileSync(configFile, 'not valid json {{{'); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.false; + result.credentialHelpers.should.be.an('array').that.is.empty; + }); + + it('should handle config.json that is valid JSON but has unexpected shape', () => { + fs.writeFileSync(configFile, JSON.stringify('just a string')); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.true; + result.credentialHelpers.should.be.an('array').that.is.empty; + }); + + it('should set configExists to true when config.json exists', () => { + fs.writeFileSync(configFile, JSON.stringify({})); + + const result = getContainerdAuthConfig({configPath: tmpDir}); + result.configExists.should.be.true; + }); + }); + }); +}); diff --git a/test/wsl-helper.spec.js b/test/wsl-helper.spec.js new file mode 100644 index 000000000..3c0892bd1 --- /dev/null +++ b/test/wsl-helper.spec.js @@ -0,0 +1,263 @@ +/* + * Tests for WslHelper. + * @file wsl-helper.spec.js + */ + +'use strict'; + +// Setup chai. +const chai = require('chai'); +const expect = chai.expect; +chai.should(); + +const sinon = require('sinon'); +const mockFs = require('mock-fs'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const WslHelper = require('./../lib/backends/containerd/wsl-helper'); + +// Provide a noop debug function so tests don't need a real Lando Log instance +const noopDebug = () => {}; + +describe('wsl-helper', () => { + // ========================================================================= + // Constructor + // ========================================================================= + describe('#constructor defaults', () => { + it('should set debug to a noop function when not provided', () => { + const helper = new WslHelper(); + expect(helper.debug).to.be.a('function'); + // Should not throw + helper.debug('test'); + }); + + it('should set userConfRoot to ~/.lando by default', () => { + const helper = new WslHelper(); + const expected = path.join(os.homedir(), '.lando'); + helper.userConfRoot.should.equal(expected); + }); + }); + + describe('#constructor custom options', () => { + it('should accept custom debug function', () => { + const customDebug = sinon.stub(); + const helper = new WslHelper({debug: customDebug}); + helper.debug.should.equal(customDebug); + }); + + it('should accept custom userConfRoot', () => { + const helper = new WslHelper({userConfRoot: '/custom/root'}); + helper.userConfRoot.should.equal('/custom/root'); + }); + }); + + // ========================================================================= + // isWsl (static method) + // ========================================================================= + describe('.isWsl', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + /** @type {string} */ + let originalPlatform; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + originalPlatform = process.platform; + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + // Restore platform — Object.defineProperty is needed because process.platform + // is a read-only property + Object.defineProperty(process, 'platform', {value: originalPlatform}); + }); + + it('should return false on non-linux platforms', () => { + Object.defineProperty(process, 'platform', {value: 'darwin'}); + expect(WslHelper.isWsl()).to.be.false; + }); + + it('should return false on Windows platform', () => { + Object.defineProperty(process, 'platform', {value: 'win32'}); + expect(WslHelper.isWsl()).to.be.false; + }); + + it('should return true when /proc/version contains "microsoft" (lowercase)', () => { + Object.defineProperty(process, 'platform', {value: 'linux'}); + mockFs({ + '/proc/version': 'Linux version 5.15.90.1-microsoft-standard-WSL2', + }); + expect(WslHelper.isWsl()).to.be.true; + }); + + it('should return true when /proc/version contains "Microsoft" (mixed case)', () => { + Object.defineProperty(process, 'platform', {value: 'linux'}); + mockFs({ + '/proc/version': 'Linux version 4.4.0-19041-Microsoft', + }); + expect(WslHelper.isWsl()).to.be.true; + }); + + it('should return false when /proc/version does not contain "microsoft"', () => { + Object.defineProperty(process, 'platform', {value: 'linux'}); + mockFs({ + '/proc/version': 'Linux version 6.1.0-18-amd64 (debian-kernel@lists.debian.org)', + }); + expect(WslHelper.isWsl()).to.be.false; + }); + + it('should return false when /proc/version cannot be read', () => { + Object.defineProperty(process, 'platform', {value: 'linux'}); + // mock-fs with empty filesystem — /proc/version does not exist + mockFs({}); + expect(WslHelper.isWsl()).to.be.false; + }); + }); + + // ========================================================================= + // isDockerDesktopRunning + // ========================================================================= + describe('#isDockerDesktopRunning', () => { + afterEach(() => { + mockFs.restore(); + }); + + it('should return true when Docker Desktop WSL proxy socket exists', async () => { + mockFs({ + '/mnt/wsl/docker-desktop/docker-desktop-proxy': '', + }); + + const helper = new WslHelper({debug: noopDebug}); + const result = await helper.isDockerDesktopRunning(); + expect(result).to.be.true; + }); + + it('should return true when /var/run/docker.sock exists', async () => { + mockFs({ + '/var/run/docker.sock': '', + }); + + const helper = new WslHelper({debug: noopDebug}); + const result = await helper.isDockerDesktopRunning(); + expect(result).to.be.true; + }); + + it('should return true when both sockets exist', async () => { + mockFs({ + '/mnt/wsl/docker-desktop/docker-desktop-proxy': '', + '/var/run/docker.sock': '', + }); + + const helper = new WslHelper({debug: noopDebug}); + const result = await helper.isDockerDesktopRunning(); + expect(result).to.be.true; + }); + + it('should return false when neither socket exists', async () => { + mockFs({}); + + const helper = new WslHelper({debug: noopDebug}); + const result = await helper.isDockerDesktopRunning(); + expect(result).to.be.false; + }); + }); + + // ========================================================================= + // ensureSocketPermissions + // ========================================================================= + describe('#ensureSocketPermissions', () => { + /** @type {sinon.SinonSandbox} */ + let sandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + mockFs.restore(); + }); + + it('should create parent directory recursively', async () => { + mockFs({}); + // Stub process.getuid/getgid and fs.chownSync since mock-fs doesn't support chown + sandbox.stub(process, 'getuid').returns(1000); + sandbox.stub(process, 'getgid').returns(1000); + sandbox.stub(fs, 'chownSync'); + + const helper = new WslHelper({debug: noopDebug}); + await helper.ensureSocketPermissions('/run/lando/finch.sock'); + + // Verify the directory was created + expect(fs.existsSync('/run/lando')).to.be.true; + }); + + it('should call chownSync with current uid and gid', async () => { + mockFs({'/run/lando': {}}); + sandbox.stub(process, 'getuid').returns(1000); + sandbox.stub(process, 'getgid').returns(1001); + const chownStub = sandbox.stub(fs, 'chownSync'); + + const helper = new WslHelper({debug: noopDebug}); + await helper.ensureSocketPermissions('/run/lando/finch.sock'); + + expect(chownStub.calledOnce).to.be.true; + expect(chownStub.firstCall.args[0]).to.equal('/run/lando'); + expect(chownStub.firstCall.args[1]).to.equal(1000); + expect(chownStub.firstCall.args[2]).to.equal(1001); + }); + + it('should call debug on success', async () => { + mockFs({'/run': {}}); + sandbox.stub(process, 'getuid').returns(1000); + sandbox.stub(process, 'getgid').returns(1000); + sandbox.stub(fs, 'chownSync'); + + const debugStub = sinon.stub(); + const helper = new WslHelper({debug: debugStub}); + await helper.ensureSocketPermissions('/run/lando/finch.sock'); + + expect(debugStub.calledWith( + 'ensured socket directory permissions for %s', + '/run/lando', + )).to.be.true; + }); + + it('should handle errors gracefully without throwing', async () => { + mockFs({}); + sandbox.stub(process, 'getuid').returns(1000); + sandbox.stub(process, 'getgid').returns(1000); + // mkdirSync will work but chownSync will fail + sandbox.stub(fs, 'chownSync').throws(new Error('EPERM: operation not permitted')); + + const debugStub = sinon.stub(); + const helper = new WslHelper({debug: debugStub}); + + // Should not throw + await helper.ensureSocketPermissions('/run/lando/finch.sock'); + + expect(debugStub.calledWith( + 'could not set socket directory permissions: %s', + 'EPERM: operation not permitted', + )).to.be.true; + }); + + it('should handle mkdirSync failure gracefully', async () => { + // Use a path that can't be created + sandbox.stub(fs, 'mkdirSync').throws(new Error('EACCES: permission denied')); + + const debugStub = sinon.stub(); + const helper = new WslHelper({debug: debugStub}); + + // Should not throw + await helper.ensureSocketPermissions('/root/protected/path/finch.sock'); + + expect(debugStub.calledWith( + 'could not set socket directory permissions: %s', + 'EACCES: permission denied', + )).to.be.true; + }); + }); +}); diff --git a/todo.md b/todo.md new file mode 100644 index 000000000..a3b928d67 --- /dev/null +++ b/todo.md @@ -0,0 +1,82 @@ +# Containerd Engine — Remaining Work + +Status of production-readiness tasks. Completed tasks are listed briefly for reference; remaining work is detailed. + +--- + +## Completed Tasks + +- **Task 22:** Lima setup hook for macOS `lando setup` — `hooks/lando-setup-containerd-engine-darwin.js` +- **Task 23:** Containerd config file management — `utils/get-containerd-config.js`, tests +- **Task 24:** BuildKit configuration and cache management — `utils/get-buildkit-config.js`, tests +- **Task 25:** Registry authentication — `utils/setup-containerd-auth.js` +- **Task 26:** Volume mount compatibility layer — `utils/resolve-containerd-mount.js`, `hooks/app-check-containerd-mounts.js`, tests +- **Task 27:** Networking parity — `test/containerd-networking.spec.js`, `hooks/app-add-2-landonet.js` (updated for Dockerode) +- **Task 29:** Engine selection UX — `hooks/lando-setup-engine-select.js`, `hooks/lando-doctor-containerd.js`, `docs/config/engine.md` +- **Task 31:** Performance benchmarking — `scripts/benchmark-engines.sh`, `utils/perf-timer.js`, `docs/dev/containerd-performance.md` +- **Task 28:** Proxy (Traefik) compatibility with containerd backend: + - `lib/backends/containerd/proxy-adapter.js` (new) — CNI pre-creation, finch-daemon compat checks + - `lib/backends/containerd/index.js` — added ContainerdProxyAdapter export + - `hooks/app-add-proxy-2-landonet.js` — removed containerd early return; added CNI ensurance for bridge net + - `hooks/app-start-proxy.js` — added proxy CNI network pre-creation and app proxy network ensurance for containerd + - `docs/dev/containerd-proxy-design.md` (new) — architecture documentation + - `test/containerd-proxy-adapter.spec.js` (new) — 14 tests covering proxy-adapter and hook changes + - **Verified:** finch-daemon passes all Traefik compat checks (ping, events API, label format) + - **Known caveats:** End-to-end `lando start` blocked by Docker Desktop WSL proxy (ports 80/443) and CNI dir permissions (pre-existing issues, not Task 28 specific) +- **Task 33:** CNI directory permissions — `lando setup` now sets group-writable perms on `/etc/cni/net.d/finch`; doctor checks CNI dir; fixed proxy-adapter test +- **Task 32:** Fix BRIEF violations — removed nerdctl shellouts from user-facing code: + - `hooks/lando-doctor-containerd.js` — removed nerdctl binary check, added docker-compose check + - `messages/nerdctl-not-found.js` → renamed to `containerd-binaries-not-found.js` + - `messages/nerdctl-compose-failed.js` → renamed to `compose-failed-containerd.js` + - `messages/update-nerdctl-warning.js` → renamed/rewritten as `update-containerd-warning.js` + - `hooks/app-check-containerd-compat.js` — replaced nerdctl compose shellout with docker-compose + DOCKER_HOST check + - Updated tests: `containerd-messages.spec.js`, `lando-doctor-containerd.spec.js`, `backend-manager.spec.js`, `containerd-integration.spec.js` + +--- + +## Remaining Work + +### Test coverage gaps (from "Not Started" list) +- End-to-end integration test for actual `lando start` via `docker-compose + finch-daemon` path (current integration tests use stubs) + +### Other remaining items +- macOS support (Lima VM integration exists but untested with new architecture) +- Windows non-WSL support +- Plugin compatibility verification +- Installer/packaging updates to bundle containerd stack + +--- + +## Recently Completed + +- **Task 36:** Unit tests for LimaManager, WslHelper, and smoke test script update + - `test/lima-manager.spec.js` (new) — **60 tests** covering: constructor defaults/custom options, `getSocketPath()` (path construction, custom vmName), `_parseListOutput()` (empty/null/undefined input, single NDJSON, multiple NDJSON, blank line skipping, invalid JSON tolerance, trailing newline, Buffer input), `vmExists()` (match/no-match/empty/error/correct args/multi-VM), `isRunning()` (Running/Stopped/not-exist/error/empty/all-non-Running statuses), `createVM()` (skip-if-exists, correct create args, custom resource values, error propagation), `startVM()` (skip-if-running, correct args, custom vmName, error propagation), `stopVM()` (skip-if-not-running, correct args, custom vmName, error propagation), `exec()` (args forwarding, custom vmName, return value, error propagation), `nerdctl()` (args including sudo nerdctl, custom vmName, return value, error propagation). + - `test/wsl-helper.spec.js` (new) — **19 tests** covering: constructor defaults/custom options, `isWsl()` static method (non-linux platforms, WSL1/WSL2 detection via /proc/version, native Linux detection, read failure), `isDockerDesktopRunning()` (Docker Desktop WSL proxy socket, /var/run/docker.sock, both present, neither present), `ensureSocketPermissions()` (recursive dir creation, chown with uid/gid, debug logging, EPERM error handling, mkdirSync failure handling). + - `scripts/test-containerd-engine.sh` — **Rewritten** to test the production compose path: `docker-compose + DOCKER_HOST + finch-daemon` instead of the deprecated `nerdctl compose`. Now starts finch-daemon as the Docker API bridge, uses `docker-compose` with `DOCKER_HOST=unix://` for all compose operations, verifies containers via `docker-compose ps` and Docker API, and checks finch-daemon Docker API compatibility via `_ping` endpoint. Required binaries updated from `containerd + nerdctl + buildkitd` to `containerd + buildkitd + finch-daemon + docker-compose`. + +- **Task 35:** Bug fix, test coverage, and dead code cleanup + - `hooks/lando-setup-containerd-engine-check.js` — **Bug fix:** binary check was looking in `~/.lando/bin/` for `containerd` and `buildkitd`, but they're installed to `/usr/local/lib/lando/bin/` (system binaries). Only `nerdctl` lives in `~/.lando/bin/`. Fixed to use `containerdSystemBinDir` config, matching the setup hook and backend-manager. + - `test/ensure-cni-network.spec.js` (new) — **23 tests** covering: conflist creation, duplicate detection, CNI conflist JSON structure validation, bridge plugin properties, unique nerdctlID generation, subnet allocation (empty dir, increment past existing, max across multiple, sequential allocation, exhaustion at 255), invalid JSON/non-matching subnet skip, IPAM routes, EACCES/EPERM error handling with user-friendly message, non-permission write errors, non-existent directory handling, debug logging, default/custom cniNetconfPath options. + - `test/finch-daemon-manager.spec.js` — **Extended from 18 to 34 tests** adding: `_isProcessRunning` (no PID file, invalid PID, running process, ESRCH, EPERM), `start` (early return when running, start args validation), `stop` (no PID file, invalid PID, already gone process, SIGTERM signal), `isRunning` (not running, running without socket, running with socket), `_cleanup` (removes files, handles missing files). + - `lib/backends/containerd/nerdctl-compose.js` — Marked as **@deprecated** (not used in production; `docker-compose + DOCKER_HOST` is the actual path via `BackendManager._createContainerdEngine()`). + - `utils/setup-engine-containerd.js` — Marked as **@deprecated** (superseded by `BackendManager._createContainerdEngine()`). + - `lib/backends/containerd/index.js` — Removed `NerdctlCompose` from public exports; updated JSDoc example to reflect production usage (Dockerode + finch-daemon). + - `test/containerd-integration.spec.js` — Updated to import `NerdctlCompose` directly instead of from index exports. + +- **Task 34:** Comprehensive CNI network config bridging for all compose-defined networks + - `utils/ensure-compose-cni-networks.js` (new) — Parses compose YAML files and pre-creates CNI conflist files for ALL non-external networks, not just `_default`. Resolves docker-compose-style network names (explicit `name:` property or `${project}_${networkName}` convention). Handles multiple compose files with merge semantics matching docker-compose behavior. + - `lib/backend-manager.js` — Updated containerd compose wrapper to use `ensureComposeCniNetworks()` instead of single-network `ensureCniNetwork()`. Now ensures CNI configs for the implicit `_default` network PLUS any explicitly defined networks (custom bridge networks, proxy edge networks, etc.) before docker-compose up. + - `test/ensure-compose-cni-networks.spec.js` (new) — 17 tests covering: default network handling, custom network extraction, explicit `name:` resolution, external network skipping (both `external: true` and compose v2 object syntax), multiple compose file merging, proxy network scenario, deduplication, error handling (missing files, invalid YAML), and CNI conflist content validation (unique subnet allocation). + - **Fixes the core blocker:** Previously, only `${project}_default` got a CNI conflist in the compose wrapper. Custom networks defined in compose files (e.g. `frontend`, `backend`, proxy `edge`) would fail at container start because the nerdctl OCI hook couldn't find their CNI configs. Now all compose-defined networks are covered. + +- **Task 33:** CNI directory permissions — fix EACCES blocker for user-land `ensureCniNetwork()` + - `hooks/lando-setup-containerd-engine.js` — setup task now runs `chgrp lando` + `chmod g+w` on `/etc/cni/net.d/finch` after creating it + - Systemd `ExecStartPre` updated to enforce CNI dir permissions on every service start (survives package updates, manual resets) + - `hasRun` check updated to detect missing CNI permissions (re-running `lando setup` will trigger the fix on existing installs) + - `hooks/lando-doctor-containerd.js` — added CNI directory permissions check (reports error if dir missing or not group-writable) + - `test/lando-doctor-containerd.spec.js` — added 2 tests for CNI permission check + - `test/containerd-proxy-adapter.spec.js` — fixed pre-existing test failure (added mock-fs for CNI directory in `app-add-proxy-2-landonet` hook test) +- **Task 30:** Troubleshooting documentation — `docs/troubleshooting/containerd.md` + - Covers all 10 error scenarios from message modules + - Sections: quick diagnostics, containerd/buildkitd/finch-daemon not running, binaries not found, permission denied, socket conflict, compose failures, component updates, macOS Lima issues, CNI networking, logs reference + - Updated 7 message modules (`containerd-not-running`, `containerd-socket-conflict`, `containerd-binaries-not-found`, `containerd-permission-denied`, `compose-failed-containerd`, `buildkitd-not-running`, `finch-daemon-not-running`) to link to the new troubleshooting page instead of the generic engine config page diff --git a/utils/allocate-ports.js b/utils/allocate-ports.js new file mode 100644 index 000000000..ee380211d --- /dev/null +++ b/utils/allocate-ports.js @@ -0,0 +1,90 @@ +'use strict'; + +const net = require('net'); + +/** + * Find a free port on the host. + * @param {string} [host="127.0.0.1"] - Host to bind to. + * @param {number} [startPort=32768] - Start of ephemeral range. + * @returns {Promise} A free port number. + */ +const findFreePort = (host = '127.0.0.1', startPort = 32768) => { + return new Promise((resolve, reject) => { + const server = net.createServer(); + server.listen(startPort, host, () => { + const port = server.address().port; + server.close(() => resolve(port)); + }); + server.on('error', () => { + // Port in use, try next + if (startPort < 65535) { + resolve(findFreePort(host, startPort + 1)); + } else { + reject(new Error('No free ports available')); + } + }); + }); +}; + +/** + * Rewrite port mappings in a compose-style ports array to use explicit host ports. + * + * Handles these formats: + * - "80" → "127.0.0.1:FREE:80" + * - "127.0.0.1::80" → "127.0.0.1:FREE:80" + * - "::80" → "127.0.0.1:FREE:80" + * - ":80" → "127.0.0.1:FREE:80" + * - "127.0.0.1:8080:80" → unchanged (already has host port) + * - "8080:80" → unchanged + * + * @param {Array} ports - Array of port mappings. + * @returns {Promise>} Rewritten port mappings. + */ +const allocatePorts = async ports => { + if (!ports || !Array.isArray(ports)) return ports; + + const result = []; + for (const port of ports) { + if (typeof port !== 'string') { + // Object format or number — pass through + result.push(port); + continue; + } + + // Parse the port spec + // Formats: "80", ":80", "::80", "127.0.0.1::80", "8080:80", "127.0.0.1:8080:80" + const parts = port.split(':'); + + if (parts.length === 1) { + // "80" — just container port, no host port + const freePort = await findFreePort(); + result.push(`127.0.0.1:${freePort}:${parts[0]}`); + } else if (parts.length === 2) { + if (parts[0] === '') { + // ":80" — empty host port + const freePort = await findFreePort(); + result.push(`127.0.0.1:${freePort}:${parts[1]}`); + } else { + // "8080:80" — has host port, pass through + result.push(port); + } + } else if (parts.length === 3) { + const [host, hostPort, containerPort] = parts; + if (hostPort === '') { + // "127.0.0.1::80" or "::80" — empty host port + const bindHost = host || '127.0.0.1'; + const freePort = await findFreePort(bindHost); + result.push(`${bindHost}:${freePort}:${containerPort}`); + } else { + // "127.0.0.1:8080:80" — fully specified, pass through + result.push(port); + } + } else { + // Unknown format, pass through + result.push(port); + } + } + return result; +}; + +module.exports = {findFreePort, allocatePorts}; diff --git a/utils/build-docker-exec.js b/utils/build-docker-exec.js index 651f505e2..7a26cd5a7 100644 --- a/utils/build-docker-exec.js +++ b/utils/build-docker-exec.js @@ -4,6 +4,10 @@ const _ = require('lodash'); /* * Build docker exec opts + * + * Per BRIEF: "No sudo in runtime code" and "Never shell out to nerdctl from + * user-facing code." The containerd backend uses docker-compose + finch-daemon + * via DOCKER_HOST, so dockerBin is always the docker CLI — never nerdctl. */ const getExecOpts = (docker, datum) => { const exec = [docker, 'exec']; diff --git a/utils/build-tooling-runner.js b/utils/build-tooling-runner.js index 6436548c7..c23dadbb8 100644 --- a/utils/build-tooling-runner.js +++ b/utils/build-tooling-runner.js @@ -4,7 +4,15 @@ const _ = require('lodash'); const path = require('path'); const getContainer = (app, service) => { - return app?.containers?.[service] ?? `${app.project}_${service}_1`; + const isContainerd = app?.engine?.engineBackend === 'containerd' + || app?._config?.engineConfig?.containerdMode === true + || process.env.LANDO_ENGINE === 'containerd'; + + if (app?.containers?.[service]) { + return isContainerd ? app.containers[service].replace(/_/g, '-') : app.containers[service]; + } + + return isContainerd ? `${app.project}-${service}-1` : `${app.project}_${service}_1`; }; const getContainerPath = (appRoot, appMount = undefined) => { diff --git a/utils/ensure-cni-network.js b/utils/ensure-cni-network.js new file mode 100644 index 000000000..d5f64e477 --- /dev/null +++ b/utils/ensure-cni-network.js @@ -0,0 +1,193 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const crypto = require('crypto'); + +/** + * The expected CNI plugin types in the correct order. + * Used both for new conflist creation and for migrating stale configs. + * + * Plugin chain: + * - bridge: Creates Linux bridge, assigns IP via IPAM, enables MASQUERADE + * - firewall: Manages iptables FORWARD rules for container traffic + * - tuning: Allows sysctl and interface tuning on the container veth + * + * NOTE: portmap was previously included but is removed because: + * 1. The CNI portmap plugin rejects HostPort:0 (random port), which Docker + * handles via its own port allocator before container start. In the + * containerd path, nerdctl's OCI hook passes HostPort:0 directly to + * portmap, which fails with "Invalid host port number: 0". + * 2. Lando uses Traefik proxy for HTTP port routing, not CNI-level port + * publishing. Port mappings in docker-compose are handled by the proxy. + * + * NOTE: tc-redirect-tap was previously included but is NOT installed by + * `lando setup` (it's from github.com/awslabs/tc-redirect-tap, not + * the standard containernetworking/plugins release). It's only needed + * for VM-based runtimes (Kata, Firecracker), not standard runc containers. + * + * @type {string[]} + */ +const EXPECTED_PLUGIN_TYPES = ['bridge', 'firewall', 'tuning']; + +/** + * Build the standard CNI plugin array for a Lando network conflist. + * + * @param {string} bridgeName - The Linux bridge device name (e.g. 'br-abcdef012345'). + * @param {number} subnet - The third octet for the 10.4.x.0/24 subnet. + * @returns {Object[]} Array of CNI plugin config objects. + */ +const buildPlugins = (bridgeName, subnet) => [ + { + type: 'bridge', + bridge: bridgeName, + isGateway: true, + ipMasq: true, + hairpinMode: true, + ipam: { + ranges: [[{gateway: `10.4.${subnet}.1`, subnet: `10.4.${subnet}.0/24`}]], + routes: [{dst: '0.0.0.0/0'}], + type: 'host-local', + }, + }, + { + type: 'firewall', + }, + { + type: 'tuning', + }, +]; + +/** + * Check whether an existing conflist has the expected plugin chain. + * Returns false if the conflist uses the old plugin chain (e.g. with + * tc-redirect-tap) or is missing expected plugins (e.g. portmap, tuning). + * + * @param {Object} conflist - Parsed conflist JSON. + * @returns {boolean} true if the plugin chain matches EXPECTED_PLUGIN_TYPES. + */ +const hasExpectedPlugins = (conflist) => { + if (!conflist || !Array.isArray(conflist.plugins)) return false; + const types = conflist.plugins.map(p => p.type); + if (types.length !== EXPECTED_PLUGIN_TYPES.length) return false; + return EXPECTED_PLUGIN_TYPES.every((t, i) => types[i] === t); +}; + +/** + * Ensure a CNI network conflist exists for a given network name. + * + * When using docker-compose via finch-daemon, networks are created at the + * Docker API level but NOT at the CNI level. The nerdctl OCI hook needs + * CNI configs to set up container networking. This utility creates the + * conflist file if it doesn't already exist. + * + * If a conflist already exists but uses a stale plugin chain (e.g. the old + * tc-redirect-tap chain), it is rewritten in-place with the correct plugins + * while preserving the subnet, bridge name, and nerdctlID. + * + * @param {string} networkName - The network name (e.g. 'containerdtest_default'). + * @param {Object} [opts={}] - Options. + * @param {string} [opts.cniNetconfPath='/etc/lando/cni/finch'] - CNI config directory. + * @param {Function} [opts.debug] - Debug logging function. + * @returns {boolean} true if a conflist was created or updated, false if it already existed and was up-to-date. + */ +module.exports = (networkName, opts = {}) => { + const cniNetconfPath = opts.cniNetconfPath || '/etc/lando/cni/finch'; + const debug = opts.debug || (() => {}); + const conflistPath = path.join(cniNetconfPath, `nerdctl-${networkName}.conflist`); + + // If the conflist exists, check if it needs migration + if (fs.existsSync(conflistPath)) { + try { + const existing = JSON.parse(fs.readFileSync(conflistPath, 'utf8')); + if (hasExpectedPlugins(existing)) { + debug('CNI conflist already exists and is up-to-date for network %s', networkName); + return false; + } + + // Stale conflist — migrate in-place preserving subnet/bridge/nerdctlID + debug('CNI conflist for network %s has stale plugin chain, migrating', networkName); + const bridgePlugin = (existing.plugins || []).find(p => p.type === 'bridge'); + const bridgeName = bridgePlugin ? bridgePlugin.bridge : `br-${(existing.nerdctlID || crypto.randomBytes(32).toString('hex')).slice(0, 12)}`; + const ipamRanges = bridgePlugin && bridgePlugin.ipam && bridgePlugin.ipam.ranges; + const subnetMatch = ipamRanges && ipamRanges[0] && ipamRanges[0][0] && (ipamRanges[0][0].subnet || '').match(/^10\.4\.(\d+)\.0\/24$/); + const existingSubnet = subnetMatch ? parseInt(subnetMatch[1], 10) : 1; + + const updated = { + ...existing, + plugins: buildPlugins(bridgeName, existingSubnet), + }; + + const tmpPath = `${conflistPath}.${process.pid}.tmp`; + fs.writeFileSync(tmpPath, JSON.stringify(updated, null, 2), 'utf8'); + fs.renameSync(tmpPath, conflistPath); + debug('migrated CNI conflist for network %s (preserved subnet 10.4.%d.0/24)', networkName, existingSubnet); + return true; + } catch (err) { + // If we can't read/parse the existing file, fall through to re-create + debug('failed to read existing CNI conflist for network %s: %s', networkName, err.message); + } + } + + // Find the next available subnet by scanning existing configs + let maxSubnet = 0; + try { + const files = fs.readdirSync(cniNetconfPath).filter(f => f.endsWith('.conflist')); + for (const file of files) { + try { + const content = JSON.parse(fs.readFileSync(path.join(cniNetconfPath, file), 'utf8')); + const plugins = content.plugins || []; + for (const plugin of plugins) { + const ranges = (plugin.ipam && plugin.ipam.ranges) || []; + for (const range of ranges) { + for (const r of range) { + const match = (r.subnet || '').match(/^10\.4\.(\d+)\.0\/24$/); + if (match) maxSubnet = Math.max(maxSubnet, parseInt(match[1], 10)); + } + } + } + } catch { /* skip invalid configs */ } + } + } catch { /* directory doesn't exist or can't be read */ } + + const subnet = maxSubnet + 1; + if (subnet > 255) { + debug('no available subnets in 10.4.0.0/16 range for network %s', networkName); + return false; + } + + const nerdctlID = crypto.randomBytes(32).toString('hex'); + const bridgeName = `br-${nerdctlID.slice(0, 12)}`; + + const conflist = { + cniVersion: '1.0.0', + name: networkName, + nerdctlID, + nerdctlLabels: {}, + plugins: buildPlugins(bridgeName, subnet), + }; + + // Write atomically via temp file + rename to prevent concurrent processes + // from reading a partially-written conflist + const tmpPath = `${conflistPath}.${process.pid}.tmp`; + try { + fs.writeFileSync(tmpPath, JSON.stringify(conflist, null, 2), 'utf8'); + fs.renameSync(tmpPath, conflistPath); + debug('created CNI conflist for network %s at %s (subnet 10.4.%d.0/24)', networkName, conflistPath, subnet); + return true; + } catch (err) { + // Clean up temp file on failure + try { fs.unlinkSync(tmpPath); } catch {} + + // Permission errors must surface to the user — silent failure here leads + // to cryptic container networking errors downstream + if (err.code === 'EACCES' || err.code === 'EPERM') { + throw new Error( + `Permission denied writing CNI config for network "${networkName}" at ${conflistPath}. ` + + 'Run "lando setup" to fix CNI directory permissions.', + ); + } + debug('failed to create CNI conflist for network %s: %s', networkName, err.message); + return false; + } +}; diff --git a/utils/ensure-compose-cni-networks.js b/utils/ensure-compose-cni-networks.js new file mode 100644 index 000000000..0d2407a2f --- /dev/null +++ b/utils/ensure-compose-cni-networks.js @@ -0,0 +1,87 @@ +'use strict'; + +const fs = require('fs'); +const yaml = require('js-yaml'); + +const ensureCniNetwork = require('./ensure-cni-network'); + +/** + * Ensure CNI network conflist files exist for ALL networks that docker-compose + * will create from a set of compose files. + * + * When using docker-compose via finch-daemon, networks are created at the + * Docker API level but NOT at the CNI level. The nerdctl OCI runtime hook + * needs CNI conflist files for container networking to work. This utility + * parses the compose YAML files, resolves docker-compose-style network names, + * and pre-creates CNI configs for each one. + * + * This covers the gap where only `${project}_default` was handled previously. + * Custom networks defined in compose files (e.g. `edge`, `backend`, etc.) + * now get CNI configs too. + * + * @param {string[]} composeFiles - Array of paths to compose YAML files. + * @param {string} project - The compose project name (e.g. 'myapp'). + * @param {Object} [opts={}] - Options. + * @param {string} [opts.cniNetconfPath] - CNI config directory (passed to ensureCniNetwork). + * @param {Function} [opts.debug] - Debug logging function. + * @returns {string[]} Array of network names for which CNI configs were ensured. + */ +module.exports = (composeFiles, project, opts = {}) => { + const debug = opts.debug || (() => {}); + const ensuredNetworks = []; + + // Always ensure the implicit _default network — docker-compose creates + // this even when no networks are explicitly defined in compose files. + const defaultNet = `${project}_default`; + ensureCniNetwork(defaultNet, opts); + ensuredNetworks.push(defaultNet); + + // Collect network definitions from all compose files. + // docker-compose merges networks across multiple files, so we do the same. + /** @type {Object} */ + const allNetworks = {}; + + for (const file of composeFiles) { + try { + const content = fs.readFileSync(file, 'utf8'); + const doc = yaml.load(content); + if (doc && doc.networks && typeof doc.networks === 'object') { + for (const [name, config] of Object.entries(doc.networks)) { + // Later files override earlier ones (same as docker-compose merge) + allNetworks[name] = config || {}; + } + } + } catch (err) { + // Log but don't fail — missing or invalid compose files will be caught + // by docker-compose itself with a better error message. + debug('failed to parse compose file %s for CNI network extraction: %s', file, err.message); + } + } + + // Ensure CNI configs for each non-external network + for (const [name, config] of Object.entries(allNetworks)) { + // External networks are managed elsewhere — not created by docker-compose. + // docker-compose treats any truthy `external` value as external (boolean or object with name). + if (config.external) { + debug('skipping external network %s for CNI config', name); + continue; + } + + // Resolve the actual network name docker-compose will create: + // - If the network has an explicit `name:` property, docker-compose uses it as-is + // - Otherwise, docker-compose prefixes with `${project}_` + const resolvedName = config.name || `${project}_${name}`; + + // Skip if already ensured (e.g. if the default network is also explicitly defined) + if (ensuredNetworks.includes(resolvedName)) { + continue; + } + + debug('ensuring CNI config for compose network %s (resolved: %s)', name, resolvedName); + ensureCniNetwork(resolvedName, opts); + ensuredNetworks.push(resolvedName); + } + + debug('ensured CNI configs for %d networks: %s', ensuredNetworks.length, ensuredNetworks.join(', ')); + return ensuredNetworks; +}; diff --git a/utils/get-buildkit-config.js b/utils/get-buildkit-config.js new file mode 100644 index 000000000..da9e2a51e --- /dev/null +++ b/utils/get-buildkit-config.js @@ -0,0 +1,73 @@ +'use strict'; + +/** + * Generate a BuildKit TOML configuration string. + * + * This is the config generator for the buildkitd daemon that Lando manages + * alongside containerd. It produces a config that uses the containerd worker + * (not OCI), sets up garbage collection policies, and optionally configures + * registry mirrors. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.containerdSocket="/run/lando/containerd.sock"] - containerd gRPC socket address. + * @param {string} [opts.buildkitSocket] - buildkitd gRPC listen address (unix socket path). If provided, a [grpc] section is added. + * @param {string} [opts.cacheDir="/var/lib/lando/buildkit"] - BuildKit cache directory. + * @param {number} [opts.gcMaxBytes=10737418240] - Max bytes for GC policy (default 10GB). + * @param {number} [opts.parallelism] - Max parallelism for builds (default: CPU count). + * @param {boolean} [opts.debug=false] - Enable debug-level logging. + * @param {Object} [opts.registryMirrors={}] - Registry mirror map, e.g. {"docker.io": "https://mirror.example.com"}. + * @returns {string} TOML configuration content. + * + * @since 4.0.0 + * @example + * const getBuildkitConfig = require('../utils/get-buildkit-config'); + * const config = getBuildkitConfig({containerdSocket: '/run/lando/containerd.sock'}); + * fs.writeFileSync('/path/to/buildkit-config.toml', config, 'utf8'); + */ +module.exports = (opts = {}) => { + const os = require('os'); + const containerdSocket = opts.containerdSocket || '/run/lando/containerd.sock'; + const buildkitSocket = opts.buildkitSocket || null; + const cacheDir = opts.cacheDir || '/var/lib/lando/buildkit'; + const gcMaxBytes = opts.gcMaxBytes || 10 * 1024 * 1024 * 1024; // 10GB default + const parallelism = opts.parallelism || Math.max(1, os.cpus().length); + const debug = opts.debug || false; + const registryMirrors = opts.registryMirrors || {}; // { "docker.io": "https://mirror.example.com" } + + const lines = [ + '# Lando BuildKit configuration', + '# Auto-generated — do not edit manually', + '', + debug ? 'debug = true' : undefined, + debug ? '' : undefined, + // gRPC listen address (buildkitd socket) + buildkitSocket ? '[grpc]' : undefined, + buildkitSocket ? ` address = ["unix://${buildkitSocket}"]` : undefined, + buildkitSocket ? '' : undefined, + '# Use containerd worker, disable OCI worker', + '[worker.oci]', + ' enabled = false', + '', + '[worker.containerd]', + ' enabled = true', + ` address = "${containerdSocket}"`, + ' platforms = ["linux/amd64", "linux/arm64"]', + ` max-parallelism = ${parallelism}`, + '', + ' # Garbage collection policy', + ' [[worker.containerd.gcpolicy]]', + ` reservedSpace = ${gcMaxBytes}`, + ' keepDuration = 604800', + ' all = true', + '', + ]; + + // Add registry mirrors if configured + for (const [registry, mirror] of Object.entries(registryMirrors)) { + lines.push(`[registry."${registry}"]`); + lines.push(` mirrors = ["${mirror}"]`); + lines.push(''); + } + + return lines.filter(l => l !== undefined).join('\n'); +}; diff --git a/utils/get-buildkit-x.js b/utils/get-buildkit-x.js new file mode 100644 index 000000000..ee091450c --- /dev/null +++ b/utils/get-buildkit-x.js @@ -0,0 +1,50 @@ +'use strict'; + +const _ = require('lodash'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const shell = require('shelljs'); + +/** + * Locate the buildkitd binary. + * + * Resolution order: + * 1. Config override (config.buildkitdBin) + * 2. ~/.lando/bin/buildkitd + * 3. PATH lookup (which buildkitd) + * 4. false (not found) + * + * Follows the same pattern as get-docker-x.js. + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.buildkitdBin] - Explicit binary path override. + * @param {string} [opts.userConfRoot] - Lando config root (default ~/.lando). + * @returns {string|false} Absolute path to the buildkitd binary, or false. + */ +module.exports = ({buildkitdBin, userConfRoot = path.join(os.homedir(), '.lando')} = {}) => { + const bin = 'buildkitd'; + const join = (process.platform === 'win32') ? path.win32.join : path.posix.join; + + // 1. Config override + if (buildkitdBin && fs.existsSync(buildkitdBin)) { + return path.normalize(buildkitdBin); + } + + // 2. ~/.lando/bin/buildkitd + const landoBin = join(userConfRoot, 'bin', bin); + if (fs.existsSync(landoBin) && !fs.statSync(landoBin).isDirectory()) { + return path.normalize(landoBin); + } + + // 3. PATH lookup + if (process.platform !== 'win32') { + const whichBin = _.toString(shell.which(bin)); + if (whichBin && fs.existsSync(whichBin)) { + return path.normalize(whichBin); + } + } + + // 4. Not found + return false; +}; diff --git a/utils/get-config-defaults.js b/utils/get-config-defaults.js index ead6181e8..7d55fbf72 100644 --- a/utils/get-config-defaults.js +++ b/utils/get-config-defaults.js @@ -27,6 +27,28 @@ const defaultConfig = options => ({ disablePlugins: [], dockerBin: require('../utils/get-docker-x')(), dockerBinDir: require('../utils/get-docker-bin-path')(), + // Engine selection: 'auto' | 'docker' | 'containerd' + engine: 'auto', + // Containerd system-level binary directory for root-owned binaries + // (containerd, containerd-shim-runc-v2, runc, buildkitd, buildctl) + containerdSystemBinDir: '/usr/local/lib/lando/bin', + // Containerd binary path overrides (null = use defaults) + // containerdBin defaults to containerdSystemBinDir/containerd + // nerdctlBin defaults to ~/.lando/bin/nerdctl (user-owned, only talks to socket) + // buildkitdBin defaults to containerdSystemBinDir/buildkitd + containerdBin: null, + nerdctlBin: null, + buildkitdBin: null, + // Containerd socket path override (null = use default at /run/lando/containerd.sock) + containerdSocket: null, + // Finch daemon binary path override (null = use default at ~/.lando/bin/finch-daemon) + finchDaemonBin: null, + // Finch daemon socket path override (null = use default at /run/lando/finch.sock) + finchDaemonSocket: null, + // Registry auth config path override (null = use default ~/.docker/config.json) + registryAuth: null, + // BuildKit build cache max size (human-readable string for config display) + buildkitCacheMax: '10GB', env: process.env, home: os.homedir(), isArmed: _.includes(['arm64', 'aarch64'], process.arch), diff --git a/utils/get-containerd-config.js b/utils/get-containerd-config.js new file mode 100644 index 000000000..cf2a70ab6 --- /dev/null +++ b/utils/get-containerd-config.js @@ -0,0 +1,82 @@ +'use strict'; + +/** + * Generate a containerd TOML configuration string. + * + * This is the shared config generator used by the containerd daemon manager + * on all platforms (Linux, WSL, macOS/Lima). It produces a minimal config + * that isolates Lando's containerd instance from any other container runtime + * on the host. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.socketPath="/run/lando/containerd.sock"] - containerd gRPC socket address. + * @param {string} [opts.stateDir="/run/lando/containerd"] - containerd state directory (ephemeral, under RuntimeDirectory). + * @param {string} [opts.rootDir="/var/lib/lando/containerd/root"] - containerd root directory. + * @param {boolean} [opts.debug=false] - Enable debug-level logging. + * @param {string} [opts.snapshotter="overlayfs"] - Snapshotter plugin name. + * @param {boolean} [opts.disableCri=true] - Disable the CRI plugin (saves resources). + * @param {string} [opts.platform] - Override platform detection (for testing). + * @returns {string} TOML configuration content. + * + * @since 4.0.0 + * @example + * const getContainerdConfig = require('../utils/get-containerd-config'); + * const config = getContainerdConfig({socketPath: '/run/lando/containerd.sock'}); + * fs.writeFileSync('/path/to/config.toml', config, 'utf8'); + */ +module.exports = (opts = {}) => { + const socketPath = opts.socketPath || '/run/lando/containerd.sock'; + // State directory MUST be under /run/ (tmpfs) so shim bundles are cleaned up on + // reboot. The hardcoded shim socket dir (/run/containerd/s/) is a compile-time + // constant in containerd — hashes are unique per containerd instance, so sharing + // the directory is safe. Using a Lando-specific state dir avoids stale-bundle + // problems that cause "get state: context deadline exceeded" errors after restarts. + const stateDir = opts.stateDir || '/run/lando/containerd'; + const rootDir = opts.rootDir || '/var/lib/lando/containerd/root'; + const debug = opts.debug || false; + const snapshotter = opts.snapshotter || 'overlayfs'; + const disableCri = opts.disableCri !== false; // default true + const platform = opts.platform || process.platform; + + // Top-level keys MUST come before any [section] in TOML + const lines = [ + '# Lando containerd configuration', + '# Auto-generated — do not edit manually', + 'version = 3', + `root = "${rootDir}"`, + `state = "${stateDir}"`, + ]; + + // Disable CRI plugin (not needed for Lando — saves resources) + if (disableCri) { + lines.push('disabled_plugins = ["io.containerd.grpc.v1.cri"]'); + } + + lines.push(''); + + // Sections + lines.push('[grpc]'); + lines.push(` address = "${socketPath}"`); + lines.push(''); + + // ttrpc socket must also be redirected to avoid /run/containerd permission errors + const ttrpcSocket = socketPath.replace(/containerd\.sock$/, 'containerd-ttrpc.sock'); + lines.push('[ttrpc]'); + lines.push(` address = "${ttrpcSocket}"`); + lines.push(''); + + // Debug logging + if (debug) { + lines.push('[debug]'); + lines.push(' level = "debug"'); + lines.push(''); + } + + // Snapshotter config + lines.push('[plugins]'); + lines.push(` [plugins."io.containerd.snapshotter.v1.${snapshotter}"]`); + lines.push(` root_path = "${rootDir}/snapshots"`); + lines.push(''); + + return lines.join('\n'); +}; diff --git a/utils/get-containerd-download-url.js b/utils/get-containerd-download-url.js new file mode 100644 index 000000000..9c92b6fc3 --- /dev/null +++ b/utils/get-containerd-download-url.js @@ -0,0 +1,71 @@ +'use strict'; + +/** + * Return the GitHub release download URL for containerd-stack binaries. + * + * Supports three binaries — `containerd`, `nerdctl`, and `buildkit` (buildkitd) + * — across linux/{amd64,arm64} and darwin/{amd64,arm64}. + * + * Each release tarball extracts into a `bin/` directory containing the + * executable(s). The caller is responsible for extracting and placing + * the binary. + * + * Default versions are intentionally conservative and match the 2.0.x / 0.18.x + * series referenced in the containerd-engine design. + * + * @param {string} binary - One of 'containerd', 'nerdctl', or 'buildkit'. + * @param {Object} [opts={}] - Options. + * @param {string} [opts.version] - Semver version (no leading "v"). + * @param {string} [opts.platform] - 'linux' or 'darwin' (default: process.platform). + * @param {string} [opts.arch] - 'amd64' or 'arm64' (default: auto-detected). + * @returns {string} The full download URL. + * @throws {Error} If an unsupported binary, platform, or arch is given. + */ +module.exports = (binary, {version, platform, arch} = {}) => { + // Normalise platform + platform = platform || process.platform; + if (platform === 'win32') platform = 'windows'; + + // Normalise arch from Node conventions to Go conventions + arch = arch || (process.arch === 'x64' ? 'amd64' : process.arch); + + // Validate platform + arch + const supported = ['linux-amd64', 'linux-arm64', 'darwin-amd64', 'darwin-arm64']; + const key = `${platform}-${arch}`; + if (!supported.includes(key)) { + throw new Error(`Unsupported platform/arch combination: ${key}`); + } + + switch (binary) { + // containerd releases: + // https://github.com/containerd/containerd/releases/download/v{V}/containerd-{V}-{OS}-{ARCH}.tar.gz + // tarball contains: bin/containerd, bin/containerd-shim*, bin/ctr + case 'containerd': { + const v = version || '2.0.4'; + // Note: containerd does not ship darwin binaries on GitHub — macOS users + // would use Lima or Homebrew. We still return the URL for consistency; + // the download step will surface the 404 in a human-friendly way. + return `https://github.com/containerd/containerd/releases/download/v${v}/containerd-${v}-${platform}-${arch}.tar.gz`; + } + + // nerdctl releases: + // https://github.com/containerd/nerdctl/releases/download/v{V}/nerdctl-{V}-{OS}-{ARCH}.tar.gz + // tarball contains: nerdctl + case 'nerdctl': { + const v = version || '2.0.5'; + return `https://github.com/containerd/nerdctl/releases/download/v${v}/nerdctl-${v}-${platform}-${arch}.tar.gz`; + } + + // buildkit releases: + // https://github.com/moby/buildkit/releases/download/v{V}/buildkit-v{V}.{OS}-{ARCH}.tar.gz + // Note: uses a dot (.) between version and OS, not a dash (-) + // tarball contains: bin/buildkitd, bin/buildctl + case 'buildkit': { + const v = version || '0.18.2'; + return `https://github.com/moby/buildkit/releases/download/v${v}/buildkit-v${v}.${platform}-${arch}.tar.gz`; + } + + default: + throw new Error(`Unknown binary "${binary}". Expected one of: containerd, nerdctl, buildkit`); + } +}; diff --git a/utils/get-containerd-paths.js b/utils/get-containerd-paths.js new file mode 100644 index 000000000..1008a26f7 --- /dev/null +++ b/utils/get-containerd-paths.js @@ -0,0 +1,20 @@ +'use strict'; + +const os = require('os'); +const path = require('path'); + +module.exports = (config = {}) => { + const userConfRoot = config.userConfRoot || path.join(os.homedir(), '.lando'); + const socketDir = config.containerdSocketDir || '/run/lando'; + + return { + userConfRoot, + configDir: path.join(userConfRoot, 'config'), + runDir: path.join(userConfRoot, 'run'), + socketDir, + containerdSocket: config.containerdSocket || path.join(socketDir, 'containerd.sock'), + buildkitSocket: config.buildkitSocket || path.join(socketDir, 'buildkitd.sock'), + finchSocket: config.finchDaemonSocket || config.finchSocket || path.join(socketDir, 'finch.sock'), + finchCredentialSocket: config.finchCredentialSocket || path.join(socketDir, 'finch-credential.sock'), + }; +}; diff --git a/utils/get-containerd-x.js b/utils/get-containerd-x.js new file mode 100644 index 000000000..d58efb3d1 --- /dev/null +++ b/utils/get-containerd-x.js @@ -0,0 +1,50 @@ +'use strict'; + +const _ = require('lodash'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const shell = require('shelljs'); + +/** + * Locate the containerd binary. + * + * Resolution order: + * 1. Config override (config.containerdBin) + * 2. ~/.lando/bin/containerd + * 3. PATH lookup (which containerd) + * 4. false (not found) + * + * Follows the same pattern as get-docker-x.js. + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.containerdBin] - Explicit binary path override. + * @param {string} [opts.userConfRoot] - Lando config root (default ~/.lando). + * @returns {string|false} Absolute path to the containerd binary, or false. + */ +module.exports = ({containerdBin, userConfRoot = path.join(os.homedir(), '.lando')} = {}) => { + const bin = 'containerd'; + const join = (process.platform === 'win32') ? path.win32.join : path.posix.join; + + // 1. Config override + if (containerdBin && fs.existsSync(containerdBin)) { + return path.normalize(containerdBin); + } + + // 2. ~/.lando/bin/containerd + const landoBin = join(userConfRoot, 'bin', bin); + if (fs.existsSync(landoBin) && !fs.statSync(landoBin).isDirectory()) { + return path.normalize(landoBin); + } + + // 3. PATH lookup + if (process.platform !== 'win32') { + const whichBin = _.toString(shell.which(bin)); + if (whichBin && fs.existsSync(whichBin)) { + return path.normalize(whichBin); + } + } + + // 4. Not found + return false; +}; diff --git a/utils/get-finch-daemon-download-url.js b/utils/get-finch-daemon-download-url.js new file mode 100644 index 000000000..975eb232f --- /dev/null +++ b/utils/get-finch-daemon-download-url.js @@ -0,0 +1,46 @@ +'use strict'; + +/** + * Return the GitHub release download URL for the finch-daemon binary. + * + * finch-daemon provides a Docker-compatible API socket backed by containerd, + * allowing Traefik (and other Docker-API consumers) to work unchanged with + * the containerd engine. + * + * Currently finch-daemon only publishes Linux/amd64 release assets on GitHub. + * Darwin and arm64 are accepted for forward-compatibility but the caller + * should be aware that those assets may not exist upstream yet. + * + * Release asset naming: + * `finch-daemon-{version}-{platform}-{arch}.tar.gz` + * + * Default version is intentionally conservative and matches the latest stable + * release at the time of implementation. + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.version] - Semver version (no leading "v"). + * @param {string} [opts.platform] - 'linux' or 'darwin' (default: process.platform). + * @param {string} [opts.arch] - 'amd64' or 'arm64' (default: auto-detected). + * @returns {string} The full download URL. + * @throws {Error} If an unsupported platform or arch is given. + */ +module.exports = ({version, platform, arch} = {}) => { + const v = version || '0.22.0'; + + // Normalise platform + platform = platform || process.platform; + if (platform === 'win32') platform = 'windows'; + + // Normalise arch from Node conventions to Go conventions + arch = arch || (process.arch === 'x64' ? 'amd64' : process.arch); + + // Validate platform + arch + const supported = ['linux-amd64', 'linux-arm64', 'darwin-amd64', 'darwin-arm64']; + const key = `${platform}-${arch}`; + if (!supported.includes(key)) { + throw new Error(`Unsupported platform/arch combination: ${key}`); + } + + // https://github.com/runfinch/finch-daemon/releases/download/v{V}/finch-daemon-{V}-{OS}-{ARCH}.tar.gz + return `https://github.com/runfinch/finch-daemon/releases/download/v${v}/finch-daemon-${v}-${key}.tar.gz`; +}; diff --git a/utils/get-nerdctl-config.js b/utils/get-nerdctl-config.js new file mode 100644 index 000000000..c9919ee10 --- /dev/null +++ b/utils/get-nerdctl-config.js @@ -0,0 +1,22 @@ +'use strict'; + +const path = require('path'); + +module.exports = (opts = {}) => { + const address = opts.containerdSocket || '/run/lando/containerd.sock'; + const namespace = opts.namespace || 'default'; + const cniNetconfPath = opts.cniNetconfPath || '/etc/lando/cni/finch'; + const finchCniRoot = opts.finchCniRoot + || (path.basename(cniNetconfPath) === 'finch' ? path.dirname(cniNetconfPath) : cniNetconfPath); + const cniPath = opts.cniPath || '/usr/local/lib/lando/cni/bin'; + + return [ + '# Lando containerd client configuration', + '# Auto-generated - do not edit manually', + `address = "${address}"`, + `namespace = "${namespace}"`, + `cni_netconfpath = "${finchCniRoot}"`, + `cni_path = "${cniPath}"`, + '', + ].join('\n'); +}; diff --git a/utils/get-nerdctl-x.js b/utils/get-nerdctl-x.js new file mode 100644 index 000000000..1414c0054 --- /dev/null +++ b/utils/get-nerdctl-x.js @@ -0,0 +1,50 @@ +'use strict'; + +const _ = require('lodash'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const shell = require('shelljs'); + +/** + * Locate the nerdctl binary. + * + * Resolution order: + * 1. Config override (config.nerdctlBin) + * 2. ~/.lando/bin/nerdctl + * 3. PATH lookup (which nerdctl) + * 4. false (not found) + * + * Follows the same pattern as get-docker-x.js. + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.nerdctlBin] - Explicit binary path override. + * @param {string} [opts.userConfRoot] - Lando config root (default ~/.lando). + * @returns {string|false} Absolute path to the nerdctl binary, or false. + */ +module.exports = ({nerdctlBin, userConfRoot = path.join(os.homedir(), '.lando')} = {}) => { + const bin = 'nerdctl'; + const join = (process.platform === 'win32') ? path.win32.join : path.posix.join; + + // 1. Config override + if (nerdctlBin && fs.existsSync(nerdctlBin)) { + return path.normalize(nerdctlBin); + } + + // 2. ~/.lando/bin/nerdctl + const landoBin = join(userConfRoot, 'bin', bin); + if (fs.existsSync(landoBin) && !fs.statSync(landoBin).isDirectory()) { + return path.normalize(landoBin); + } + + // 3. PATH lookup + if (process.platform !== 'win32') { + const whichBin = _.toString(shell.which(bin)); + if (whichBin && fs.existsSync(whichBin)) { + return path.normalize(whichBin); + } + } + + // 4. Not found + return false; +}; diff --git a/utils/get-setup-engine.js b/utils/get-setup-engine.js new file mode 100644 index 000000000..8f991696b --- /dev/null +++ b/utils/get-setup-engine.js @@ -0,0 +1,24 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +module.exports = (lando, options = {}) => { + const requested = options.engine || lando.config.engine || 'auto'; + if (requested === 'docker' || requested === 'containerd') return requested; + + const cached = lando.cache.get('engine-selection'); + if (cached === 'docker' || cached === 'containerd') return cached; + + const dockerBin = lando.config.dockerBin || require('./get-docker-x')(); + if (dockerBin && fs.existsSync(dockerBin)) return 'docker'; + + const userConfRoot = lando.config.userConfRoot || path.join(os.homedir(), '.lando'); + const systemBinDir = lando.config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + const containerdBin = lando.config.containerdBin || path.join(systemBinDir, 'containerd'); + const nerdctlBin = lando.config.nerdctlBin || path.join(userConfRoot, 'bin', 'nerdctl'); + if (fs.existsSync(containerdBin) || fs.existsSync(nerdctlBin)) return 'containerd'; + + return 'docker'; +}; diff --git a/utils/get-sudo-command.js b/utils/get-sudo-command.js new file mode 100644 index 000000000..c157daa92 --- /dev/null +++ b/utils/get-sudo-command.js @@ -0,0 +1,12 @@ +'use strict'; + +/** + * Get the sudo command prefix for running a command with elevated privileges. + * + * Uses `sudo -n` (non-interactive) which requires passwordless sudo to be + * configured for the current user (e.g. via NOPASSWD in sudoers). + * + * @param {...string} cmd - Command and arguments to prefix with sudo. + * @return {string[]} The command array prefixed with sudo -n. + */ +module.exports = (...cmd) => ['sudo', '-n', ...cmd]; diff --git a/utils/perf-timer.js b/utils/perf-timer.js new file mode 100644 index 000000000..5c9878d7c --- /dev/null +++ b/utils/perf-timer.js @@ -0,0 +1,32 @@ +'use strict'; + +/** + * Create a performance timer. + * + * Returns an object with a `label` property and a `stop()` method that + * returns the elapsed time in milliseconds (fractional) since the timer + * was created. Uses `process.hrtime.bigint()` for nanosecond precision. + * + * @param {string} label - Human-readable label for the timer. + * @returns {{stop: function(): number, label: string}} + * @since 4.0.0 + * + * @example + * const perfTimer = require('../utils/perf-timer'); + * const timer = perfTimer('container start'); + * // ... do work ... + * const ms = timer.stop(); + * console.log(`${timer.label}: ${ms}ms`); + */ +const perfTimer = label => { + const start = process.hrtime.bigint(); + return { + label, + stop: () => { + const end = process.hrtime.bigint(); + return Number(end - start) / 1e6; // nanoseconds → milliseconds + }, + }; +}; + +module.exports = perfTimer; diff --git a/utils/remove-compose-cni-conflists.js b/utils/remove-compose-cni-conflists.js new file mode 100644 index 000000000..a365b6827 --- /dev/null +++ b/utils/remove-compose-cni-conflists.js @@ -0,0 +1,63 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const yaml = require('js-yaml'); + +/** + * Remove CNI conflist files for a project's networks. + * + * This clears conflist files so finch-daemon doesn't auto-report Docker API + * networks (without compose labels) when `listNetworks` is called. After + * removal, docker-compose can create networks fresh with proper compose labels, + * and finch-daemon will write new conflist files for them. + * + * @param {string[]} composeFiles - Array of paths to compose YAML files. + * @param {string} project - The compose project name. + * @param {Object} [opts={}] - Options. + * @param {string} [opts.cniNetconfPath='/etc/lando/cni/finch'] - CNI config directory. + * @param {Function} [opts.debug] - Debug logging function. + * @return {string[]} Array of removed conflist file paths. + */ +module.exports = (composeFiles, project, opts = {}) => { + const cniNetconfPath = opts.cniNetconfPath || '/etc/lando/cni/finch'; + const debug = opts.debug || (() => {}); + const removed = []; + + // Collect all network names this project uses + const networkNames = new Set(); + networkNames.add(`${project}_default`); + + for (const file of composeFiles) { + try { + const content = fs.readFileSync(file, 'utf8'); + const doc = yaml.load(content); + if (doc && doc.networks && typeof doc.networks === 'object') { + for (const [name, config] of Object.entries(doc.networks)) { + const cfg = config || {}; + if (cfg.external) continue; + const resolvedName = cfg.name || `${project}_${name}`; + networkNames.add(resolvedName); + } + } + } catch (err) { + debug('failed to parse compose file %s for CNI conflist removal: %s', file, err.message); + } + } + + // Remove conflist files for each network + for (const name of networkNames) { + const conflistPath = path.join(cniNetconfPath, `nerdctl-${name}.conflist`); + try { + if (fs.existsSync(conflistPath)) { + fs.unlinkSync(conflistPath); + removed.push(conflistPath); + debug('removed CNI conflist %s', conflistPath); + } + } catch (err) { + debug('failed to remove CNI conflist %s: %s', conflistPath, err.message); + } + } + + return removed; +}; diff --git a/utils/remove-stale-compose-networks.js b/utils/remove-stale-compose-networks.js new file mode 100644 index 000000000..da2b7b94f --- /dev/null +++ b/utils/remove-stale-compose-networks.js @@ -0,0 +1,74 @@ +'use strict'; + +/** + * Remove project networks that exist without docker-compose labels. + * + * **Why this is needed**: finch-daemon does not persist Docker API network + * labels (including `com.docker.compose.*`) across daemon restarts. When the + * `lando-containerd.service` restarts, all networks lose their labels. + * docker-compose v2 validates that existing networks have the correct + * `com.docker.compose.network` label and refuses to start if it doesn't + * match (error: "network was not created by compose"). + * + * This utility removes project networks that lack compose labels so that + * `docker-compose up` can recreate them with proper labels. Only networks + * with no connected containers are removed (safe for stopped containers; + * running containers are left untouched). + * + * @param {Object} dockerode - Dockerode instance pointed at finch-daemon. + * @param {string} project - The compose project name (e.g. 'landocontainerd'). + * @param {Function} [debug] - Debug logging function. + * @return {Promise} Names of removed networks. + */ +module.exports = async (dockerode, project, debug = () => {}) => { + const removed = []; + + /** @type {Array<{Name: string, Id: string, Labels: Object}>} */ + let nets; + try { + nets = await dockerode.listNetworks(); + } catch (err) { + debug('failed to list networks for stale cleanup: %s', err.message); + return removed; + } + + const projectPrefix = `${project}_`; + + for (const net of nets) { + // Only consider networks belonging to this project + if (!net.Name || !net.Name.startsWith(projectPrefix)) continue; + + // If the network already has compose labels, docker-compose will accept it + const labels = net.Labels || {}; + if (labels['com.docker.compose.project']) continue; + + // Safety check: don't remove networks with connected containers + try { + const info = await dockerode.getNetwork(net.Id || net.Name).inspect(); + const containers = info.Containers || {}; + if (Object.keys(containers).length > 0) { + debug('skipping removal of stale network %s — has %d connected containers', + net.Name, Object.keys(containers).length); + continue; + } + } catch (err) { + // If inspect fails, skip this network rather than risk removing it + debug('failed to inspect network %s, skipping: %s', net.Name, err.message); + continue; + } + + try { + debug('removing stale network %s (no compose labels)', net.Name); + await dockerode.getNetwork(net.Id || net.Name).remove(); + removed.push(net.Name); + } catch (err) { + debug('failed to remove stale network %s: %s', net.Name, err.message); + } + } + + if (removed.length > 0) { + debug('removed %d stale project networks: %s', removed.length, removed.join(', ')); + } + + return removed; +}; diff --git a/utils/resolve-containerd-mount.js b/utils/resolve-containerd-mount.js new file mode 100644 index 000000000..b8316b4f6 --- /dev/null +++ b/utils/resolve-containerd-mount.js @@ -0,0 +1,106 @@ +'use strict'; + +const os = require('os'); +const path = require('path'); + +/** + * Resolve a host mount path for the containerd backend and determine accessibility. + * + * On Linux and WSL2, bind mounts work natively so paths are passed through as-is. + * On macOS (Lima), only paths within Lima mount points (default: home directory) + * are accessible inside the VM. Paths outside the mounts are flagged as inaccessible + * with a warning message explaining how to add the path to the Lima VM config. + * + * @param {string} hostPath - The host-side path to resolve. + * @param {Object} [opts={}] - Options. + * @param {string} [opts.platform] - Override platform (default: process.platform). + * @param {string} [opts.homedir] - Override home directory (default: os.homedir()). + * @param {Array} [opts.limaMounts] - Lima mount definitions. Each entry should have + * `{location: string, writable?: boolean}`. Defaults to `[{location: homedir, writable: true}]`. + * @returns {{resolvedPath: string, accessible: boolean, warning: string|null}} + * + * @since 4.0.0 + * @example + * const {resolveContainerdMount} = require('../utils/resolve-containerd-mount'); + * + * const result = resolveContainerdMount('/tmp/myproject', {platform: 'darwin'}); + * // => {resolvedPath: '/tmp/myproject', accessible: false, warning: '...'} + * + * const result2 = resolveContainerdMount('~/code/app', {platform: 'darwin'}); + * // => {resolvedPath: '/Users/me/code/app', accessible: true, warning: null} + */ +const resolveContainerdMount = (hostPath, opts = {}) => { + const platform = opts.platform || process.platform; + const homedir = opts.homedir || os.homedir(); + const limaMounts = opts.limaMounts || [{location: homedir, writable: true}]; + + // Resolve the path: expand ~ and make relative paths absolute + let resolvedPath = hostPath; + if (!resolvedPath || typeof resolvedPath !== 'string') { + return {resolvedPath: '', accessible: false, warning: 'Mount path is empty or invalid'}; + } + + // Expand tilde + if (resolvedPath.startsWith('~')) { + resolvedPath = path.join(homedir, resolvedPath.slice(1)); + } + + // Resolve to absolute + resolvedPath = path.resolve(resolvedPath); + + // Linux: bind mounts work natively, always accessible + if (platform === 'linux') { + return {resolvedPath, accessible: true, warning: null}; + } + + // WSL2 (detected via win32 platform or explicit): /mnt/c/ paths work fine + if (platform === 'win32') { + return {resolvedPath, accessible: true, warning: null}; + } + + // macOS / Darwin: check if path is within a Lima mount point + if (platform === 'darwin') { + const isWithinMount = limaMounts.some(mount => { + const mountLocation = path.resolve(mount.location); + // path must be the mount location itself or a subdirectory of it + return resolvedPath === mountLocation || resolvedPath.startsWith(mountLocation + path.sep); + }); + + if (isWithinMount) { + return {resolvedPath, accessible: true, warning: null}; + } + + return { + resolvedPath, + accessible: false, + warning: `Path "${resolvedPath}" is not shared with the Lima VM. ` + + 'Lima only mounts your home directory by default. ' + + 'To mount paths outside your home directory, add them to your Lima VM config ' + + '(~/.lima/lando/lima.yaml) under the "mounts" section and restart the VM. ' + + 'See https://lima-vm.io/docs/config/mount/ for details.', + }; + } + + // Unknown platform: passthrough + return {resolvedPath, accessible: true, warning: null}; +}; + +/** + * Quick boolean check for whether a host path is accessible to containerd. + * + * @param {string} hostPath - The host-side path to check. + * @param {Object} [opts={}] - Same options as `resolveContainerdMount`. + * @returns {boolean} True if the path is accessible, false otherwise. + * + * @since 4.0.0 + * @example + * const {isPathAccessible} = require('../utils/resolve-containerd-mount'); + * if (!isPathAccessible('/tmp/outside', {platform: 'darwin'})) { + * console.warn('Path is not accessible in the Lima VM'); + * } + */ +const isPathAccessible = (hostPath, opts = {}) => { + return resolveContainerdMount(hostPath, opts).accessible; +}; + +module.exports = {resolveContainerdMount, isPathAccessible}; diff --git a/utils/run-elevated.js b/utils/run-elevated.js index 88325dd6e..096a97121 100644 --- a/utils/run-elevated.js +++ b/utils/run-elevated.js @@ -50,7 +50,6 @@ module.exports = (command, options, stdout = '', stderr = '') => { command.unshift('--'); // if we want to notify the user if (options.notify) command.unshift('--bell'); - // if this is non-interactive then pass that along to sudo if (!options.isInteractive) command.unshift('--non-interactive'); // if interactive and have a password then add -S so we can write the password to stdin if (options.isInteractive && options.password) command.unshift('--stdin'); diff --git a/utils/run-powershell-script.js b/utils/run-powershell-script.js index dfbdb9dd0..9fd7868de 100644 --- a/utils/run-powershell-script.js +++ b/utils/run-powershell-script.js @@ -8,6 +8,9 @@ const winpath = require('./wslpath-2-winpath'); const {spawn, spawnSync} = require('child_process'); const parseArgs = args => args.map(arg => arg.startsWith('-') ? arg : `"${arg}"`).join(' '); +const isWSLInteropError = stderr => typeof stderr === 'string' && stderr.includes('UtilAcceptVsock:271: accept4 failed 110'); +// TODO: Once the UI supports multi-line follow-up guidance, surface a restart-WSL recommendation alongside this error. +const formatWSLInteropError = () => 'Windows interop is unavailable from WSL; restart WSL with `wsl --shutdown` and try again.'; // get the bosmang const defaults = { @@ -68,7 +71,8 @@ module.exports = (script, args = [], options = {}, stdout = '', stderr = '', car debug('powershell script %o done with code %o', script, code); // if code is non-zero and we arent ignoring then reject here if (code !== 0 && !options.ignoreReturnCode) { - const error = new Error(stderr); + const message = isWSLInteropError(stderr) ? formatWSLInteropError(stderr) : stderr; + const error = new Error(message); error.code = code; reject(error); } @@ -79,3 +83,6 @@ module.exports = (script, args = [], options = {}, stdout = '', stderr = '', car }); }); }; + +module.exports._isWSLInteropError = isWSLInteropError; +module.exports._formatWSLInteropError = formatWSLInteropError; diff --git a/utils/setup-containerd-auth.js b/utils/setup-containerd-auth.js new file mode 100644 index 000000000..be75da477 --- /dev/null +++ b/utils/setup-containerd-auth.js @@ -0,0 +1,174 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +/** + * Known Docker credential helper binaries. + * + * These are the `credsStore` / `credHelpers` values that may appear in a + * Docker config.json. finch-daemon and docker-compose support the same + * credential helper protocol, so we just need to verify the helper binary + * is available on `$PATH`. + * + * @type {string[]} + * @private + */ +const KNOWN_CRED_HELPERS = [ + 'docker-credential-osxkeychain', + 'docker-credential-desktop', + 'docker-credential-ecr-login', + 'docker-credential-gcloud', + 'docker-credential-pass', + 'docker-credential-secretservice', + 'docker-credential-wincred', +]; + +/** + * Resolve the path to the Docker config directory. + * + * Checks (in order): + * 1. Explicit `configPath` option (override) + * 2. `DOCKER_CONFIG` environment variable + * 3. Default `~/.docker` + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.configPath] - Explicit path to the Docker config *directory*. + * @param {Object} [opts.env] - Environment variables to inspect (default: `process.env`). + * @returns {string} Absolute path to the Docker config directory. + */ +const getDockerConfigPath = (opts = {}) => { + if (opts.configPath) return path.resolve(opts.configPath); + const env = opts.env || process.env; + if (env.DOCKER_CONFIG) return path.resolve(env.DOCKER_CONFIG); + return path.join(os.homedir(), '.docker'); +}; + +/** + * Detect credential helpers referenced in a Docker config.json. + * + * Reads the `credsStore` and `credHelpers` fields and returns a list of + * unique helper binary names (e.g. `docker-credential-osxkeychain`). + * + * @param {Object} configJson - Parsed contents of `config.json`. + * @returns {string[]} Unique credential helper binary names found in the config. + * @private + */ +const detectCredentialHelpers = configJson => { + const helpers = new Set(); + + // credsStore — global credential store + if (configJson.credsStore) { + helpers.add(`docker-credential-${configJson.credsStore}`); + } + + // credHelpers — per-registry credential helpers + if (configJson.credHelpers && typeof configJson.credHelpers === 'object') { + for (const helper of Object.values(configJson.credHelpers)) { + helpers.add(`docker-credential-${helper}`); + } + } + + return Array.from(helpers); +}; + +/** + * Build the auth configuration for the containerd backend. + * + * finch-daemon and docker-compose read `~/.docker/config.json` natively for + * registry authentication, using the same format and credential helpers as + * Docker. This function: + * + * 1. Locates the Docker config directory (respects `DOCKER_CONFIG` env var). + * 2. Reads and parses `config.json` if it exists. + * 3. Detects any credential helpers referenced in the config. + * 4. Returns the config path and environment variables to inject into + * docker-compose/Dockerode commands so that auth "just works" with + * Lando's isolated containerd. + * + * @param {Object} [opts={}] - Configuration options. + * @param {string} [opts.configPath] - Explicit Docker config directory override. + * When set, `DOCKER_CONFIG` will be injected into the returned env so + * docker-compose/Dockerode finds it. When `null`/`undefined`, the default + * `~/.docker` is used and no extra env is needed. + * @param {Object} [opts.env] - Environment variables to inspect (default: `process.env`). + * @param {boolean} [opts.debug] - Reserved for future debug logging support. + * @returns {{dockerConfig: string, env: Object, configExists: boolean, credentialHelpers: string[]}} + * - `dockerConfig` — absolute path to the Docker config *directory*. + * - `env` — environment variables to inject (e.g. `{DOCKER_CONFIG: '...'}`). + * Empty object when the default path is used. + * - `configExists` — whether `config.json` was found in the directory. + * - `credentialHelpers` — credential helper binaries referenced in the config. + * + * @since 4.0.0 + * @example + * const {getContainerdAuthConfig} = require('../utils/setup-containerd-auth'); + * const auth = getContainerdAuthConfig(); + * // auth.env → {} (default path, no override needed) + * // auth.configExists → true + * // auth.credentialHelpers → ['docker-credential-osxkeychain'] + * + * @example + * const auth = getContainerdAuthConfig({configPath: '/custom/docker'}); + * // auth.env → {DOCKER_CONFIG: '/custom/docker'} + */ +const getContainerdAuthConfig = (opts = {}) => { + const configDir = getDockerConfigPath(opts); + const configFile = path.join(configDir, 'config.json'); + + // Determine whether we need to set DOCKER_CONFIG. + // docker-compose uses ~/.docker by default — we only need to override when + // the config lives somewhere non-standard. + const defaultDir = path.join(os.homedir(), '.docker'); + const isNonStandardPath = path.resolve(configDir) !== path.resolve(defaultDir); + + const env = {}; + if (isNonStandardPath) { + env.DOCKER_CONFIG = configDir; + } + + // Attempt to read config.json + let configExists = false; + let credentialHelpers = []; + + try { + if (fs.existsSync(configFile)) { + configExists = true; + const raw = fs.readFileSync(configFile, 'utf8'); + const configJson = JSON.parse(raw); + credentialHelpers = detectCredentialHelpers(configJson); + + // Check if credsStore references a non-existent helper binary (e.g. desktop.exe on WSL). + // If so, create a sanitized config without it and redirect DOCKER_CONFIG. + if (configJson.credsStore) { + // finch-daemon treats credential helper errors as fatal (unlike Docker + // which falls back to anonymous). On WSL, desktop.exe helper exists but + // fails for registries without stored credentials. Always sanitize. + { + // Create a sanitized config without the broken credsStore + const sanitizedDir = path.join(os.homedir(), '.lando', 'docker-config'); + fs.mkdirSync(sanitizedDir, {recursive: true}); + const sanitized = {...configJson}; + delete sanitized.credsStore; + fs.writeFileSync(path.join(sanitizedDir, 'config.json'), JSON.stringify(sanitized, null, 2), 'utf8'); + env.DOCKER_CONFIG = sanitizedDir; + } + } + } + } catch { + // If we can't read or parse the config, that's fine — finch-daemon will + // simply operate without auth, which is correct for public images. + configExists = false; + credentialHelpers = []; + } + + return { + dockerConfig: configDir, + env, + configExists, + credentialHelpers, + }; +}; + +module.exports = {getContainerdAuthConfig, getDockerConfigPath}; diff --git a/utils/setup-containerd-binaries.js b/utils/setup-containerd-binaries.js new file mode 100644 index 000000000..23a6f4fd1 --- /dev/null +++ b/utils/setup-containerd-binaries.js @@ -0,0 +1,230 @@ +'use strict'; + +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const {nanoid} = require('nanoid'); + +/** + * Setup task helper that ensures containerd, nerdctl, and buildkitd binaries + * are present at ~/.lando/bin/. + * + * For each binary: + * 1. Check if it already exists at the target location. + * 2. If missing, download the release tarball via download-x.js. + * 3. Extract the binary from the tarball. + * 4. Make it executable (chmod +x). + * + * @param {Object} [opts={}] - Options. + * @param {string} [opts.userConfRoot] - Lando config root (default ~/.lando). + * @param {string} [opts.containerdVersion] - containerd version to download. + * @param {string} [opts.nerdctlVersion] - nerdctl version to download. + * @param {string} [opts.buildkitVersion] - buildkit version to download. + * @param {Function} [opts.debug] - Debug logger function. + * @returns {Promise} An object describing what was installed: + * { containerd: {installed, path, version}, nerdctl: {...}, buildkitd: {...} } + */ +module.exports = async (opts = {}) => { + const debug = opts.debug || require('debug')('@lando/setup-containerd-binaries'); + const userConfRoot = opts.userConfRoot || path.join(os.homedir(), '.lando'); + const binDir = path.join(userConfRoot, 'bin'); + + // Ensure bin directory exists + fs.mkdirSync(binDir, {recursive: true}); + + const getContainerdDownloadUrl = require('./get-containerd-download-url'); + const downloadX = require('./download-x'); + const makeExecutable = require('./make-executable'); + + const results = { + containerd: {installed: false, path: false, version: false, skipped: false}, + nerdctl: {installed: false, path: false, version: false, skipped: false}, + buildkitd: {installed: false, path: false, version: false, skipped: false}, + }; + + // Binary definitions: + // Each entry maps a binary name to its tarball key, the path inside the + // tarball where the binary lives, and the download URL builder args. + const binaries = [ + { + name: 'containerd', + key: 'containerd', + // containerd tarball extracts to: bin/containerd + innerPath: path.join('bin', 'containerd'), + version: opts.containerdVersion, + }, + { + name: 'nerdctl', + key: 'nerdctl', + // nerdctl tarball extracts to: nerdctl (top-level) + innerPath: 'nerdctl', + version: opts.nerdctlVersion, + }, + { + name: 'buildkitd', + key: 'buildkit', + // buildkit tarball extracts to: bin/buildkitd + innerPath: path.join('bin', 'buildkitd'), + version: opts.buildkitVersion, + }, + ]; + + for (const bin of binaries) { + const destPath = path.join(binDir, bin.name); + + // Skip if binary already exists + if (fs.existsSync(destPath) && !fs.statSync(destPath).isDirectory()) { + debug('%s already exists at %s, skipping', bin.name, destPath); + results[bin.name].skipped = true; + results[bin.name].path = destPath; + continue; + } + + // Build the download URL + const urlOpts = {}; + if (bin.version) urlOpts.version = bin.version; + + let url; + try { + url = getContainerdDownloadUrl(bin.key, urlOpts); + } catch (error) { + debug('could not determine download URL for %s: %s', bin.name, error.message); + continue; + } + + debug('downloading %s from %s', bin.name, url); + + // Download the tarball to a temp location + const tmpDest = path.join(os.tmpdir(), `lando-${bin.name}-${nanoid()}.tar.gz`); + + try { + await downloadX(url, {dest: tmpDest, debug}); + } catch (error) { + debug('failed to download %s: %s', bin.name, error.message); + continue; + } + + // Extract the specific binary from the tarball + try { + await _extractBinaryFromTarball(tmpDest, bin.innerPath, destPath, debug); + makeExecutable([bin.name], binDir); + + results[bin.name].installed = true; + results[bin.name].path = destPath; + results[bin.name].version = bin.version || 'default'; + debug('installed %s to %s', bin.name, destPath); + } catch (error) { + debug('failed to extract %s from tarball: %s', bin.name, error.message); + } + + // Clean up temp tarball + try { + if (fs.existsSync(tmpDest)) fs.unlinkSync(tmpDest); + } catch { + // best-effort cleanup + } + } + + return results; +}; + +/** + * Extract a single file from a tar.gz archive to a destination path. + * + * Uses the system `tar` command, which is available on Linux, macOS, and WSL. + * + * @param {string} tarball - Path to the .tar.gz file. + * @param {string} innerPath - Relative path of the file inside the tarball. + * @param {string} dest - Destination path on disk. + * @param {Function} debug - Debug logger. + * @returns {Promise} + * @private + */ +function _extractBinaryFromTarball(tarball, innerPath, dest, debug) { + return new Promise((resolve, reject) => { + const {execFile} = require('child_process'); + const tmpDir = path.join(os.tmpdir(), `lando-extract-${nanoid()}`); + + fs.mkdirSync(tmpDir, {recursive: true}); + + // Extract just the file we need + execFile('tar', [ + 'xzf', tarball, + '-C', tmpDir, + '--strip-components', String(innerPath.split(path.sep).length - 1 || 0), + innerPath, + ], (error) => { + // If --strip-components extraction didn't work, try without stripping + // and look for the file manually + const binaryName = path.basename(innerPath); + const extractedPath = path.join(tmpDir, binaryName); + + if (error || !fs.existsSync(extractedPath)) { + // Fallback: extract everything and find the binary + debug('targeted extraction failed for %s, trying full extraction', innerPath); + execFile('tar', ['xzf', tarball, '-C', tmpDir], (err2) => { + if (err2) { + _cleanupDir(tmpDir); + return reject(err2); + } + + // Search for the binary in the extracted directory + const found = _findFile(tmpDir, binaryName); + if (!found) { + _cleanupDir(tmpDir); + return reject(new Error(`Could not find ${binaryName} in tarball`)); + } + + // Copy to destination + fs.mkdirSync(path.dirname(dest), {recursive: true}); + fs.copyFileSync(found, dest); + _cleanupDir(tmpDir); + resolve(); + }); + return; + } + + // Copy to destination + fs.mkdirSync(path.dirname(dest), {recursive: true}); + fs.copyFileSync(extractedPath, dest); + _cleanupDir(tmpDir); + resolve(); + }); + }); +} + +/** + * Recursively find a file by name in a directory. + * + * @param {string} dir - Directory to search. + * @param {string} name - File name to find. + * @returns {string|null} Full path to the file, or null. + * @private + */ +function _findFile(dir, name) { + const entries = fs.readdirSync(dir, {withFileTypes: true}); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + const found = _findFile(fullPath, name); + if (found) return found; + } else if (entry.name === name) { + return fullPath; + } + } + return null; +} + +/** + * Remove a directory tree (best-effort). + * + * @param {string} dir - Directory to remove. + * @private + */ +function _cleanupDir(dir) { + try { + fs.rmSync(dir, {recursive: true, force: true}); + } catch { + // best-effort + } +} diff --git a/utils/setup-engine-containerd.js b/utils/setup-engine-containerd.js new file mode 100644 index 000000000..13f68f060 --- /dev/null +++ b/utils/setup-engine-containerd.js @@ -0,0 +1,86 @@ +'use strict'; + +const os = require('os'); +const path = require('path'); + +const getContainerdPaths = require('./get-containerd-paths'); + +/** + * Create a containerd-backed Engine instance. + * + * @deprecated This utility is **not used in production**. The containerd engine + * is now created via `BackendManager._createContainerdEngine()` in + * `lib/backend-manager.js`, which uses `docker-compose` + `DOCKER_HOST` instead + * of `NerdctlCompose`. This file is retained for reference only and may be + * removed in a future release. + * + * This is the containerd equivalent of `utils/setup-engine.js`. It creates + * an Engine wired with: + * - **ContainerdDaemon** — manages the containerd + buildkitd lifecycle + * - **ContainerdContainer** — low-level container/network ops via Dockerode + finch-daemon + * - **NerdctlCompose** — compose orchestration via `nerdctl compose` (deprecated) + * + * @param {Object} config - The full Lando config object. + * @param {Object} cache - A Lando Cache instance. + * @param {Object} events - A Lando Events instance. + * @param {Object} log - A Lando Log instance. + * @param {Object} shell - A Lando Shell instance. + * @param {string} [id='lando'] - The Lando instance identifier. + * @returns {Engine} A fully configured Engine instance using containerd backends. + * + * @since 4.0.0 + */ +module.exports = (config, cache, events, log, shell, id = 'lando') => { + const Engine = require('../lib/engine'); + const {ContainerdDaemon, ContainerdContainer} = require('../lib/backends/containerd'); + const NerdctlCompose = require('../lib/backends/containerd/nerdctl-compose'); + + const userConfRoot = config.userConfRoot || path.join(os.homedir(), '.lando'); + const paths = getContainerdPaths(config); + const systemBinDir = config.containerdSystemBinDir || '/usr/local/lib/lando/bin'; + + // Resolve binary paths — config overrides take precedence, then standard ~/.lando/bin/ locations + const containerdBin = config.containerdBin || path.join(systemBinDir, 'containerd'); + const nerdctlBin = config.nerdctlBin || path.join(userConfRoot, 'bin', 'nerdctl'); + const buildkitdBin = config.buildkitdBin || path.join(systemBinDir, 'buildkitd'); + const socketPath = paths.containerdSocket; + + // Create the daemon backend — manages containerd + buildkitd lifecycle + const daemon = new ContainerdDaemon({ + userConfRoot, + containerdBin, + buildkitdBin, + nerdctlBin, + socketPath, + events, + cache, + log, + }); + + // Create the container backend — low-level container/network ops via Dockerode + finch-daemon + // ContainerdContainer uses Dockerode pointed at finch-daemon's Docker-compatible socket + // instead of shelling out to nerdctl. finch-daemon provides Docker API v1.43 compat backed + // by containerd. + const docker = new ContainerdContainer({ + finchSocket: paths.finchSocket, + id, + debug: require('./debug-shim')(log), + }); + + // Create the compose backend — produces {cmd, opts} shell descriptors + const nerdctlCompose = new NerdctlCompose({ + socketPath, + buildkitHost: `unix://${daemon.buildkitSocket}`, + namespace: 'default', + nerdctlConfig: path.join(userConfRoot, 'config', 'nerdctl.toml'), + }); + + // Create the compose function with the standard (cmd, datum) => Promise contract. + // Gets {cmd, opts} from NerdctlCompose, then executes via shell.sh([nerdctlBin, ...cmd], opts). + const compose = (cmd, datum) => { + const run = nerdctlCompose[cmd](datum.compose, datum.project, datum.opts); + return shell.sh([nerdctlBin].concat(run.cmd), run.opts); + }; + + return new Engine(daemon, docker, compose, config); +};