diff --git a/.github/scripts/doc-generator.sh b/.github/scripts/doc-generator.sh
index 89184ce7..b1de5586 100755
--- a/.github/scripts/doc-generator.sh
+++ b/.github/scripts/doc-generator.sh
@@ -47,6 +47,33 @@ run_doc_generation() {
fi
echo "::endgroup::"
+ echo "::group::Running node-update-monitoring-docker-compose"
+ if npm run node-update-monitoring-docker-compose; then
+ echo "✅ Updated alloy service in monitoring page"
+ else
+ echo "❌ node-update-monitoring-docker-compose failed"
+ failed=true
+ fi
+ echo "::endgroup::"
+
+ echo "::group::Running node-update-monitoring-alloy-config"
+ if npm run node-update-monitoring-alloy-config; then
+ echo "✅ Updated alloy config in monitoring page"
+ else
+ echo "❌ node-update-monitoring-alloy-config failed"
+ failed=true
+ fi
+ echo "::endgroup::"
+
+ echo "::group::Running node-update-greybox"
+ if npm run node-update-greybox; then
+ echo "✅ Updated greybox section in genvm configuration"
+ else
+ echo "❌ node-update-greybox failed"
+ failed=true
+ fi
+ echo "::endgroup::"
+
echo "::group::Running node-generate-api-docs"
if npm run node-generate-api-docs; then
echo "✅ Generated API documentation"
diff --git a/.github/workflows/check-node-docs-sync.yml b/.github/workflows/check-node-docs-sync.yml
index 1e12b2ed..b8feca3c 100644
--- a/.github/workflows/check-node-docs-sync.yml
+++ b/.github/workflows/check-node-docs-sync.yml
@@ -28,6 +28,9 @@ jobs:
npm run node-update-setup-guide
npm run node-update-config
npm run node-update-docker-compose
+ npm run node-update-monitoring-docker-compose
+ npm run node-update-monitoring-alloy-config
+ npm run node-update-greybox
npm run node-generate-api-docs
- name: Check for uncommitted changes
diff --git a/.github/workflows/sync-docs-from-node.yml b/.github/workflows/sync-docs-from-node.yml
index 2fd0da7a..17556db0 100644
--- a/.github/workflows/sync-docs-from-node.yml
+++ b/.github/workflows/sync-docs-from-node.yml
@@ -92,7 +92,7 @@ jobs:
needs: prepare
strategy:
matrix:
- sync_type: [changelog, config, config_asimov, config_bradbury, docker_compose, api_gen, api_debug, api_ops]
+ sync_type: [changelog, config, config_asimov, config_bradbury, docker_compose, docker_compose_monitoring, alloy_config, greybox_setup, api_gen, api_debug, api_ops]
fail-fast: false
steps:
- name: Checkout documentation repository
@@ -118,6 +118,8 @@ jobs:
configs/node/asimov.yaml.example
configs/node/bradbury.yaml.example
release/docker-compose.yaml
+ release/alloy-config.river
+ release/greybox-setup-guide.md
sparse-checkout-cone-mode: true
path: source-repo
ref: ${{ needs.prepare.outputs.version }}
@@ -156,6 +158,24 @@ jobs:
echo "target_path=content/validators/docker-compose.yaml" >> $GITHUB_OUTPUT
echo "filter_pattern=.*" >> $GITHUB_OUTPUT
;;
+ "docker_compose_monitoring")
+ echo "title=Docker Compose File (Monitoring)" >> $GITHUB_OUTPUT
+ echo "source_path=source-repo/release/docker-compose.yaml" >> $GITHUB_OUTPUT
+ echo "target_path=content/validators/docker-compose-monitoring.yaml" >> $GITHUB_OUTPUT
+ echo "filter_pattern=.*" >> $GITHUB_OUTPUT
+ ;;
+ "alloy_config")
+ echo "title=Alloy Config File" >> $GITHUB_OUTPUT
+ echo "source_path=source-repo/release/alloy-config.river" >> $GITHUB_OUTPUT
+ echo "target_path=content/validators/alloy-config.river" >> $GITHUB_OUTPUT
+ echo "filter_pattern=.*" >> $GITHUB_OUTPUT
+ ;;
+ "greybox_setup")
+ echo "title=Greybox Setup Guide" >> $GITHUB_OUTPUT
+ echo "source_path=source-repo/release/greybox-setup-guide.md" >> $GITHUB_OUTPUT
+ echo "target_path=content/validators/greybox-setup-guide.md" >> $GITHUB_OUTPUT
+ echo "filter_pattern=.*" >> $GITHUB_OUTPUT
+ ;;
"api_gen")
echo "title=API Gen Methods" >> $GITHUB_OUTPUT
echo "source_path=source-repo/${{ github.event.inputs.api_gen_path || github.event.client_payload.api_gen_path || 'docs/api/rpc' }}" >> $GITHUB_OUTPUT
@@ -421,6 +441,9 @@ jobs:
- \`npm run node-update-setup-guide\`
- \`npm run node-update-config\`
- \`npm run node-update-docker-compose\`
+ - \`npm run node-update-monitoring-docker-compose\`
+ - \`npm run node-update-monitoring-alloy-config\`
+ - \`npm run node-update-greybox\`
- \`npm run node-generate-api-docs\`
Please review the changes and merge if everything looks correct.
@@ -488,7 +511,7 @@ jobs:
echo "" >> $GITHUB_STEP_SUMMARY
# Process each sync type report
- for sync_type in changelog config config_asimov config_bradbury docker_compose api_gen api_debug api_ops; do
+ for sync_type in changelog config config_asimov config_bradbury docker_compose docker_compose_monitoring alloy_config greybox_setup api_gen api_debug api_ops; do
# Get proper title
case "$sync_type" in
"changelog") title="📝 Changelog Sync" ;;
@@ -496,6 +519,9 @@ jobs:
"config_asimov") title="⚙️ Config File Sync (Asimov)" ;;
"config_bradbury") title="⚙️ Config File Sync (Bradbury)" ;;
"docker_compose") title="🐳 Docker Compose Sync" ;;
+ "docker_compose_monitoring") title="🐳 Docker Compose Sync (Monitoring)" ;;
+ "alloy_config") title="📊 Alloy Config Sync" ;;
+ "greybox_setup") title="🔧 Greybox Setup Guide Sync" ;;
"api_gen") title="🔧 API Gen Methods Sync" ;;
"api_debug") title="🐛 API Debug Methods Sync" ;;
"api_ops") title="📊 API Ops Methods Sync" ;;
diff --git a/content/validators/alloy-config.river b/content/validators/alloy-config.river
new file mode 100644
index 00000000..db983230
--- /dev/null
+++ b/content/validators/alloy-config.river
@@ -0,0 +1,160 @@
+// Grafana Alloy Configuration for GenLayer Node Telemetry
+// Handles both log collection and metrics forwarding
+
+// ==========================================
+// Log Collection and Forwarding
+// ==========================================
+
+// Discovery component to find log files using local.file_match
+// Supports different log file patterns:
+// - Single node: "/var/log/genlayer/node.log"
+// - Multi-node: "/var/log/genlayer/*/logs/node.log" (each node in subdirectory)
+// - Custom pattern via LOG_FILE_PATTERN env var
+local.file_match "genlayer_logs" {
+ path_targets = [{
+ __path__ = coalesce(sys.env("LOG_FILE_PATTERN"), "/var/log/genlayer/node*.log"),
+ }]
+}
+
+// Relabel to add metadata labels to log entries
+discovery.relabel "add_labels" {
+ targets = local.file_match.genlayer_logs.targets
+
+ // Add instance label from environment variable
+ rule {
+ target_label = "instance"
+ replacement = sys.env("NODE_ID")
+ }
+
+ // Add validator_name label from environment variable
+ rule {
+ target_label = "validator_name"
+ replacement = sys.env("VALIDATOR_NAME")
+ }
+
+ // Add component label
+ rule {
+ target_label = "component"
+ replacement = "alloy"
+ }
+
+ // Add job label
+ rule {
+ target_label = "job"
+ replacement = "genlayer-node"
+ }
+}
+
+// Source component to read log files
+loki.source.file "genlayer" {
+ targets = discovery.relabel.add_labels.output
+ forward_to = [loki.write.central.receiver]
+
+ // Tail from end to avoid ingesting entire log history on startup
+ tail_from_end = true
+}
+
+// Write logs to central Loki instance
+loki.write "central" {
+ endpoint {
+ url = sys.env("CENTRAL_LOKI_URL")
+
+ // HTTP Basic Authentication
+ basic_auth {
+ username = sys.env("CENTRAL_LOKI_USERNAME")
+ password = sys.env("CENTRAL_LOKI_PASSWORD")
+ }
+
+ // Retries are not configured here: Alloy's loki.write has no retry block.
+ // Failed pushes are retried automatically with exponential backoff
+ // whenever the endpoint is unreachable.
+
+ // Configurable batch settings for efficient log sending
+ batch_size = coalesce(sys.env("LOKI_BATCH_SIZE"), "1MiB") // Maximum batch size before sending
+ batch_wait = coalesce(sys.env("LOKI_BATCH_WAIT"), "60s") // Maximum wait time before sending partial batch
+ }
+}
+
+// ==========================================
+// Prometheus Metrics Collection and Forwarding
+// ==========================================
+
+// Scrape metrics from GenLayer node(s)
+// Supports both single node and multi-node configurations
+//
+// Single Node Mode:
+// Set NODE_METRICS_ENDPOINT, NODE_ID, VALIDATOR_NAME
+//
+// Multi-Node Mode:
+// Set SCRAPE_TARGETS_JSON with JSON array of target objects
+// Example: [{"__address__":"host.docker.internal:9250","instance":"0x...","validator_name":"node-1"}]
+//
+// Note: The "network" label is emitted by the node itself (auto-detected from consensus address),
+// so it does not need to be configured here.
+prometheus.scrape "genlayer_node" {
+ // Dynamic targets based on environment variable
+ // If SCRAPE_TARGETS_JSON is set, use it (multi-node mode)
+ // Otherwise, build single target from individual env vars (single node mode)
+ targets = encoding.from_json(coalesce(sys.env("SCRAPE_TARGETS_JSON"), string.format("[{\"__address__\":\"%s\",\"instance\":\"%s\",\"validator_name\":\"%s\"}]", coalesce(sys.env("NODE_METRICS_ENDPOINT"), "host.docker.internal:9153"), coalesce(sys.env("NODE_ID"), "local"), coalesce(sys.env("VALIDATOR_NAME"), "default"))))
+
+ forward_to = [prometheus.relabel.metrics.receiver]
+
+ // Configurable scrape intervals
+ scrape_interval = coalesce(sys.env("METRICS_SCRAPE_INTERVAL"), "60s")
+ scrape_timeout = coalesce(sys.env("METRICS_SCRAPE_TIMEOUT"), "10s")
+}
+
+// Relabel metrics to filter before forwarding
+prometheus.relabel "metrics" {
+ forward_to = [prometheus.remote_write.central.receiver]
+
+ // Option 1: Forward all metrics (default)
+ // Currently forwarding all metrics from the node.
+
+ // Option 2: Only keep genlayer_node_* metrics to reduce bandwidth (recommended)
+ // To enable filtering and reduce bandwidth, uncomment the following rule:
+ /*
+ rule {
+ source_labels = ["__name__"]
+ regex = "genlayer_node_.*"
+ action = "keep"
+ }
+ */
+}
+
+// Remote write configuration for sending metrics to central Prometheus
+prometheus.remote_write "central" {
+ endpoint {
+ url = sys.env("CENTRAL_MONITORING_URL")
+
+ // HTTP Basic Authentication
+ basic_auth {
+ username = sys.env("CENTRAL_MONITORING_USERNAME")
+ password = sys.env("CENTRAL_MONITORING_PASSWORD")
+ }
+
+ // Queue configuration for reliability
+ queue_config {
+ capacity = 10000
+ max_shards = 5
+ max_samples_per_send = 500
+ batch_send_deadline = coalesce(sys.env("METRICS_BATCH_SEND_DEADLINE"), "60s")
+ }
+ }
+}
+
+// ==========================================
+// Alloy Self-Monitoring
+// ==========================================
+
+// Alloy internal exporter for health monitoring
+prometheus.exporter.self "alloy" {}
+
+// Scrape Alloy's own internal metrics for local health visibility
+prometheus.scrape "alloy" {
+ targets = prometheus.exporter.self.alloy.targets
+ forward_to = [] // Not forwarding Alloy metrics to reduce noise
+
+ // Configurable scrape interval for Alloy's internal health monitoring
+ scrape_interval = coalesce(sys.env("ALLOY_SELF_MONITORING_INTERVAL"), "60s")
+}
diff --git a/content/validators/changelog/v0.5.7.mdx b/content/validators/changelog/v0.5.7.mdx
new file mode 100644
index 00000000..1c236df5
--- /dev/null
+++ b/content/validators/changelog/v0.5.7.mdx
@@ -0,0 +1,18 @@
+## v0.5.7
+
+### New features
+
+- Add LLM greyboxing with configurable chain order via YAML meta
+
+### Bug fixes
+
+- Use PendingAt for idle activator rotation
+- Remove unnecessary workaround from view calls
+- Restart sync pipeline after RPC failures
+- Suppress shutdown errors in watchers
+- Reduce FindAcceptanceBlock range to fit RPC limit
+
+### Misc
+
+- Include Alloy healthcheck script in release tarball
+- Increase Alloy push intervals to 60s
diff --git a/content/validators/docker-compose-monitoring.yaml b/content/validators/docker-compose-monitoring.yaml
new file mode 100644
index 00000000..932490d5
--- /dev/null
+++ b/content/validators/docker-compose-monitoring.yaml
@@ -0,0 +1,116 @@
+services:
+ webdriver-container:
+ container_name: genlayer-node-webdriver
+ image: yeagerai/genlayer-genvm-webdriver:0.0.10
+ shm_size: 2gb
+ security_opt:
+ - no-new-privileges:true
+ environment:
+ PORT: 4444
+ ports:
+ - "${WEBDRIVER_PORT:-4444}:4444"
+ restart: unless-stopped
+
+ genlayer-node:
+ image: yeagerai/genlayer-node:${NODE_VERSION:-latest}
+ entrypoint: [
+ "sh", "-c", "/app/bin/genlayernode run --password ${NODE_PASSWORD:-12345678}",
+ ]
+ container_name: genlayer-node
+ restart: unless-stopped
+ env_file:
+ - path: ./.env
+ required: false
+ ports:
+ - "${NODE_RPC_PORT:-9151}:9151"
+ - "${NODE_OPS_PORT:-9153}:9153"
+ volumes:
+ - ${NODE_CONFIG_PATH:-./configs/node/config.yaml}:/app/configs/node/config.yaml:ro
+ - ${NODE_DATA_PATH:-./data}:/app/data
+ - ./genvm-module-web-docker.yaml:/app/third_party/genvm/config/genvm-module-web.yaml
+ - /var/run/docker.sock:/var/run/docker.sock:ro # required for webdriver metrics collection
+ security_opt:
+ - no-new-privileges:true
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "100m"
+ max-file: "3"
+ compress: "true"
+ depends_on:
+ - webdriver-container
+ profiles:
+ - node
+
+ # Grafana Alloy for both logs and metrics forwarding
+ # Supports both single node and multi-node configurations
+ #
+ # Single Node Mode:
+ # Set NODE_ID, VALIDATOR_NAME, NODE_METRICS_ENDPOINT in .env
+ # docker compose --profile monitoring up -d
+ #
+ # Multi-Node Mode:
+ # Set SCRAPE_TARGETS_JSON in .env
+ # docker compose --profile monitoring up -d
+ alloy:
+ image: grafana/alloy:v1.12.0
+ container_name: genlayer-node-alloy
+ command:
+ - run
+ - /etc/alloy/config.river
+ - --server.http.listen-addr=0.0.0.0:12345
+ - --storage.path=/var/lib/alloy/data
+ volumes:
+ - ./alloy-config.river:/etc/alloy/config.river:ro
+ - ./alloy-healthcheck.sh:/etc/alloy/healthcheck.sh:ro
+ - ${NODE_LOGS_PATH:-./data/node/logs}:/var/log/genlayer:ro
+ - alloy_data:/var/lib/alloy
+ healthcheck:
+ test: ["CMD", "sh", "/etc/alloy/healthcheck.sh"]
+ interval: 120s
+ timeout: 15s
+ retries: 1
+ start_period: 120s
+ environment:
+ # Central monitoring endpoints
+ - CENTRAL_LOKI_URL=${CENTRAL_LOKI_URL:-https://logs-prod-042.grafana.net/loki/api/v1/push}
+ - CENTRAL_MONITORING_URL=${CENTRAL_MONITORING_URL:-https://prometheus-prod-66-prod-us-east-3.grafana.net/api/prom/push}
+
+ # Metrics (Prometheus) authentication
+ - CENTRAL_MONITORING_USERNAME=${CENTRAL_MONITORING_USERNAME:-telemetric}
+ - CENTRAL_MONITORING_PASSWORD=${CENTRAL_MONITORING_PASSWORD:-12345678}
+
+ # Logs (Loki) authentication
+ - CENTRAL_LOKI_USERNAME=${CENTRAL_LOKI_USERNAME:-telemetric}
+ - CENTRAL_LOKI_PASSWORD=${CENTRAL_LOKI_PASSWORD:-12345678}
+
+ # Single node configuration
+ - NODE_ID=${NODE_ID:-validator-001}
+ - VALIDATOR_NAME=${VALIDATOR_NAME:-MyValidator}
+ - NODE_METRICS_ENDPOINT=${NODE_METRICS_ENDPOINT:-host.docker.internal:9153}
+
+ # Multi-node configuration
+ # When set, overrides single node config above
+ - SCRAPE_TARGETS_JSON=${SCRAPE_TARGETS_JSON:-}
+
+ # Scraping configuration
+ - METRICS_SCRAPE_INTERVAL=${METRICS_SCRAPE_INTERVAL:-60s}
+ - METRICS_SCRAPE_TIMEOUT=${METRICS_SCRAPE_TIMEOUT:-10s}
+ - ALLOY_SELF_MONITORING_INTERVAL=${ALLOY_SELF_MONITORING_INTERVAL:-60s}
+
+ # Log collection configuration
+ - LOG_FILE_PATTERN=${LOG_FILE_PATTERN:-/var/log/genlayer/node*.log}
+
+ # Log batching configuration
+ - LOKI_BATCH_SIZE=${LOKI_BATCH_SIZE:-1MiB}
+ - LOKI_BATCH_WAIT=${LOKI_BATCH_WAIT:-60s}
+ ports:
+ - "12345:12345" # Alloy UI for debugging
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+volumes:
+ alloy_data:
diff --git a/content/validators/greybox-setup-guide.md b/content/validators/greybox-setup-guide.md
new file mode 100644
index 00000000..b71bf00d
--- /dev/null
+++ b/content/validators/greybox-setup-guide.md
@@ -0,0 +1,177 @@
+# Greybox LLM Strategy — Validator Setup Guide
+
+Switch your GenLayer node from random LLM provider selection to deterministic ordered fallback via OpenRouter.
+
+## What is Greybox?
+
+By default, the node picks a random LLM provider for each call. With **greybox**, the node uses a fixed priority chain configured via `meta.greybox` fields in the YAML config.
+
+**Default text chain:** deepseek-v3.2 → qwen3-235b → claude-haiku-4.5 → kimi-k2 → glm-5 → llama-3.3 (heurist) → llama-3.3 (ionet)
+
+**Default image chain:** gpt-5.1-mini → gemini-3-flash → claude-haiku-4.5
+
+Chain order is determined by the `meta.greybox` priority numbers on each model in the YAML. Lower number = higher priority. You can change the order by editing these numbers — no Lua changes needed.
+
+OpenRouter is the primary aggregator. If it fails, the node falls back to direct provider APIs (heurist, ionet).
+
+## Prerequisites
+
+- GenLayer node **v0.5.7+** (tarball must include `genvm-modules-llm-release.yaml` and `genvm-llm-greybox.lua`)
+- An **OpenRouter API key** — get one at https://openrouter.ai/keys
+
+## Step-by-Step Setup
+
+### 1. Stop the node
+
+```bash
+sudo systemctl stop genlayer-node
+```
+
+### 2. Add OpenRouter API key to .env
+
+```bash
+# Find your active .env
+ENV_FILE="/opt/genlayer-node/.env"
+
+# Add the key (or edit the file manually)
+echo "OPENROUTERKEY=sk-or-v1-your-key-here" >> ${ENV_FILE}
+```
+
+### 3. Apply the release LLM config
+
+The tarball ships with a unified config that includes all backends (openrouter, morpheus, heurist, ionet, etc.).
+
+```bash
+VERSION=$(readlink /opt/genlayer-node/bin | sed 's|/bin||; s|.*/||')
+
+cp /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-modules-llm-release.yaml \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+```
+
+### 4. Switch to greybox strategy
+
+```bash
+sed -i 's/genvm-llm-default\.lua/genvm-llm-greybox.lua/' \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+```
+
+### 5. Verify the config
+
+```bash
+# Check lua script path
+grep lua_script_path /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+# Expected: lua_script_path: ${exeDir}/../config/genvm-llm-greybox.lua
+
+# Check openrouter is present
+grep -A2 'openrouter:' /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+# Expected: enabled: true
+
+# Check the greybox Lua file exists
+ls -la /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-llm-greybox.lua
+```
+
+### 6. Start the node
+
+```bash
+sudo systemctl start genlayer-node
+```
+
+### 7. Verify greybox is active
+
+Wait for an LLM transaction to be processed, then check the logs:
+
+```bash
+sudo journalctl -u genlayer-node --no-hostname | grep "greybox"
+```
+
+You should see entries like:
+```
+greybox: using text chain count: 5
+greybox: success provider: openrouter model: deepseek/deepseek-v3.2 is_primary: true
+```
+
+## Switching Back to Default
+
+To revert to random provider selection:
+
+```bash
+sudo systemctl stop genlayer-node
+
+sed -i 's/genvm-llm-greybox\.lua/genvm-llm-default.lua/' \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+
+sudo systemctl start genlayer-node
+```
+
+## Updating Greybox on a Running Node (No Full Restart)
+
+If you need to update the Lua script or model config without stopping the whole node,
+you can restart just the LLM module on each GenVM instance:
+
+```bash
+# Find the GenVM manager port (check your config or active ports)
+PORT=3999
+
+# Stop the LLM module
+curl -X POST "http://127.0.0.1:${PORT}/module/stop" \
+ -H 'Content-Type: application/json' \
+ -d '{"module_type": "Llm"}'
+
+# Start the LLM module (reloads Lua script and config)
+curl -X POST "http://127.0.0.1:${PORT}/module/start" \
+ -H 'Content-Type: application/json' \
+ -d '{"module_type": "Llm", "config": null}'
+```
+
+Repeat for each GenVM instance port. There is no atomic restart — each instance
+restarts independently.
+
+## Customizing the Chain Order
+
+The greybox Lua script reads chain membership and priority from `meta.greybox` on each model in the YAML config. Example:
+
+```yaml
+models:
+ deepseek/deepseek-v3.2:
+ supports_json: true
+ meta:
+ greybox: { text: 1 } # text chain, priority 1 (primary)
+ openai/gpt-5.1-mini:
+ supports_json: true
+ supports_image: true
+ meta:
+ greybox: { image: 1 } # image chain, priority 1
+ anthropic/claude-haiku-4.5:
+ supports_json: true
+ supports_image: true
+ meta:
+ greybox: { text: 3, image: 3 } # both chains
+```
+
+**Change model order:** Edit the priority numbers. Lower number = tried first.
+
+**Add a model to the chain:** Add `meta: { greybox: { text: N } }` to any model in any enabled backend.
+
+**Remove a model from the chain:** Remove its `meta.greybox` field.
+
+**Disable an entire provider:** Remove its API key from `.env` — all its models drop out automatically.
+
+The YAML config **must** have `meta.greybox` fields on at least some models. If none are found, the LLM module will fail to start with an error.
+
+After editing the YAML, restart the LLM module (see "Updating Greybox on a Running Node" above) or restart the node.
+
+## Troubleshooting
+
+**"module_failed_to_start" error:**
+- Check that `genvm-llm-greybox.lua` exists in the config directory
+- Check that `OPENROUTERKEY` is set in `.env` and not empty
+- Check that the openrouter backend shows `enabled: true` in the YAML
+
+**No "greybox:" entries in logs:**
+- The greybox Lua only logs when an LLM call happens. Run a transaction that uses an intelligent contract with LLM calls.
+- Verify `lua_script_path` points to `genvm-llm-greybox.lua` (not `default`)
+
+**All models exhausted error:**
+- OpenRouter may be down or your key is invalid
+- Check your key at https://openrouter.ai/settings/keys
+- Fallback providers (heurist, ionet) also need valid keys if you want fallback to work
diff --git a/package.json b/package.json
index ac137922..ea75072a 100644
--- a/package.json
+++ b/package.json
@@ -3,8 +3,8 @@
"version": "0.0.1",
"description": "GenLayer documentation",
"scripts": {
- "dev": "npm run node-generate-changelog && npm run node-update-setup-guide && npm run node-update-config && npm run node-update-docker-compose && npm run node-generate-api-docs && node scripts/generate-full-docs.js && next dev",
- "build": "npm run node-generate-changelog && npm run node-update-setup-guide && npm run node-update-config && npm run node-update-docker-compose && npm run node-generate-api-docs && node scripts/generate-full-docs.js && next build",
+ "dev": "npm run node-generate-changelog && npm run node-update-setup-guide && npm run node-update-config && npm run node-update-docker-compose && npm run node-update-monitoring-docker-compose && npm run node-update-monitoring-alloy-config && npm run node-update-greybox && npm run node-generate-api-docs && node scripts/generate-full-docs.js && next dev",
+ "build": "npm run node-generate-changelog && npm run node-update-setup-guide && npm run node-update-config && npm run node-update-docker-compose && npm run node-update-monitoring-docker-compose && npm run node-update-monitoring-alloy-config && npm run node-update-greybox && npm run node-generate-api-docs && node scripts/generate-full-docs.js && next build",
"start": "next start",
"test:e2e": "playwright test",
"generate-sitemap": "node scripts/generate-sitemap-xml.js",
@@ -12,7 +12,10 @@
"node-generate-api-docs": "node scripts/generate-api-docs.js",
"node-update-setup-guide": "node scripts/update-setup-guide-versions.js",
"node-update-config": "node scripts/update-config-in-setup-guide.js",
- "node-update-docker-compose": "node scripts/update-docker-compose-in-setup-guide.js"
+ "node-update-docker-compose": "node scripts/update-docker-compose-in-setup-guide.js",
+ "node-update-monitoring-docker-compose": "node scripts/update-alloy-in-monitoring.js",
+ "node-update-monitoring-alloy-config": "node scripts/update-alloy-config-in-monitoring.js",
+ "node-update-greybox": "node scripts/update-greybox-in-genvm-config.js"
},
"repository": {
"type": "git",
diff --git a/pages/validators/changelog.mdx b/pages/validators/changelog.mdx
index 895601d0..a20703a2 100644
--- a/pages/validators/changelog.mdx
+++ b/pages/validators/changelog.mdx
@@ -1,5 +1,24 @@
# Changelog
+## v0.5.7
+
+### New features
+
+- Add LLM greyboxing with configurable chain order via YAML meta
+
+### Bug fixes
+
+- Use PendingAt for idle activator rotation
+- Remove unnecessary workaround from view calls
+- Restart sync pipeline after RPC failures
+- Suppress shutdown errors in watchers
+- Reduce FindAcceptanceBlock range to fit RPC limit
+
+### Misc
+
+- Include Alloy healthcheck script in release tarball
+- Increase Alloy push intervals to 60s
+
## v0.5.6
### Bug fixes
diff --git a/pages/validators/genvm-configuration.mdx b/pages/validators/genvm-configuration.mdx
index dc9dcb97..ab810809 100644
--- a/pages/validators/genvm-configuration.mdx
+++ b/pages/validators/genvm-configuration.mdx
@@ -42,20 +42,178 @@ macos (natively), you may want to set `permits` to some large value, as auto-det
## Greyboxing LLMs
-Greyboxing is a way to further customise your LLM setup to improve its performance as well as security.
+Switch your GenLayer node from random LLM provider selection to deterministic ordered fallback via OpenRouter.
-
- Greyboxing is an advanced feature. Familiarity with Lua scripting and LLM
- prompt engineering is recommended for customization.
-
+## What is Greybox?
-`genvm-modules llm` provides user with ability to customize [greyboxing](/_temp/security-and-best-practices/grey-boxing) via lua scripting. Right now users can customize prompt templates, specify temperature and system prompt.
+By default, the node picks a random LLM provider for each call. With **greybox**, the node uses a fixed priority chain configured via `meta.greybox` fields in the YAML config.
-Related scripts are located at:
+**Default text chain:** deepseek-v3.2 → qwen3-235b → claude-haiku-4.5 → kimi-k2 → glm-5 → llama-3.3 (heurist) → llama-3.3 (ionet)
-1. `./scripts/genvm-greyboxing.lua` – user defined script
-2. `./share/lib/genvm/greyboxing/lib-greyboxing.lua` – more low-level library
+**Default image chain:** gpt-5.1-mini → gemini-3-flash → claude-haiku-4.5
-
- More features and built-in filters will be added soon
-
+Chain order is determined by the `meta.greybox` priority numbers on each model in the YAML. Lower number = higher priority. You can change the order by editing these numbers — no Lua changes needed.
+
+OpenRouter is the primary aggregator. If it fails, the node falls back to direct provider APIs (heurist, ionet).
+
+## Prerequisites
+
+- GenLayer node **v0.5.7+** (tarball must include `genvm-modules-llm-release.yaml` and `genvm-llm-greybox.lua`)
+- An **OpenRouter API key** — get one at https://openrouter.ai/keys
+
+## Step-by-Step Setup
+
+### 1. Stop the node
+
+```bash
+sudo systemctl stop genlayer-node
+```
+
+### 2. Add OpenRouter API key to .env
+
+```bash
+# Find your active .env
+ENV_FILE="/opt/genlayer-node/.env"
+
+# Add the key (or edit the file manually)
+echo "OPENROUTERKEY=sk-or-v1-your-key-here" >> ${ENV_FILE}
+```
+
+### 3. Apply the release LLM config
+
+The tarball ships with a unified config that includes all backends (openrouter, morpheus, heurist, ionet, etc.).
+
+```bash
+VERSION=$(readlink /opt/genlayer-node/bin | sed 's|/bin||; s|.*/||')
+
+cp /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-modules-llm-release.yaml \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+```
+
+### 4. Switch to greybox strategy
+
+```bash
+sed -i 's/genvm-llm-default\.lua/genvm-llm-greybox.lua/' \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+```
+
+### 5. Verify the config
+
+```bash
+# Check lua script path
+grep lua_script_path /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+# Expected: lua_script_path: ${exeDir}/../config/genvm-llm-greybox.lua
+
+# Check openrouter is present
+grep -A2 'openrouter:' /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+# Expected: enabled: true
+
+# Check the greybox Lua file exists
+ls -la /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-llm-greybox.lua
+```
+
+### 6. Start the node
+
+```bash
+sudo systemctl start genlayer-node
+```
+
+### 7. Verify greybox is active
+
+Wait for an LLM transaction to be processed, then check the logs:
+
+```bash
+sudo journalctl -u genlayer-node --no-hostname | grep "greybox"
+```
+
+You should see entries like:
+```
+greybox: using text chain count: 5
+greybox: success provider: openrouter model: deepseek/deepseek-v3.2 is_primary: true
+```
+
+## Switching Back to Default
+
+To revert to random provider selection:
+
+```bash
+sudo systemctl stop genlayer-node
+
+sed -i 's/genvm-llm-greybox\.lua/genvm-llm-default.lua/' \
+ /opt/genlayer-node/${VERSION}/third_party/genvm/config/genvm-module-llm.yaml
+
+sudo systemctl start genlayer-node
+```
+
+## Updating Greybox on a Running Node (No Full Restart)
+
+If you need to update the Lua script or model config without stopping the whole node,
+you can restart just the LLM module on each GenVM instance:
+
+```bash
+# Find the GenVM manager port (check your config or active ports)
+PORT=3999
+
+# Stop the LLM module
+curl -X POST "http://127.0.0.1:${PORT}/module/stop" \
+ -H 'Content-Type: application/json' \
+ -d '{"module_type": "Llm"}'
+
+# Start the LLM module (reloads Lua script and config)
+curl -X POST "http://127.0.0.1:${PORT}/module/start" \
+ -H 'Content-Type: application/json' \
+ -d '{"module_type": "Llm", "config": null}'
+```
+
+Repeat for each GenVM instance port. There is no atomic restart — each instance
+restarts independently.
+
+## Customizing the Chain Order
+
+The greybox Lua script reads chain membership and priority from `meta.greybox` on each model in the YAML config. Example:
+
+```yaml
+models:
+ deepseek/deepseek-v3.2:
+ supports_json: true
+ meta:
+ greybox: { text: 1 } # text chain, priority 1 (primary)
+ openai/gpt-5.1-mini:
+ supports_json: true
+ supports_image: true
+ meta:
+ greybox: { image: 1 } # image chain, priority 1
+ anthropic/claude-haiku-4.5:
+ supports_json: true
+ supports_image: true
+ meta:
+ greybox: { text: 3, image: 3 } # both chains
+```
+
+**Change model order:** Edit the priority numbers. Lower number = tried first.
+
+**Add a model to the chain:** Add `meta: { greybox: { text: N } }` to any model in any enabled backend.
+
+**Remove a model from the chain:** Remove its `meta.greybox` field.
+
+**Disable an entire provider:** Remove its API key from `.env` — all its models drop out automatically.
+
+The YAML config **must** have `meta.greybox` fields on at least some models. If none are found, the LLM module will fail to start with an error.
+
+After editing the YAML, restart the LLM module (see "Updating Greybox on a Running Node" above) or restart the node.
+
+## Troubleshooting
+
+**"module_failed_to_start" error:**
+- Check that `genvm-llm-greybox.lua` exists in the config directory
+- Check that `OPENROUTERKEY` is set in `.env` and not empty
+- Check that the openrouter backend shows `enabled: true` in the YAML
+
+**No "greybox:" entries in logs:**
+- The greybox Lua only logs when an LLM call happens. Run a transaction that uses an intelligent contract with LLM calls.
+- Verify `lua_script_path` points to `genvm-llm-greybox.lua` (not `default`)
+
+**All models exhausted error:**
+- OpenRouter may be down or your key is invalid
+- Check your key at https://openrouter.ai/settings/keys
+- Fallback providers (heurist, ionet) also need valid keys if you want fallback to work
diff --git a/pages/validators/monitoring.mdx b/pages/validators/monitoring.mdx
index 2fd2d6af..b06a8a40 100644
--- a/pages/validators/monitoring.mdx
+++ b/pages/validators/monitoring.mdx
@@ -128,69 +128,75 @@ METRICS_SCRAPE_INTERVAL=15s
2. **Add or verify the Alloy service in docker-compose.yaml** (copy if missing):
```yaml
-# Grafana Alloy for both logs and metrics forwarding
-# Supports both single node and multi-node configurations
-#
-# Single Node Mode:
-# Set NODE_ID, VALIDATOR_NAME, NODE_METRICS_ENDPOINT in .env
-# docker compose --profile monitoring up -d
-#
-# Multi-Node Mode:
-# Set SCRAPE_TARGETS_JSON in .env
-# docker compose --profile monitoring up -d
-alloy:
- image: grafana/alloy:v1.12.0
- container_name: genlayer-node-alloy
- command:
- - run
- - /etc/alloy/config.river
- - --server.http.listen-addr=0.0.0.0:12345
- - --storage.path=/var/lib/alloy/data
- volumes:
- - ./alloy-config.river:/etc/alloy/config.river:ro
- - ${NODE_LOGS_PATH:-./data/node/logs}:/var/log/genlayer:ro
- - alloy_data:/var/lib/alloy
- environment:
- # Central monitoring endpoints
- - CENTRAL_LOKI_URL=${CENTRAL_LOKI_URL:-https://logs-prod-042.grafana.net/loki/api/v1/push}
- - CENTRAL_MONITORING_URL=${CENTRAL_MONITORING_URL:-https://prometheus-prod-66-prod-us-east-3.grafana.net/api/prom/push}
-
- # Metrics (Prometheus) authentication
- - CENTRAL_MONITORING_USERNAME=${CENTRAL_MONITORING_USERNAME:-telemetric}
- - CENTRAL_MONITORING_PASSWORD=${CENTRAL_MONITORING_PASSWORD:-12345678}
-
- # Logs (Loki) authentication
- - CENTRAL_LOKI_USERNAME=${CENTRAL_LOKI_USERNAME:-telemetric}
- - CENTRAL_LOKI_PASSWORD=${CENTRAL_LOKI_PASSWORD:-12345678}
-
- # Single node configuration
- - NODE_ID=${NODE_ID:-local}
- - VALIDATOR_NAME=${VALIDATOR_NAME:-default}
- - NETWORK_NAME=${NETWORK_NAME:-asimov-phase5}
- - NODE_METRICS_ENDPOINT=${NODE_METRICS_ENDPOINT:-host.docker.internal:9153}
-
- # Multi-node configuration
- # When set, overrides single node config above
- - SCRAPE_TARGETS_JSON=${SCRAPE_TARGETS_JSON:-}
-
- # Scraping configuration
- - METRICS_SCRAPE_INTERVAL=${METRICS_SCRAPE_INTERVAL:-15s}
- - METRICS_SCRAPE_TIMEOUT=${METRICS_SCRAPE_TIMEOUT:-10s}
- - ALLOY_SELF_MONITORING_INTERVAL=${ALLOY_SELF_MONITORING_INTERVAL:-60s}
-
- # Log collection configuration
- - LOG_FILE_PATTERN=${LOG_FILE_PATTERN:-/var/log/genlayer/node*.log}
-
- # Log batching configuration
- - LOKI_BATCH_SIZE=${LOKI_BATCH_SIZE:-1MiB}
- - LOKI_BATCH_WAIT=${LOKI_BATCH_WAIT:-1s}
- ports:
- - "12345:12345" # Alloy UI for debugging
- restart: unless-stopped
- profiles:
- - monitoring
- extra_hosts:
- - "host.docker.internal:host-gateway"
+ # Grafana Alloy for both logs and metrics forwarding
+ # Supports both single node and multi-node configurations
+ #
+ # Single Node Mode:
+ # Set NODE_ID, VALIDATOR_NAME, NODE_METRICS_ENDPOINT in .env
+ # docker compose --profile monitoring up -d
+ #
+ # Multi-Node Mode:
+ # Set SCRAPE_TARGETS_JSON in .env
+ # docker compose --profile monitoring up -d
+ alloy:
+ image: grafana/alloy:v1.12.0
+ container_name: genlayer-node-alloy
+ command:
+ - run
+ - /etc/alloy/config.river
+ - --server.http.listen-addr=0.0.0.0:12345
+ - --storage.path=/var/lib/alloy/data
+ volumes:
+ - ./alloy-config.river:/etc/alloy/config.river:ro
+ - ./alloy-healthcheck.sh:/etc/alloy/healthcheck.sh:ro
+ - ${NODE_LOGS_PATH:-./data/node/logs}:/var/log/genlayer:ro
+ - alloy_data:/var/lib/alloy
+ healthcheck:
+ test: ["CMD", "sh", "/etc/alloy/healthcheck.sh"]
+ interval: 120s
+ timeout: 15s
+ retries: 1
+ start_period: 120s
+ environment:
+ # Central monitoring endpoints
+ - CENTRAL_LOKI_URL=${CENTRAL_LOKI_URL:-https://logs-prod-042.grafana.net/loki/api/v1/push}
+ - CENTRAL_MONITORING_URL=${CENTRAL_MONITORING_URL:-https://prometheus-prod-66-prod-us-east-3.grafana.net/api/prom/push}
+
+ # Metrics (Prometheus) authentication
+ - CENTRAL_MONITORING_USERNAME=${CENTRAL_MONITORING_USERNAME:-telemetric}
+ - CENTRAL_MONITORING_PASSWORD=${CENTRAL_MONITORING_PASSWORD:-12345678}
+
+ # Logs (Loki) authentication
+ - CENTRAL_LOKI_USERNAME=${CENTRAL_LOKI_USERNAME:-telemetric}
+ - CENTRAL_LOKI_PASSWORD=${CENTRAL_LOKI_PASSWORD:-12345678}
+
+ # Single node configuration
+ - NODE_ID=${NODE_ID:-validator-001}
+ - VALIDATOR_NAME=${VALIDATOR_NAME:-MyValidator}
+ - NODE_METRICS_ENDPOINT=${NODE_METRICS_ENDPOINT:-host.docker.internal:9153}
+
+ # Multi-node configuration
+ # When set, overrides single node config above
+ - SCRAPE_TARGETS_JSON=${SCRAPE_TARGETS_JSON:-}
+
+ # Scraping configuration
+ - METRICS_SCRAPE_INTERVAL=${METRICS_SCRAPE_INTERVAL:-60s}
+ - METRICS_SCRAPE_TIMEOUT=${METRICS_SCRAPE_TIMEOUT:-10s}
+ - ALLOY_SELF_MONITORING_INTERVAL=${ALLOY_SELF_MONITORING_INTERVAL:-60s}
+
+ # Log collection configuration
+ - LOG_FILE_PATTERN=${LOG_FILE_PATTERN:-/var/log/genlayer/node*.log}
+
+ # Log batching configuration
+ - LOKI_BATCH_SIZE=${LOKI_BATCH_SIZE:-1MiB}
+ - LOKI_BATCH_WAIT=${LOKI_BATCH_WAIT:-60s}
+ ports:
+ - "12345:12345" # Alloy UI for debugging
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
volumes:
alloy_data:
@@ -239,12 +245,6 @@ discovery.relabel "add_labels" {
replacement = "alloy"
}
- // Add network label from environment variable
- rule {
- target_label = "network"
- replacement = coalesce(sys.env("NETWORK_NAME"), "asimov-phase5")
- }
-
// Add job label
rule {
target_label = "job"
@@ -272,9 +272,13 @@ loki.write "central" {
password = sys.env("CENTRAL_LOKI_PASSWORD")
}
+  // Note: Alloy's loki.write component has no explicit retry block; failed
+  // pushes are retried automatically with exponential backoff whenever the
+  // endpoint is unreachable, so no extra retry configuration is needed here
+
// Configurable batch settings for efficient log sending
- batch_size = coalesce(sys.env("LOKI_BATCH_SIZE"), "1MiB")
- batch_wait = coalesce(sys.env("LOKI_BATCH_WAIT"), "1s")
+ batch_size = coalesce(sys.env("LOKI_BATCH_SIZE"), "1MiB") // Maximum batch size before sending
+ batch_wait = coalesce(sys.env("LOKI_BATCH_WAIT"), "60s") // Maximum wait time before sending partial batch
}
}
@@ -286,21 +290,24 @@ loki.write "central" {
// Supports both single node and multi-node configurations
//
// Single Node Mode:
-// Set NODE_METRICS_ENDPOINT, NODE_ID, VALIDATOR_NAME, NETWORK_NAME
+// Set NODE_METRICS_ENDPOINT, NODE_ID, VALIDATOR_NAME
//
// Multi-Node Mode:
// Set SCRAPE_TARGETS_JSON with JSON array of target objects
-// Example: [{"__address__":"host.docker.internal:9250","instance":"0x...","validator_name":"node-1","network":"testnet-asimov"}]
+// Example: [{"__address__":"host.docker.internal:9250","instance":"0x...","validator_name":"node-1"}]
+//
+// Note: The "network" label is emitted by the node itself (auto-detected from consensus address),
+// so it does not need to be configured here.
prometheus.scrape "genlayer_node" {
// Dynamic targets based on environment variable
// If SCRAPE_TARGETS_JSON is set, use it (multi-node mode)
// Otherwise, build single target from individual env vars (single node mode)
- targets = encoding.from_json(coalesce(sys.env("SCRAPE_TARGETS_JSON"), string.format("[{\"__address__\":\"%s\",\"instance\":\"%s\",\"validator_name\":\"%s\",\"network\":\"%s\"}]", coalesce(sys.env("NODE_METRICS_ENDPOINT"), "host.docker.internal:9153"), coalesce(sys.env("NODE_ID"), "local"), coalesce(sys.env("VALIDATOR_NAME"), "default"), coalesce(sys.env("NETWORK_NAME"), "asimov-phase5"))))
+ targets = encoding.from_json(coalesce(sys.env("SCRAPE_TARGETS_JSON"), string.format("[{\"__address__\":\"%s\",\"instance\":\"%s\",\"validator_name\":\"%s\"}]", coalesce(sys.env("NODE_METRICS_ENDPOINT"), "host.docker.internal:9153"), coalesce(sys.env("NODE_ID"), "local"), coalesce(sys.env("VALIDATOR_NAME"), "default"))))
forward_to = [prometheus.relabel.metrics.receiver]
// Configurable scrape intervals
- scrape_interval = coalesce(sys.env("METRICS_SCRAPE_INTERVAL"), "15s")
+ scrape_interval = coalesce(sys.env("METRICS_SCRAPE_INTERVAL"), "60s")
scrape_timeout = coalesce(sys.env("METRICS_SCRAPE_TIMEOUT"), "10s")
}
@@ -338,7 +345,7 @@ prometheus.remote_write "central" {
capacity = 10000
max_shards = 5
max_samples_per_send = 500
- batch_send_deadline = "15s"
+ batch_send_deadline = coalesce(sys.env("METRICS_BATCH_SEND_DEADLINE"), "60s")
}
}
}
diff --git a/pages/validators/setup-guide.mdx b/pages/validators/setup-guide.mdx
index 46ee2c59..8a527548 100644
--- a/pages/validators/setup-guide.mdx
+++ b/pages/validators/setup-guide.mdx
@@ -187,6 +187,8 @@ Possible options:
- Run an open-source model on a different machine
- Connect to a hosted inference provider (OpenAI, Anthropic, Heurist, Atoma network etc.)
+See the [GenVM Configuration](/validators/genvm-configuration) page for partner credits, provider setup, and the greybox fallback strategy.
+
#### ZKSync Full Node for the GenLayer Chain
Each validator needs access to a [ZKSync Full Node](https://docs.zksync.io/zksync-era/tooling/external-node) connected to the GenLayer chain. Validators use this node to read the chain state and submit transactions.
@@ -208,18 +210,18 @@ One full node can be shared between multiple validators. The optimal validator-t
You should see a list like this
```sh
+ v0.5.7
v0.5.6
v0.5.5
v0.5.4
v0.5.3
- v0.5.2
```
Typically, you will want to run the latest version
2. Download the packaged application
```sh copy
- export version=v0.5.6 # set your desired version here
+ export version=v0.5.7 # set your desired version here
wget https://storage.googleapis.com/gh-af/genlayer-node/bin/amd64/${version}/genlayer-node-linux-amd64-${version}.tar.gz
```
3. Extract the node software
diff --git a/scripts/update-alloy-config-in-monitoring.js b/scripts/update-alloy-config-in-monitoring.js
new file mode 100644
index 00000000..5cd92433
--- /dev/null
+++ b/scripts/update-alloy-config-in-monitoring.js
@@ -0,0 +1,50 @@
+#!/usr/bin/env node
+
+const fs = require('fs');
+const path = require('path');
+
/**
 * Update the monitoring page with the latest alloy-config.river from the synced content.
 * Replaces the river code block in monitoring.mdx with the current alloy-config.river file.
 *
 * Reads:  content/validators/alloy-config.river
 * Writes: pages/validators/monitoring.mdx (in place)
 *
 * On any failure (missing input file, pattern not found) the process exit
 * code is set to 1 so the npm-script wrapper and CI detect the failure
 * instead of treating a skipped update as success.
 */
function updateAlloyConfigInMonitoring() {
  const projectRoot = path.join(__dirname, '..');
  const alloyConfigPath = path.join(projectRoot, 'content/validators/alloy-config.river');
  const monitoringPath = path.join(projectRoot, 'pages/validators/monitoring.mdx');

  if (!fs.existsSync(alloyConfigPath)) {
    console.error(`Alloy config file ${alloyConfigPath} does not exist`);
    process.exitCode = 1; // fail the npm script so CI surfaces the problem
    return;
  }

  if (!fs.existsSync(monitoringPath)) {
    console.error(`Monitoring file ${monitoringPath} does not exist`);
    process.exitCode = 1;
    return;
  }

  // trimEnd() avoids injecting a trailing blank line inside the fenced block.
  const alloyConfigContent = fs.readFileSync(alloyConfigPath, 'utf8').trimEnd();
  let monitoringContent = fs.readFileSync(monitoringPath, 'utf8');

  // Match the river code block that directly follows the
  // "Create or update ./alloy-config.river" instruction in monitoring.mdx.
  // [^`]* keeps the match anchored to the nearest fence after that phrase.
  const riverPattern = /(Create or update \.\/alloy-config\.river[^`]*```river\n)([\s\S]*?)(\n```)/;

  if (!riverPattern.test(monitoringContent)) {
    console.error('Could not find alloy-config.river code block pattern in monitoring.mdx');
    process.exitCode = 1;
    return;
  }

  // Use a replacer function instead of a "$1...$3" template string so that
  // `$` sequences inside the river config cannot be misinterpreted as
  // String.replace special replacement patterns.
  monitoringContent = monitoringContent.replace(
    riverPattern,
    (_match, before, _old, after) => `${before}${alloyConfigContent}${after}`
  );

  fs.writeFileSync(monitoringPath, monitoringContent);
  console.log(`Updated monitoring alloy-config.river block at ${new Date().toISOString()}`);
}
+
+// Execute only when invoked directly as a CLI (e.g. via the npm script);
+// importing this module performs no side effects.
+if (require.main === module) {
+  updateAlloyConfigInMonitoring();
+}
+
+module.exports = { updateAlloyConfigInMonitoring };
diff --git a/scripts/update-alloy-in-monitoring.js b/scripts/update-alloy-in-monitoring.js
new file mode 100644
index 00000000..33f999ee
--- /dev/null
+++ b/scripts/update-alloy-in-monitoring.js
@@ -0,0 +1,147 @@
+#!/usr/bin/env node
+
+const fs = require('fs');
+const path = require('path');
+
/**
 * Update the monitoring page with the latest alloy service from the full docker-compose.
 * Extracts the alloy service definition + volumes section from the full (unsanitized)
 * docker-compose-monitoring.yaml and injects it into the YAML code block in
 * pages/validators/monitoring.mdx.
 *
 * On any failure (missing input file, extraction failure, pattern not found)
 * the process exit code is set to 1 so the npm-script wrapper and CI can
 * detect the failure instead of silently reporting success.
 */
function updateAlloyInMonitoring() {
  const projectRoot = path.join(__dirname, '..');
  const dockerComposePath = path.join(projectRoot, 'content/validators/docker-compose-monitoring.yaml');
  const monitoringPath = path.join(projectRoot, 'pages/validators/monitoring.mdx');

  if (!fs.existsSync(dockerComposePath)) {
    console.error(`Full docker-compose file ${dockerComposePath} does not exist`);
    process.exitCode = 1; // fail the npm script so CI surfaces the problem
    return;
  }

  if (!fs.existsSync(monitoringPath)) {
    console.error(`Monitoring file ${monitoringPath} does not exist`);
    process.exitCode = 1;
    return;
  }

  const dockerComposeContent = fs.readFileSync(dockerComposePath, 'utf8');
  let monitoringContent = fs.readFileSync(monitoringPath, 'utf8');

  // Pull the alloy service (plus trailing volumes section) out of the compose file.
  const alloyBlock = extractAlloyBlock(dockerComposeContent);

  if (!alloyBlock) {
    console.error('Could not extract alloy service block from docker-compose');
    process.exitCode = 1;
    return;
  }

  // Match the yaml code block directly after
  // "Add or verify the Alloy service in docker-compose.yaml" in monitoring.mdx.
  const alloyPattern = /(Add or verify the Alloy service in docker-compose\.yaml[^`]*```yaml\n)([\s\S]*?)(\n```)/;

  if (!alloyPattern.test(monitoringContent)) {
    console.error('Could not find alloy service YAML block pattern in monitoring.mdx');
    process.exitCode = 1;
    return;
  }

  // Use a replacer function instead of a "$1...$3" template so that `$`
  // sequences in the YAML (e.g. `${NODE_ID:-...}`) cannot be misread as
  // String.replace special replacement patterns.
  monitoringContent = monitoringContent.replace(
    alloyPattern,
    (_match, before, _old, after) => `${before}${alloyBlock}${after}`
  );

  fs.writeFileSync(monitoringPath, monitoringContent);
  console.log(`Updated monitoring alloy service block at ${new Date().toISOString()}`);
}
+
/**
 * Extract the alloy service definition (with its leading comment banner) and,
 * when present, the root-level `volumes:` section from docker-compose content.
 *
 * @param {string} content - Full text of the docker-compose YAML file.
 * @returns {string|null} The extracted lines joined with '\n', or null when no
 *   alloy service is found.
 *
 * Line-scan strategy:
 *   1. Locate the indented `alloy:` service line.
 *   2. Walk backwards over contiguous comment/blank lines to include the
 *      descriptive banner above the service, then trim leading blank lines.
 *   3. Walk forwards until the next sibling service or a root-level key;
 *      if that key is `volumes:`, include the whole volumes section too.
 *
 * (The previous implementation ran an extra regex pre-check whose result was
 * never used and whose anchoring contradicted the line scan; it has been
 * removed as dead code.)
 */
function extractAlloyBlock(content) {
  const lines = content.split('\n');

  // 1. Find the alloy service line (any indentation, matching the scan below).
  const alloyServiceIdx = lines.findIndex((line) => /^\s+alloy:/.test(line));
  if (alloyServiceIdx === -1) {
    return null;
  }

  // 2. Include the contiguous run of comment/blank lines directly above the
  //    service, then skip any leading blank lines of that run.
  let startIdx = alloyServiceIdx;
  for (let j = alloyServiceIdx - 1; j >= 0; j--) {
    if (/^\s*#/.test(lines[j]) || /^\s*$/.test(lines[j])) {
      startIdx = j;
    } else {
      break;
    }
  }
  while (startIdx < alloyServiceIdx && lines[startIdx].trim() === '') {
    startIdx++;
  }

  // 3. Find where the extracted region ends.
  let endIdx = lines.length;
  for (let i = alloyServiceIdx + 1; i < lines.length; i++) {
    const line = lines[i];

    // Root-level key (column 0), e.g. `volumes:` or another top-level section.
    if (/^\S/.test(line) && line.trim() !== '') {
      if (/^volumes:/.test(line)) {
        // Include the volumes section: it ends at the next root-level key
        // (or at end-of-file, in which case endIdx keeps its default).
        for (let k = i + 1; k < lines.length; k++) {
          if (/^\S/.test(lines[k]) && lines[k].trim() !== '' && !/^volumes:/.test(lines[k])) {
            endIdx = k;
            break;
          }
        }
      } else {
        endIdx = i;
      }
      break;
    }

    // A sibling service at 2-space indent (not deeper alloy config, not a comment).
    if (/^ \S/.test(line) && !/^ alloy/.test(line) && !/^ #/.test(line)) {
      endIdx = i;
      break;
    }
  }

  const result = lines.slice(startIdx, endIdx);

  // Drop trailing blank lines.
  while (result.length > 0 && result[result.length - 1].trim() === '') {
    result.pop();
  }

  return result.length > 0 ? result.join('\n') : null;
}
+
+// Execute only when invoked directly as a CLI (e.g. via the npm script);
+// importing this module performs no side effects.
+if (require.main === module) {
+  updateAlloyInMonitoring();
+}
+
+module.exports = { updateAlloyInMonitoring };
diff --git a/scripts/update-greybox-in-genvm-config.js b/scripts/update-greybox-in-genvm-config.js
new file mode 100644
index 00000000..e1b5515e
--- /dev/null
+++ b/scripts/update-greybox-in-genvm-config.js
@@ -0,0 +1,55 @@
+#!/usr/bin/env node
+
+const fs = require('fs');
+const path = require('path');
+
/**
 * Update the genvm-configuration page with the latest greybox setup guide.
 * Replaces everything from the "## Greyboxing LLMs" heading to EOF in
 * pages/validators/genvm-configuration.mdx with the content of
 * content/validators/greybox-setup-guide.md (stripping the guide's H1 title).
 *
 * On any failure (missing files, heading not found) the process exit code is
 * set to 1 so the npm-script wrapper and CI can detect the failure instead of
 * silently reporting success.
 */
function updateGreyboxInGenvmConfig() {
  const projectRoot = path.join(__dirname, '..');
  const greyboxPath = path.join(projectRoot, 'content/validators/greybox-setup-guide.md');
  const genvmConfigPath = path.join(projectRoot, 'pages/validators/genvm-configuration.mdx');

  if (!fs.existsSync(greyboxPath)) {
    console.error(`Greybox setup guide ${greyboxPath} does not exist`);
    process.exitCode = 1; // fail the npm script so CI surfaces the problem
    return;
  }

  if (!fs.existsSync(genvmConfigPath)) {
    console.error(`GenVM configuration file ${genvmConfigPath} does not exist`);
    process.exitCode = 1;
    return;
  }

  let greyboxContent = fs.readFileSync(greyboxPath, 'utf8').trimEnd();
  let genvmContent = fs.readFileSync(genvmConfigPath, 'utf8');

  // Strip the guide's H1 title line (and the blank line after it): the MDX
  // page supplies its own section heading.
  greyboxContent = greyboxContent.replace(/^# [^\n]*\n\n?/, '');

  // Replace from the "## Greybox LLMs" / "## Greyboxing LLMs" heading to EOF.
  const greyboxSectionPattern = /## Greybox(?:ing)? LLMs[\s\S]*$/;

  if (!greyboxSectionPattern.test(genvmContent)) {
    console.error('Could not find Greyboxing LLMs section in genvm-configuration.mdx');
    process.exitCode = 1;
    return;
  }

  // A replacer function prevents `$` sequences in the guide content (e.g. in
  // shell snippets) from being interpreted as String.replace replacement
  // patterns.
  genvmContent = genvmContent.replace(
    greyboxSectionPattern,
    () => `## Greyboxing LLMs\n\n${greyboxContent}\n`
  );

  fs.writeFileSync(genvmConfigPath, genvmContent);
  console.log(`Updated genvm-configuration greybox section at ${new Date().toISOString()}`);
}
+
+// Execute only when invoked directly as a CLI (e.g. via the npm script);
+// importing this module performs no side effects.
+if (require.main === module) {
+  updateGreyboxInGenvmConfig();
+}
+
+module.exports = { updateGreyboxInGenvmConfig };