diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..6001fc1
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,21 @@
+# Copy this file to .env and fill in the values. Do NOT commit your real secrets.
+
+# AI Provider API Keys (set at least one)
+OPENAI_API_KEY=
+AZURE_OPENAI_ENDPOINT=
+AZURE_OPENAI_KEY=
+AZURE_OPENAI_DEPLOYMENT=gpt-4o
+ANTHROPIC_API_KEY=
+GOOGLE_GEMINI_KEY=
+CUSTOM_AI_ENDPOINT=http://localhost:11434/v1
+CUSTOM_AI_KEY=
+
+# AI Gateway Configuration
+AI_GATEWAY_PORT=3002
+AI_DEFAULT_PROVIDER=openai
+AI_RATE_LIMIT_PER_MINUTE=60
+AI_CACHE_TTL_SECONDS=300
+AI_ALLOW_ANONYMOUS=true
+AI_ALLOW_UNREGISTERED=true
+AI_API_KEY=your-api-key-for-testing
+ADMIN_KEY=your-admin-key
diff --git a/.github/cspell/cspell.json b/.github/cspell/cspell.json
new file mode 100644
index 0000000..27402b9
--- /dev/null
+++ b/.github/cspell/cspell.json
@@ -0,0 +1,5 @@
+{
+ "version": "0.1",
+ "language": "en",
+ "words": ["sterilization","datacentra","NetworkBuster","regolith","PAPR","UV-C"]
+}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..541298e
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,23 @@
+name: CI
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Use Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '24'
+ - name: Install dependencies
+ run: npm ci
+ - name: Run tests
+ run: npm test
+ - name: Run npm audit
+ run: npm audit --audit-level=moderate || true
diff --git a/.github/workflows/integration-device-registration.yml b/.github/workflows/integration-device-registration.yml
new file mode 100644
index 0000000..639d90b
--- /dev/null
+++ b/.github/workflows/integration-device-registration.yml
@@ -0,0 +1,54 @@
+name: CI - Device Registration Integration Tests
+
+on:
+ push:
+ branches: [ main, master ]
+ pull_request:
+ branches: [ main, master ]
+ workflow_dispatch: {}
+
+jobs:
+ integration-tests:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ node-version: [24.x]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Use Node.js ${{ matrix.node-version }}
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node-version }}
+ cache: 'npm'
+
+ - name: Install dependencies
+ run: npm ci
+
+ - name: Start server (background)
+ run: |
+ nohup node server.js > server.log 2>&1 &
+ sleep 1
+
+ - name: Wait for server
+ run: |
+ for i in {1..30}; do
+ if curl -sSf http://localhost:3001/api/health >/dev/null; then
+ echo "server ready"; exit 0
+ fi
+ sleep 1
+ done
+ echo "Server not ready" && cat server.log && exit 1
+
+ - name: Run E2E integration test
+ env:
+ BASE: http://localhost:3001
+ run: npm run test:integration:devices
+
+ - name: Upload server logs on failure
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: server-log
+ path: server.log
diff --git a/.github/workflows/lfs-build.yml b/.github/workflows/lfs-build.yml
new file mode 100644
index 0000000..bf16dc4
--- /dev/null
+++ b/.github/workflows/lfs-build.yml
@@ -0,0 +1,88 @@
+name: Build LFS rootfs (PoC)
+
+on:
+ push:
+ paths:
+ - 'os/lfs/**'
+ workflow_dispatch:
+ inputs:
+ build_kernel:
+ description: 'Build kernel during job? ("true" or "false")'
+ required: false
+ default: 'false'
+ kernel_version:
+ description: 'Kernel version to build (e.g., 6.8.13)'
+ required: false
+ default: '6.8.13'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set kernel build flags
+ run: |
+ echo "event_name=${{ github.event_name }}"
+ # If manually dispatched and build_kernel==true, enable kernel build
+ if [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ "${{ github.event.inputs.build_kernel }}" = "true" ]; then
+ echo "SKIP_KERNEL=false" >> $GITHUB_ENV
+ echo "KERNEL_VERSION=${{ github.event.inputs.kernel_version }}" >> $GITHUB_ENV
+ echo "Kernel build enabled: ${{ github.event.inputs.kernel_version }}"
+ else
+ echo "SKIP_KERNEL=true" >> $GITHUB_ENV
+ echo "KERNEL_VERSION=${{ github.event.inputs.kernel_version || '6.8.13' }}" >> $GITHUB_ENV
+ echo "Kernel build disabled (default)"
+ fi
+
+ - name: Restore kernel cache
+ uses: actions/cache@v4
+ with:
+ path: .cache/linux-${{ env.KERNEL_VERSION }}
+ key: linux-kernel-${{ env.KERNEL_VERSION }}-${{ runner.os }}-v1
+
+ - name: Build container image
+ run: |
+ docker build -t lfs-build -f os/lfs/Dockerfile .
+
+ - name: Run build in container
+ env:
+ SKIP_KERNEL: ${{ env.SKIP_KERNEL }}
+ KERNEL_VERSION: ${{ env.KERNEL_VERSION }}
+ run: |
+ mkdir -p os/lfs/output
+ mkdir -p .cache/linux-${{ env.KERNEL_VERSION }}
+ docker run --rm -e SKIP_KERNEL="$SKIP_KERNEL" -e KERNEL_VERSION="$KERNEL_VERSION" -e KERNEL_CACHE_DIR="/workspace/kernel-cache" -v "$PWD/.cache/linux-${{ env.KERNEL_VERSION }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build || true
+
+ - name: Upload rootfs artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: lfs-rootfs
+ path: os/lfs/output/rootfs.tar.gz
+
+ - name: Attempt QEMU smoke boot (best-effort)
+ if: always()
+ run: |
+ # install QEMU on runner
+ sudo apt-get update && sudo apt-get install -y qemu-system-x86
+
+ # choose kernel: prefer built artifact
+ if [ -f os/lfs/output/vmlinuz-${{ env.KERNEL_VERSION }} ]; then
+ KERNEL=$(pwd)/os/lfs/output/vmlinuz-${{ env.KERNEL_VERSION }}
+ echo "Using built kernel: $KERNEL"
+ elif ls /boot/vmlinuz-* 1>/dev/null 2>&1 && [ -f os/lfs/output/rootfs.cpio.gz ]; then
+ KERNEL=$(ls -1 /boot/vmlinuz-* | tail -n1)
+ echo "Using host kernel: $KERNEL"
+ else
+ KERNEL=""
+ fi
+
+ if [ -n "$KERNEL" ] && [ -f os/lfs/output/rootfs.cpio.gz ]; then
+ timeout 30s qemu-system-x86_64 -kernel "$KERNEL" -initrd os/lfs/output/rootfs.cpio.gz -nographic -append "console=ttyS0 root=/dev/ram0 rw init=/init" -m 512 -no-reboot || true
+ else
+ echo "No kernel + initramfs available on runner - skipping QEMU boot test"
+ fi
+
+ - name: List artifacts
+ run: ls -lh os/lfs/output || true
diff --git a/.github/workflows/lfs-cache-validate.yml b/.github/workflows/lfs-cache-validate.yml
new file mode 100644
index 0000000..dde9b37
--- /dev/null
+++ b/.github/workflows/lfs-cache-validate.yml
@@ -0,0 +1,63 @@
+name: Validate LFS kernel cache
+
+on:
+ workflow_dispatch:
+ inputs:
+ kernel_version:
+ description: 'Kernel version to validate (e.g., 6.8.13)'
+ required: false
+ default: '6.8.13'
+
+jobs:
+ build-and-cache:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Restore cache (initial)
+ uses: actions/cache@v4
+ with:
+ path: .cache/linux-${{ github.event.inputs.kernel_version }}
+ key: linux-kernel-${{ github.event.inputs.kernel_version }}-${{ runner.os }}-v1
+
+ - name: Build container image
+ run: docker build -t lfs-build -f os/lfs/Dockerfile .
+
+ - name: Run build to populate cache
+ run: |
+ mkdir -p .cache/linux-${{ github.event.inputs.kernel_version }}
+ docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="${{ github.event.inputs.kernel_version }}" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$PWD/.cache/linux-${{ github.event.inputs.kernel_version }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build || true
+
+ verify-cache:
+ runs-on: ubuntu-latest
+ needs: build-and-cache
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Restore cache (verify)
+ uses: actions/cache@v4
+ with:
+ path: .cache/linux-${{ github.event.inputs.kernel_version }}
+ key: linux-kernel-${{ github.event.inputs.kernel_version }}-${{ runner.os }}-v1
+
+ - name: Check cached vmlinuz exists
+ run: |
+ if [ -f .cache/linux-${{ github.event.inputs.kernel_version }}/vmlinuz-${{ github.event.inputs.kernel_version }} ]; then
+ echo "Cached kernel found"
+ else
+ echo "Cached kernel missing" >&2
+ exit 1
+ fi
+
+ - name: Run build and check logs for cache usage
+ run: |
+ mkdir -p os/lfs/output
+ docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="${{ github.event.inputs.kernel_version }}" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$PWD/.cache/linux-${{ github.event.inputs.kernel_version }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build | tee build.log || true
+ if grep -q "Using cached kernel tarball" build.log || grep -q "Using cached built kernel" build.log; then
+ echo "Cache used during build"
+ else
+ echo "Cache not used (check output)" >&2
+ exit 1
+ fi
\ No newline at end of file
diff --git a/.github/workflows/network-boost-ci.yml b/.github/workflows/network-boost-ci.yml
new file mode 100644
index 0000000..b8cb8e3
--- /dev/null
+++ b/.github/workflows/network-boost-ci.yml
@@ -0,0 +1,52 @@
+name: Network Boost CI
+
+on:
+ pull_request:
+ paths:
+ - 'contrib/Cleanskiier27-final/**'
+ - 'scripts/network-boost.*'
+
+jobs:
+ lint-and-dryrun-linux:
+ name: Lint (shellcheck) & Dry-run (Linux)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install ShellCheck
+ run: sudo apt-get update && sudo apt-get install -y shellcheck
+ - name: Run ShellCheck on Linux script
+ run: |
+ shellcheck contrib/Cleanskiier27-final/scripts/network-boost.sh || true
+ - name: Run Linux dry-run
+ run: |
+ bash contrib/Cleanskiier27-final/scripts/network-boost.sh || true
+
+ lint-and-dryrun-windows:
+ name: Lint (PSScriptAnalyzer) & Dry-run (Windows)
+ runs-on: windows-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install PSScriptAnalyzer
+ shell: pwsh
+ run: |
+ Install-Module -Name PSScriptAnalyzer -Force -Scope CurrentUser -Confirm:$false
+ - name: Run PSScriptAnalyzer
+ shell: pwsh
+ run: |
+ Invoke-ScriptAnalyzer -Path .\contrib\Cleanskiier27-final\scripts\network-boost.ps1 -Recurse -Severity Error
+ - name: Windows dry-run
+ shell: pwsh
+ run: |
+ powershell -NoProfile -ExecutionPolicy Bypass -File .\contrib\Cleanskiier27-final\scripts\network-boost.ps1
+
+ optional-checks:
+ name: Optional checks (formatters/linter)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Run shellcheck on all shell scripts (contrib)
+ run: |
+ for f in $(git ls-files 'contrib/**.sh'); do shellcheck "$f" || true; done
+ - name: Display generated files (for review)
+ run: |
+ ls -R contrib/Cleanskiier27-final || true
diff --git a/.github/workflows/recycle-ai-demo.yml b/.github/workflows/recycle-ai-demo.yml
new file mode 100644
index 0000000..0a6eeea
--- /dev/null
+++ b/.github/workflows/recycle-ai-demo.yml
@@ -0,0 +1,23 @@
+name: Recycle AI demo
+
+on:
+ workflow_dispatch:
+
+jobs:
+ demo:
+ runs-on: ubuntu-latest
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: 24
+ - name: Install dependencies
+ run: npm ci
+ - name: Run simple recycle API test
+ run: |
+ node server.js &
+ sleep 2
+ curl -sS -X POST http://localhost:3001/api/recycle/recommend -H 'Content-Type: application/json' -d '{"items":[{"name":"plastic bottle"}],"location":"94107"}' | jq '.' || true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..84dc9d3
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,81 @@
+name: Build and Release
+
+on:
+ push:
+ tags:
+ - 'v*.*.*'
+ workflow_dispatch: {}
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Use Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '24'
+ - name: Install dependencies
+ run: npm ci
+ - name: Run dist script
+ run: npm run dist:zip
+ - name: Upload artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-zip
+ path: dist/*.zip
+
+ build-windows-installer:
+ runs-on: windows-latest
+ needs: build
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '24'
+ - name: Install dependencies
+ run: npm ci
+ - name: Install NSIS & ImageMagick
+ run: |
+ choco install nsis -y
+ choco install imagemagick -y
+ - name: Convert icon (ImageMagick) and build
+ run: |
+ powershell -ExecutionPolicy Bypass -File scripts/installer/convert-icon.ps1 || echo "convert skipped"
+ npm run dist:nsis
+ - name: Upload installer
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-installer
+ path: dist/*Setup.exe
+
+ release:
+ needs: [build, build-windows-installer]
+ runs-on: ubuntu-latest
+ if: startsWith(github.ref, 'refs/tags/v')
+ steps:
+ - uses: actions/checkout@v4
+ - name: Create GitHub Release
+ id: create_release
+ uses: actions/create-release@v1
+ with:
+ tag_name: ${{ github.ref_name }}
+ release_name: Release ${{ github.ref_name }}
+ draft: false
+ prerelease: false
+ body: "Automated release created by workflow"
+ - name: Upload release zip asset
+ uses: actions/upload-release-asset@v1
+ with:
+ upload_url: ${{ steps.create_release.outputs.upload_url }}
+ asset_path: dist/*.zip
+ asset_name: ${{ github.repository }}-${{ github.ref_name }}.zip
+ asset_content_type: application/zip
+ - name: Upload release installer asset
+ uses: actions/upload-release-asset@v1
+ with:
+ upload_url: ${{ steps.create_release.outputs.upload_url }}
+ asset_path: dist/*Setup.exe
+ asset_name: ${{ github.repository }}-${{ github.ref_name }}-installer.exe
+ asset_content_type: application/octet-stream
\ No newline at end of file
diff --git a/.github/workflows/render-diagrams.yml b/.github/workflows/render-diagrams.yml
new file mode 100644
index 0000000..1e6648d
--- /dev/null
+++ b/.github/workflows/render-diagrams.yml
@@ -0,0 +1,41 @@
+name: Render diagrams to PNG
+
+on:
+ workflow_dispatch: {}
+ push:
+ branches:
+ - bigtree
+
+jobs:
+ render:
+ name: Render Mermaid diagrams
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '24'
+
+ - name: Install dependencies
+ run: npm ci
+
+ - name: Install Puppeteer for rendering
+ run: npm install puppeteer --no-save
+
+ - name: Render Mermaid to SVG
+ run: npx -y @mermaid-js/mermaid-cli -i "docs/diagrams/*.mmd" -o docs/diagrams -f svg
+
+ - name: Render SVGs to PNG (hi-res)
+ run: node scripts/render-svgs.js 4
+
+ - name: List generated PNGs
+ run: ls -la docs/diagrams/*.png || true
+
+ - name: Upload PNG artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: diagrams-png
+ path: docs/diagrams/*.png
diff --git a/.github/workflows/smoke-e2e-openai.yml b/.github/workflows/smoke-e2e-openai.yml
new file mode 100644
index 0000000..eff4e6d
--- /dev/null
+++ b/.github/workflows/smoke-e2e-openai.yml
@@ -0,0 +1,67 @@
+name: Smoke test - OpenAI end-to-end
+
+on:
+ workflow_dispatch: {}
+ pull_request:
+ types: [opened, synchronize, reopened]
+
+permissions:
+ actions: read
+
+jobs:
+ smoke-e2e:
+ name: E2E smoke test (start server + call /api/recycle/recommend)
+ runs-on: ubuntu-latest
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '24'
+
+ - name: Install dependencies
+ run: npm ci
+
+ - name: Start server in background
+ run: |
+ nohup npm start > server.log 2>&1 &
+ echo $! > server.pid
+
+ - name: Wait for server health
+ run: |
+ for i in {1..30}; do
+ STATUS=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/api/health || true)
+ echo "Attempt $i: health=$STATUS"
+ if [ "$STATUS" = "200" ]; then
+ curl -s http://localhost:3001/api/health | jq -r '.status, .uptime'
+ break
+ fi
+ sleep 2
+ done
+
+ - name: Perform recycle recommend request
+ run: |
+ set -o pipefail
+ echo '{"items":["plastic bottle"], "location":"test"}' > /tmp/payload.json
+ HTTP_CODE=$(curl -s -w "%{http_code}" -o /tmp/rec.json -X POST -H "Content-Type: application/json" -d @/tmp/payload.json http://localhost:3001/api/recycle/recommend)
+ echo "HTTP_CODE=$HTTP_CODE"
+ cat /tmp/rec.json
+ if [ "$HTTP_CODE" != "200" ]; then
+ echo "recommend endpoint failed: $HTTP_CODE" >&2
+ exit 1
+ fi
+ OK=$(jq -r '.ok' /tmp/rec.json)
+ if [ "$OK" != "true" ]; then
+ echo "recommend returned ok!=true" >&2
+ exit 1
+ fi
+
+ - name: Cleanup server
+ if: always()
+ run: |
+ if [ -f server.pid ]; then kill $(cat server.pid) || true; fi
+ pkill -f "node server.js" || true
diff --git a/.github/workflows/sterilization-docs.yml b/.github/workflows/sterilization-docs.yml
new file mode 100644
index 0000000..5000b1e
--- /dev/null
+++ b/.github/workflows/sterilization-docs.yml
@@ -0,0 +1,30 @@
+name: Lint Sterilization Docs
+
+on:
+ pull_request:
+ paths:
+ - 'docs/**'
+ - 'templates/**'
+ - 'MATERIALS.md'
+
+jobs:
+ lint_docs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Run Super-Linter (markdown)
+ uses: github/super-linter@v5
+ env:
+ VALIDATE_MARKDOWN: true
+ DEFAULT_BRANCH: main
+
+ spellcheck:
+ runs-on: ubuntu-latest
+ needs: lint_docs
+ steps:
+ - uses: actions/checkout@v4
+ - name: Run cspell
+ uses: check-spelling/action@v0.0.25
+ with:
+ config: .github/cspell/cspell.json
+ continue-on-error: true
diff --git a/.github/workflows/test-ai-robot.yml b/.github/workflows/test-ai-robot.yml
new file mode 100644
index 0000000..dda844d
--- /dev/null
+++ b/.github/workflows/test-ai-robot.yml
@@ -0,0 +1,44 @@
+name: Test AI Robot (mock)
+
+on:
+ pull_request:
+ branches:
+ - '**'
+
+jobs:
+ test:
+ name: AI Robot tests (mock) on ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest]
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup
+ if: matrix.os == 'ubuntu-latest'
+ run: |
+ chmod +x ./scripts/test-ai-robot.sh
+
+ - name: Run AI Robot tests (Linux)
+ if: matrix.os == 'ubuntu-latest'
+ run: |
+ ./scripts/test-ai-robot.sh --mock --concurrency 3
+
+ - name: Run AI Robot tests (Windows PowerShell)
+ if: matrix.os == 'windows-latest'
+ shell: pwsh
+ run: |
+ Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope Process
+ .\scripts\test-ai-robot.ps1 -Mock -Concurrency 3
+
+ - name: Upload test logs
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: ai-robot-test-logs-${{ github.run_id }}
+ path: |
+ ./scripts/test-ai-robot.sh
+ ./scripts/test-ai-robot.ps1
diff --git a/.github/workflows/test-openai-secret.yml b/.github/workflows/test-openai-secret.yml
new file mode 100644
index 0000000..3419c8e
--- /dev/null
+++ b/.github/workflows/test-openai-secret.yml
@@ -0,0 +1,38 @@
+name: Test OpenAI secret
+
+on:
+ workflow_dispatch: {}
+ pull_request:
+ types: [opened, synchronize, reopened]
+
+permissions:
+ actions: read
+
+jobs:
+ check-openai-key:
+ name: Check OPENAI_API_KEY
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Ensure OPENAI_API_KEY secret exists
+ run: |
+ if [ -z "${{ secrets.OPENAI_API_KEY }}" ]; then
+ echo "ERROR: OPENAI_API_KEY is not set in repository secrets" >&2
+ exit 1
+ fi
+ echo "OPENAI_API_KEY appears set (will not print the value)"
+
+ - name: Validate OpenAI API key by calling Models endpoint
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ run: |
+ set -o pipefail
+ STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $OPENAI_API_KEY" https://api.openai.com/v1/models || true)
+ echo "HTTP status: $STATUS"
+ if [ "$STATUS" != "200" ]; then
+ echo "OpenAI API request failed with status $STATUS" >&2
+ exit 1
+ fi
+ echo "OpenAI API key validation succeeded (HTTP 200)."
diff --git a/.gitignore b/.gitignore
index 5002c7a..ddbc150 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
node_modules/
.env
+# Local env file (contains secrets)
+.env
.env.local
.env.*.local
dist/
@@ -7,3 +9,17 @@ build/
*.log
.DS_Store
.vercel
+
+# Android: google-services and local.properties
+android/antigravity/app/google-services.json
+android/antigravity/local.properties
+
+# Ignore local tool bundles
+tools/
+
+# LFS build artifacts
+os/lfs/output/
+
+# Local sensitive scripts (do not commit)
+scripts/dummy-sa.json
+scripts/gcloud-startup.ps1
diff --git a/.security/active_session.json b/.security/active_session.json
new file mode 100644
index 0000000..0109106
--- /dev/null
+++ b/.security/active_session.json
@@ -0,0 +1,7 @@
+{
+ "username": "admin",
+ "level": 4,
+ "login_time": "2026-01-02T11:50:53.035271",
+ "host": "BOOK-KDMJTUA9LB",
+ "platform": "Windows"
+}
\ No newline at end of file
diff --git a/.security/users.json b/.security/users.json
new file mode 100644
index 0000000..e85a1f1
--- /dev/null
+++ b/.security/users.json
@@ -0,0 +1,9 @@
+{
+ "admin": {
+ "password_hash": "8a6d1f7718c6d64b31d720c8f0c1ee60c9f75c8016b6d95ad86e24a6e325b817",
+ "level": 4,
+ "created": "2026-01-02T11:48:37.888238",
+ "last_login": "2026-01-02T11:50:53.033672",
+ "mfa_enabled": false
+ }
+}
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..2884f57
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,40 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": ".NET: Attach to Process",
+ "type": "coreclr",
+ "request": "attach",
+ "processId": "${command:pickProcess}",
+ "justMyCode": true
+ },
+ {
+ "name": ".NET: Launch (console)",
+ "type": "coreclr",
+ "request": "launch",
+ "preLaunchTask": "build",
+ // Replace the program path below with your project's output DLL if applicable
+ "program": "${workspaceFolder}/bin/Debug/net7.0/YourApp.dll",
+ "args": [],
+ "cwd": "${workspaceFolder}",
+ "stopAtEntry": false,
+ "console": "integratedTerminal",
+ "justMyCode": true
+ },
+ {
+ "name": "Website: Launch preciseliens.com",
+ "type": "coreclr",
+ "request": "launch",
+ "preLaunchTask": "build",
+ "program": "${workspaceFolder}/bin/Debug/net7.0/Preciseliens.Web.dll",
+ "args": [],
+ "cwd": "${workspaceFolder}",
+ "env": {
+ "ASPNETCORE_URLS": "https://preciseliens.com;http://localhost:5000"
+ },
+ "stopAtEntry": false,
+ "console": "integratedTerminal",
+ "justMyCode": true
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000..f489c56
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,14 @@
+{
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "build",
+ "type": "shell",
+ "command": "dotnet",
+ "args": ["build"],
+ "group": { "kind": "build", "isDefault": true },
+ "presentation": { "reveal": "always" },
+ "problemMatcher": ["$msCompile"]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/ANDREW.ps1.bak b/ANDREW.ps1.bak
deleted file mode 100644
index 2dd5633..0000000
--- a/ANDREW.ps1.bak
+++ /dev/null
@@ -1,212 +0,0 @@
-# ๐ก๏ธ ANDREW - Automated Network Deployment Engine (Azure Ready!)
-# Master orchestration script for NetworkBuster infrastructure
-# Inspired by Andrew's Trials: Tower of Code, Labyrinth of Data, Dragon of Scale, Mirror of Innovation
-
-param(
- [Parameter(Mandatory = $false)]
- [ValidateSet("deploy-storage", "deploy-all", "status", "backup", "sync")]
- [string]$Task = "status",
-
- [Parameter(Mandatory = $false)]
- [string]$Environment = "production"
-)
-
-# Colors for output
-$Colors = @{
- Success = "Green"
- Warning = "Yellow"
- Error = "Red"
- Info = "Cyan"
- Trial = "Magenta"
-}
-
-function Write-Trial {
- param([string]$Message, [string]$Trial)
- Write-Host "[$Trial] $Message" -ForegroundColor $Colors.Trial
-}
-
-function Write-Status {
- param([string]$Message, [string]$Status = "Info")
- Write-Host $Message -ForegroundColor $Colors[$Status]
-}
-
-# ============================================================================
-# ANDREW'S TRIALS - Infrastructure Deployment Tasks
-# ============================================================================
-
-function Invoke-StorageDeployment {
- Write-Trial "โก Trial One: Tower of Code - Building the Foundation" "ANDREW"
-
- $scriptPath = ".\deploy-storage-azure.ps1"
-
- if (-not (Test-Path $scriptPath)) {
- Write-Status "โ Deploy script not found at $scriptPath" "Error"
- return $false
- }
-
- Write-Status "๐ง Executing Azure Storage deployment..." "Info"
- & $scriptPath
-
- Write-Status "โ
Tower of Code construction complete!" "Success"
- return $true
-}
-
-function Invoke-FullDeployment {
- Write-Trial "๐ก๏ธ ANDREW'S FULL QUEST: All Trials Activated" "ANDREW"
-
- # Trial 1: Storage
- Write-Trial "๐ Trial One: Tower of Code" "ANDREW"
- Invoke-StorageDeployment
-
- # Trial 2: Sync
- Write-Trial "๐ Trial Two: Labyrinth of Data - Synchronizing" "ANDREW"
- Write-Status "Syncing repositories..." "Info"
- git status
-
- # Trial 3: Backup
- Write-Trial "๐ Trial Three: Dragon of Scale - Creating Backups" "ANDREW"
- Invoke-BackupProcedure
-
- # Trial 4: Status
- Write-Trial "๐ช Trial Four: Mirror of Innovation - Status Check" "ANDREW"
- Get-InfrastructureStatus
-
- Write-Status "๐ ANDREW'S QUEST COMPLETE!" "Success"
-}
-
-function Invoke-BackupProcedure {
- Write-Status "Creating backup of current state..." "Info"
-
- $backupDate = Get-Date -Format "yyyyMMdd_HHmmss"
- $backupPath = "D:\networkbuster_backup_$backupDate"
-
- if (-not (Test-Path "D:\")) {
- Write-Status "โ ๏ธ D: drive not accessible, skipping backup" "Warning"
- return
- }
-
- try {
- Copy-Item -Path "." -Destination $backupPath -Recurse -Force
- Write-Status "โ
Backup created: $backupPath" "Success"
- }
- catch {
- Write-Status "โ Backup failed: $_" "Error"
- }
-}
-
-function Get-InfrastructureStatus {
- Write-Status "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ" "Info"
- Write-Status "๐ ANDREW'S INFRASTRUCTURE STATUS" "Info"
- Write-Status "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ" "Info"
-
- # Git status
- Write-Status "`n๐ฆ Repository Status:" "Info"
- git branch -v
- git status --short
-
- # Storage check
- Write-Status "`n๐พ Storage Infrastructure:" "Info"
- if (Test-Path ".\infra\storage.bicep") {
- Write-Status "โ
Bicep template found" "Success"
- Get-Item ".\infra\storage.bicep" | Select-Object Name, Length, LastWriteTime | Format-Table
- }
- else {
- Write-Status "โ Bicep template missing" "Error"
- }
-
- # Script check
- Write-Status "`n๐ Deployment Scripts:" "Info"
- $scripts = @("deploy-storage-azure.ps1", "deploy-storage-azure.sh", "ANDREW.ps1")
- foreach ($script in $scripts) {
- if (Test-Path ".\$script") {
- Write-Status "โ
$script" "Success"
- }
- else {
- Write-Status "โ $script" "Error"
- }
- }
-
- # Azure CLI check
- Write-Status "`nโ๏ธ Azure Connectivity:" "Info"
- try {
- $azVersion = az --version | Select-Object -First 1
- Write-Status "โ
Azure CLI: $azVersion" "Success"
- }
- catch {
- Write-Status "โ ๏ธ Azure CLI not available (optional)" "Warning"
- }
-
- Write-Status "`nโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ" "Info"
-}
-
-function Sync-Repositories {
- Write-Trial "๐ Synchronizing all branches with DATACENTRAL" "ANDREW"
-
- try {
- Write-Status "๐ก Checking current branch..." "Info"
- $currentBranch = (git rev-parse --abbrev-ref HEAD)
- Write-Status "Current: $currentBranch" "Info"
-
- Write-Status "`n๐ All branches:" "Info"
- git branch -a
-
- Write-Status "`n๐ Fetching from remote..." "Info"
- git fetch origin
-
- Write-Status "โ
Repository sync complete" "Success"
- }
- catch {
- Write-Status "โ Sync failed: $_" "Error"
- }
-}
-
-# ============================================================================
-# Main Execution
-# ============================================================================
-
-Write-Host "`n" -ForegroundColor Black
-Write-Host "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ" -ForegroundColor Magenta
-Write-Host "โ ๐ก๏ธ ANDREW - Network Deployment Engine ๐ก๏ธ โ" -ForegroundColor Magenta
-Write-Host "โ Automated Deployment for NetworkBuster Infrastructure โ" -ForegroundColor Magenta
-Write-Host "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ" -ForegroundColor Magenta
-Write-Host "`n"
-
-Write-Status "โฑ๏ธ Timestamp: $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss')" "Info"
-Write-Status "๐ Environment: $Environment" "Info"
-Write-Status "๐ Location: $(Get-Location)" "Info"
-Write-Status "๐ฏ Task: $Task" "Info"
-Write-Host "`n"
-
-switch ($Task) {
- "deploy-storage" {
- Invoke-StorageDeployment
- }
- "deploy-all" {
- Invoke-FullDeployment
- }
- "backup" {
- Invoke-BackupProcedure
- }
- "sync" {
- Sync-Repositories
- }
- "status" {
- Get-InfrastructureStatus
- }
- default {
- Get-InfrastructureStatus
- }
-}
-
-Write-Host "`n"
-Write-Status "๐ ANDREW execution complete" "Success"
-Write-Host "`n"
-
-# Usage examples
-Write-Host "๐ ANDREW Usage Examples:" -ForegroundColor Cyan
-Write-Host " .\ANDREW.ps1 # Show infrastructure status" -ForegroundColor Gray
-Write-Host " .\ANDREW.ps1 -Task deploy-storage # Deploy Azure Storage only" -ForegroundColor Gray
-Write-Host " .\ANDREW.ps1 -Task deploy-all # Full deployment (all trials)" -ForegroundColor Gray
-Write-Host " .\ANDREW.ps1 -Task backup # Create backup to D: drive" -ForegroundColor Gray
-Write-Host " .\ANDREW.ps1 -Task sync # Synchronize with remote" -ForegroundColor Gray
-Write-Host "`n"
diff --git a/AUTOSTART.bat b/AUTOSTART.bat
new file mode 100644
index 0000000..53e7706
--- /dev/null
+++ b/AUTOSTART.bat
@@ -0,0 +1,32 @@
+@echo off
+REM NetworkBuster One-Click Auto-Start
+REM Automatically requests permissions and starts everything
+
+cd /d "%~dp0"
+
+echo.
+echo ==========================================
+echo NetworkBuster One-Click Launcher
+echo ==========================================
+echo.
+
+REM Check if running as admin
+net session >nul 2>&1
+if %errorLevel% neq 0 (
+ echo Requesting administrator permissions...
+ powershell -Command "Start-Process '%~f0' -Verb RunAs"
+ exit /b
+)
+
+echo Running with administrator privileges...
+echo.
+echo Starting all services...
+echo.
+
+call .venv\Scripts\activate.bat
+python auto_start_service.py
+
+echo.
+echo All services started!
+echo Window will close in 3 seconds...
+timeout /t 3 /nobreak >nul
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..7ccc62a
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,10 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+## [Unreleased]
+- Packaging scripts added: `scripts/make-release.js` and `scripts/create-shortcut.ps1`
+- Added `start-desktop.bat` and `npm` scripts: `dist:zip`, `release:create-shortcut`, `start:desktop`
+
+## [1.0.1] - YYYY-MM-DD
+- Initial production release
diff --git a/COMPLETION-ACKNOWLEDGMENT.md b/COMPLETION-ACKNOWLEDGMENT.md
new file mode 100644
index 0000000..13f0aff
--- /dev/null
+++ b/COMPLETION-ACKNOWLEDGMENT.md
@@ -0,0 +1,29 @@
+# Completion Acknowledgement
+
+**Project:** NetworkBuster
+
+**Date:** December 17, 2025
+
+Thank you to everyone who contributed to the completion and distribution preparation of NetworkBuster. Your work on packaging, CI, and installer tooling made this milestone possible.
+
+## Completed highlights
+- Packaging scripts added: `scripts/make-release.js` (ZIP) and `scripts/build-nsis.ps1` (NSIS)
+- Desktop shortcuts & launcher: `scripts/create-shortcut.ps1`, `start-desktop.bat`
+- Windows installer: `scripts/installer/networkbuster-installer.nsi`
+- Installer assets added: `scripts/installer/EULA.txt`, `scripts/installer/icon-placeholder.png`, `scripts/installer/convert-icon.ps1`, and `scripts/generate-icons.ps1`
+- Placeholder multi-size icons: `scripts/installer/branding/icons/icon-256.png`, `icon-128.png`, `icon-64.png`, `icon-48.png`, `icon-32.png`, `icon-16.png`
+- CI workflows: `.github/workflows/release.yml` and `.github/workflows/ci.yml`
+- Comparison helper: `scripts/compare-with-luna.ps1` (clones & diffs Cleanskiier27/luna.eu)
+- Documentation updates: `CHANGELOG.md`, README distribution notes
+
+## Acknowledgements
+- Contributors and reviewers who implemented packaging and CI changes
+- The luna.eu project (https://github.com/Cleanskiier27/luna.eu) for useful USB packaging and flashing concepts that informed the distribution workflow
+
+## Next recommended steps
+1. Validate builds locally (Node/npm/git/NSIS required).
+2. Run CI on a test tag (e.g., `git tag v1.0.2 && git push origin --tags`) to verify release artifact and installer upload.
+3. Review installer content and add optional assets (icons, EULA, Node portable bundle) if desired.
+4. When ready, create the GitHub release and attach artifacts produced by CI.
+
+If you'd like, I can prepare the installer icon and EULA, or draft a short release note to attach to the GitHub release. Reply with which follow-up you prefer and I'll proceed.
\ No newline at end of file
diff --git a/DEVICE_REGISTRATION_GOAL.md b/DEVICE_REGISTRATION_GOAL.md
new file mode 100644
index 0000000..6b70a2b
--- /dev/null
+++ b/DEVICE_REGISTRATION_GOAL.md
@@ -0,0 +1,88 @@
+# Device Registration → Neural Network
+
+**Overview**
+This document specifies the new project goal for builders: when a new device registers with NetworkBuster, its registration and initial telemetry must be validated, persisted, and forwarded into the neural network ingestion pipeline for training or inference. This capability enables device-aware models and closed-loop improvements.
+
+## Goal (one-liner)
+Pass every new device registration into the neural network ingestion pipeline reliably, securely, and with full observability.
+
+## Priority
+- **Priority:** High (project-level goal)
+- **Owner:** platform / ingestion team (assign on project board)
+
+## Acceptance Criteria
+1. POST /api/devices/register returns canonical device ID and registration status.
+2. Registration payload is validated and stored in device registry (persistent DB). Schema is versioned.
+3. A registration event is enqueued to an ingestion topic (e.g., Azure Service Bus, Kafka, or Azure Event Grid).
+4. Neural network ingestion service consumes the event, returns acknowledgement, and registration status is updated in the registry (queued → processed → acknowledged or failed).
+5. Automated E2E test that simulates a device registration and verifies processed status.
+6. Metrics/alerts: registration rate, ingestion queue lag, processing success/failure, and SLA violation alerts.
+
+## Data model (minimum)
+DeviceRegistration {
+ deviceId (string) // provided by device or generated
+ hardwareId (string) // device serial/MAC/fingerprint
+ model (string) // device model
+ firmwareVersion (string)
+ location (string | geo-coords)
+ ts (ISO8601) // registration timestamp
+ initialTelemetry: { battery, temp, sensors: {...} } // optional
+}
+
+## API Spec (example)
+- POST /api/devices/register
+ - input: DeviceRegistration payload (JSON)
+ - responses:
+ - 201 Created { deviceId, status: 'registered' }
+ - 202 Accepted { deviceId, status: 'queued' }
+ - 400 Bad Request
+
+Authentication: API key or OAuth. Rate limit per IP/credential.
+
+## Ingestion contract
+- Message schema must match DeviceRegistration with metadata: {source, version, traceId}
+- Messages delivered to topic: `device-registrations.v1` with at-least-once delivery
+- Consumer (ingestion microservice) must return processing result to `device-registration-results` topic or call back API to update status
+
+## Security & Privacy
+- Validate and sanitize all fields
+- Store sensitive identifiers hashed or encrypted at rest
+- Enforce ACLs and authenticated endpoints
+- Log access and changes for audits
+
+## Reliability
+- Use a durable queue (retry/backoff policy)
+- Implement idempotency keys (deviceId + ts) to avoid duplicate processing
+- Provide op metrics and health endpoints
+
+## Observability
+- Traces: Attach a traceId from API -> queue -> ingestion consumer -> model
+- Metrics: registration_count, registration_errors, ingestion_lag_seconds, ingestion_success_rate
+- Logs: structured logs with correlation IDs
+
+## Tests
+- Unit tests: validation, schema, DB write
+- Integration tests: API -> DB -> queue (mock) -> ingestion (mock)
+- E2E test: bring up a test ingestion consumer and verify registration processed
+
+## Implementation suggestions for builders
+1. Add `POST /api/devices/register` with JSON schema validation using existing API framework (e.g., express + Joi or equivalent).
+2. Persist registrations in a `devices` collection/table with status and audit fields.
+3. Use `az acr build` container or existing unix-friendly workers to host ingestion consumer.
+4. Publish a message to Azure Service Bus / Kafka topic with schema and trace context.
+5. Create a small ingestion worker that reads topic and calls model ingestion REST or gRPC endpoint.
+6. Add monitoring dashboards and alerts in observability platform (Log Analytics / Prometheus).
+
+## Suggested Milestones (for PRs)
+- M1 API + DB schema + unit tests
+- M2 Queue publish + consumer (test harness)
+- M3 Ingestion acknowledgement + status transitions + E2E test
+- M4 Security review + production runbook
+
+## Notes
+- Keep the message schema versioned and backward-compatible.
+- Document the exact contract in `api/schema/device-registration.json` when ready.
+
+---
+
+Add this file as the canonical specification for builders and link from `PROJECT-SUMMARY.md` and any relevant docs.
diff --git a/DUAL-ROUTER-SETUP-GUIDE.md b/DUAL-ROUTER-SETUP-GUIDE.md
new file mode 100644
index 0000000..b075c43
--- /dev/null
+++ b/DUAL-ROUTER-SETUP-GUIDE.md
@@ -0,0 +1,335 @@
+# Dual Router Setup Guide: WiFi 7 Mesh + NetworkBuster.net
+
+**Setup Date:** January 3, 2026
+**Configuration Type:** Cascaded Dual Router with Custom Domain
+
+---
+
+## Network Topology Overview
+
+```
+Internet → WiFi 7 Mesh Router (Primary) → NetworkBuster Router (Secondary)
+ 192.168.1.1 192.168.1.100 or 192.168.2.1
+```
+
+---
+
+## Option A: Same Subnet (Simpler Setup)
+
+### WiFi 7 Mesh Router (Primary Gateway)
+
+**IP Configuration:**
+- **Router IP:** `192.168.1.1`
+- **Subnet Mask:** `255.255.255.0`
+- **DHCP Range:** `192.168.1.10` to `192.168.1.99`
+- **DNS Primary:** `8.8.8.8` (Google)
+- **DNS Secondary:** `1.1.1.1` (Cloudflare)
+
+**WiFi 7 Settings:**
+- **Network Name (SSID):** `YourNetwork-WiFi7`
+- **Security:** WPA3-Personal
+- **Password:** [Your secure password]
+- **Band:** 2.4GHz + 5GHz + 6GHz (tri-band mesh)
+
+### NetworkBuster Router (Secondary)
+
+**IP Configuration:**
+- **Router IP:** `192.168.1.100` (static, outside DHCP range)
+- **Subnet Mask:** `255.255.255.0`
+- **Gateway:** `192.168.1.1` (points to WiFi 7 mesh)
+- **DHCP:** **DISABLED** (WiFi 7 handles DHCP)
+- **DNS:** `192.168.1.1` (forwards to WiFi 7 router)
+
+**Connection:**
+- **Cable:** Connect WiFi 7 mesh LAN port → NetworkBuster WAN/LAN port
+- **Mode:** Bridge/AP mode (disable NAT on NetworkBuster)
+
+---
+
+## Option B: Separate Subnet (Advanced - Better Isolation)
+
+### WiFi 7 Mesh Router (Primary Gateway)
+
+**IP Configuration:**
+- **Router IP:** `192.168.1.1`
+- **Subnet Mask:** `255.255.255.0`
+- **DHCP Range:** `192.168.1.10` to `192.168.1.254`
+- **DNS Primary:** `8.8.8.8`
+- **DNS Secondary:** `1.1.1.1`
+
+### NetworkBuster Router (Secondary Subnet)
+
+**IP Configuration:**
+- **Router IP:** `192.168.2.1`
+- **Subnet Mask:** `255.255.255.0`
+- **Gateway:** `192.168.1.1`
+- **DHCP Range:** `192.168.2.10` to `192.168.2.254`
+- **DNS:** `192.168.1.1` or `8.8.8.8`
+
+**Connection:**
+- **Cable:** WiFi 7 mesh LAN → NetworkBuster WAN port
+- **Mode:** Router mode (NAT enabled for subnet isolation)
+
+**Static Route on WiFi 7 Router:**
+```
+Destination: 192.168.2.0/24
+Gateway: [NetworkBuster WAN IP on 192.168.1.x, e.g. 192.168.1.100]
+```
+
+---
+
+## NetworkBuster.net Domain Setup
+
+### Local DNS Configuration (Internal Network)
+
+**On WiFi 7 Mesh Router:**
+
+**Add DNS Host Entries:**
+```
+networkbuster.net → 192.168.1.100 (Option A) or 192.168.2.1 (Option B)
+www.networkbuster.net → 192.168.1.100 or 192.168.2.1
+mission.networkbuster.net → 192.168.1.100 or 192.168.2.1
+api.networkbuster.net → 192.168.1.100 or 192.168.2.1
+```
+
+**Alternative: Edit Hosts File on All Devices**
+- **Windows:** `C:\Windows\System32\drivers\etc\hosts`
+- **macOS/Linux:** `/etc/hosts`
+
+```
+192.168.1.100 networkbuster.net www.networkbuster.net
+192.168.1.100 mission.networkbuster.net
+192.168.1.100 api.networkbuster.net
+```
+
+### External DNS (Public Internet Access)
+
+**If You Own networkbuster.net Domain:**
+
+**DNS A Records (at your domain registrar):**
+```
+Type Name Value TTL
+A @ [Your Public IP] 3600
+A www [Your Public IP] 3600
+A mission [Your Public IP] 3600
+A api [Your Public IP] 3600
+```
+
+**Dynamic DNS (DDNS) Setup:**
+- **Service:** No-IP, DuckDNS, or your router's built-in DDNS
+- **Update Interval:** Every 5 minutes
+- **Domain:** `yourname.ddns.net` (free) or `networkbuster.net` (owned domain)
+
+---
+
+## Port Forwarding Configuration
+
+**Configure on WiFi 7 Mesh Router:**
+
+| Service | External Port | Internal IP | Internal Port | Protocol |
+|---------------------|---------------|-------------------|---------------|----------|
+| Web Server | 3000 | 192.168.1.100 | 3000 | TCP |
+| API Server | 3001 | 192.168.1.100 | 3001 | TCP |
+| Audio Stream | 3002 | 192.168.1.100 | 3002 | TCP |
+| NASA Mission Control| 5000 | 192.168.1.100 | 5000 | TCP |
+| HTTP (Web) | 80 | 192.168.1.100 | 3000 | TCP |
+| HTTPS (Secure) | 443 | 192.168.1.100 | 443 | TCP |
+
+**If Using Option B (Separate Subnet):**
+- Change Internal IP to `192.168.2.1` in all port forwarding rules
+
+---
+
+## Windows Firewall Rules (NetworkBuster Device)
+
+**Already Configured:**
+```powershell
+# Verify existing rules
+Get-NetFirewallRule -DisplayName "NetworkBuster*" | Select-Object DisplayName, Enabled
+```
+
+**Add Mission Control Port:**
+```powershell
+New-NetFirewallRule -DisplayName "NetworkBuster-MissionControl" `
+ -Direction Inbound -LocalPort 5000 -Protocol TCP -Action Allow
+```
+
+---
+
+## Step-by-Step Setup Process
+
+### Step 1: Configure WiFi 7 Mesh Router
+1. Connect to WiFi 7 router at `192.168.1.1`
+2. Login to admin panel
+3. Set router IP to `192.168.1.1`
+4. Enable DHCP: `192.168.1.10` - `192.168.1.99` (Option A) or `192.168.1.10` - `192.168.1.254` (Option B)
+5. Set DNS servers: `8.8.8.8` and `1.1.1.1`
+6. Enable WiFi 7 (6GHz band)
+7. Set WPA3 security
+
+### Step 2: Configure NetworkBuster Router
+1. Connect NetworkBuster to computer temporarily
+2. Access router at default IP (usually `192.168.0.1` or `192.168.1.1`)
+3. Change router IP to:
+ - **Option A:** `192.168.1.100`
+ - **Option B:** `192.168.2.1`
+4. Set subnet mask: `255.255.255.0`
+5. **Option A:** Disable DHCP server, enable Bridge/AP mode
+6. **Option B:** Enable DHCP (`192.168.2.10` - `192.168.2.254`)
+7. Save and reboot
+
+### Step 3: Physical Connection
+1. Power off both routers
+2. Connect Ethernet cable:
+   - WiFi 7 LAN port → NetworkBuster WAN port (or LAN port for Option A)
+3. Power on WiFi 7 mesh router first (wait 2 minutes)
+4. Power on NetworkBuster router (wait 2 minutes)
+
+### Step 4: Verify Connection
+```powershell
+# Test connectivity
+ping 192.168.1.1 # WiFi 7 router
+ping 192.168.1.100 # NetworkBuster (Option A)
+ping 8.8.8.8 # Internet
+
+# Test domain resolution
+ping networkbuster.net
+```
+
+### Step 5: Configure Port Forwarding
+1. Login to WiFi 7 router (`192.168.1.1`)
+2. Navigate to Port Forwarding / Virtual Servers
+3. Add all port forwarding rules from table above
+4. Save and apply
+
+### Step 6: Add Local DNS Entries
+1. In WiFi 7 router, find DNS/Hostname settings
+2. Add custom hosts:
+   - `networkbuster.net` → `192.168.1.100`
+   - `www.networkbuster.net` → `192.168.1.100`
+   - `mission.networkbuster.net` → `192.168.1.100`
+
+### Step 7: Test Services
+```powershell
+# From any device on network
+Invoke-WebRequest -Uri "http://networkbuster.net:3000"
+Invoke-WebRequest -Uri "http://networkbuster.net:5000"
+Invoke-WebRequest -Uri "http://mission.networkbuster.net:5000"
+```
+
+---
+
+## Access URLs (Internal Network)
+
+**Direct IP Access:**
+- Web Server: `http://192.168.1.100:3000`
+- API Server: `http://192.168.1.100:3001`
+- Audio Stream: `http://192.168.1.100:3002`
+- NASA Mission Control: `http://192.168.1.100:5000`
+
+**Domain Access (After DNS Setup):**
+- Web: `http://networkbuster.net:3000`
+- Web: `http://www.networkbuster.net:3000`
+- Mission Control: `http://mission.networkbuster.net:5000`
+- API: `http://api.networkbuster.net:3001`
+
+**External Access (After Port Forwarding):**
+- Web: `http://[YOUR_PUBLIC_IP]:3000`
+- Mission Control: `http://[YOUR_PUBLIC_IP]:5000`
+
+---
+
+## Troubleshooting
+
+### Can't Access NetworkBuster Router
+```powershell
+# Find router IP
+arp -a | Select-String "192.168"
+
+# Verify route
+route print
+```
+
+### Port Not Accessible
+```powershell
+# Check if port is listening
+Get-NetTCPConnection -LocalPort 3000 -State Listen
+
+# Test firewall rule
+Test-NetConnection -ComputerName 192.168.1.100 -Port 3000
+```
+
+### Domain Not Resolving
+```powershell
+# Check DNS resolution
+nslookup networkbuster.net
+
+# Flush DNS cache
+ipconfig /flushdns
+
+# Test direct IP
+ping 192.168.1.100
+```
+
+### No Internet on NetworkBuster Subnet
+```powershell
+# Check gateway
+ipconfig | Select-String "Gateway"
+
+# Add static route on WiFi 7 router
+# Destination: 192.168.2.0/24 → Gateway: [NetworkBuster WAN IP on 192.168.1.x]
+```
+
+---
+
+## Security Recommendations
+
+1. **Change Default Passwords:**
+ - WiFi 7 router admin password
+ - NetworkBuster router admin password
+ - WiFi network password
+
+2. **Enable WPA3:** On WiFi 7 mesh for maximum encryption
+
+3. **Disable WPS:** On both routers (security risk)
+
+4. **Enable Firewall:** On both routers
+
+5. **Update Firmware:** Keep both routers updated
+
+6. **Guest Network:** Use WiFi 7 guest network for IoT devices
+
+7. **VPN:** Consider VPN for external access instead of port forwarding
+
+---
+
+## Recommended Configuration
+
+**For Best Performance:** Use **Option A** (Same Subnet)
+- Simpler setup
+- No double NAT issues
+- NetworkBuster acts as WiFi access point
+- Easier port forwarding
+
+**For Better Security:** Use **Option B** (Separate Subnet)
+- Network isolation
+- Separate traffic control
+- Better for multiple services
+- Easier firewall rules per subnet
+
+---
+
+## Quick Reference
+
+**WiFi 7 Mesh Router:** `192.168.1.1`
+**NetworkBuster Router:** `192.168.1.100` (Option A) or `192.168.2.1` (Option B)
+**Domain:** `networkbuster.net`
+**Services:** Ports 3000, 3001, 3002, 5000
+
+**DNS Servers:** `8.8.8.8` (Primary), `1.1.1.1` (Secondary)
+**Subnet Mask:** `255.255.255.0`
+**DHCP Range:** `192.168.1.10` - `192.168.1.99` (WiFi 7 only)
+
+---
+
+**Setup Complete!** Your WiFi 7 mesh router and NetworkBuster are now configured for optimal performance with custom domain support.
diff --git a/ENVIRONMENT_CHANGES.md b/ENVIRONMENT_CHANGES.md
new file mode 100644
index 0000000..535e154
--- /dev/null
+++ b/ENVIRONMENT_CHANGES.md
@@ -0,0 +1,22 @@
+# Terminal Environment Changes
+
+## Extension: vscode.git
+
+Enables the following features: git auth provider
+
+- `GIT_ASKPASS=c:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\resources\app\extensions\git\dist\askpass.sh`
+- `VSCODE_GIT_ASKPASS_NODE=C:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\Code.exe`
+- `VSCODE_GIT_ASKPASS_EXTRA_ARGS=`
+- `VSCODE_GIT_ASKPASS_MAIN=c:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\resources\app\extensions\git\dist\askpass-main.js`
+- `VSCODE_GIT_IPC_HANDLE=\\.\\pipe\vscode-git-b65fb0a601-sock`
+
+## Extension: GitHub.copilot-chat
+
+Enables use of `copilot-debug` and `copilot` commands in the terminal
+
+- `PATH=c:\Users\daypi\AppData\Roaming\Code\User\globalStorage\github.copilot-chat\debugCommand;c:\Users\daypi\AppData\Roaming\Code\User\globalStorage\github.copilot-chat\copilotCli;${env:PATH}`
+
+## Extension: ms-python.python
+
+- `PYTHONSTARTUP=c:\Users\daypi\AppData\Roaming\Code\User\workspaceStorage\706e51ab7d28f79eab99add937660e6e\ms-python.python\pythonrc.py`
+- `PYTHON_BASIC_REPL=1`
\ No newline at end of file
diff --git a/HYPERV-LINUX-SETUP.md b/HYPERV-LINUX-SETUP.md
index ffec06e..f9be320 100644
--- a/HYPERV-LINUX-SETUP.md
+++ b/HYPERV-LINUX-SETUP.md
@@ -82,6 +82,15 @@ Or search "Hyper-V Manager" in Windows Start menu
8. **Summary:** Click "Finish"
+### Step 3.1: GPU Partitioning (Pro Upgrade)
+For optimal AI Gateway performance, allow the VM to access your host GPU (GPU-PV).
+
+Run this in PowerShell (Admin) after creating the VM:
+```powershell
+# Assign GPU to VM
+.\scripts\provision-hyperv-vm.ps1 -VMName "NetworkBuster-Linux" -EnableGPU -EnableNetworkAcceleration
+```
+
---
## Step 4: Start VM and Install Ubuntu
@@ -119,12 +128,12 @@ Start-VM -Name "NetworkBuster-Linux"
# Update system
sudo apt update && sudo apt upgrade -y
-# Install Node.js 24.x
+# Install Node.js 24.x (LTS)
curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
sudo apt install -y nodejs
-# Install Git
-sudo apt install -y git
+# Install Git & Optimization Tools
+sudo apt install -y git net-tools ethtool
# Verify installations
node --version # v24.x
@@ -350,15 +359,19 @@ ssh -i C:\path\to\key ubuntu@192.168.x.x
---
-## Performance Tips
+## Performance Tuning (Pro)
-- **Allocate enough resources:** 4GB RAM, 2+ CPU cores
-- **Use SSD storage:** VM performance depends on disk
-- **Enable nested virtualization:** For Docker-in-Hyper-V
-- **Snapshots:** Before major changes
- ```powershell
- Checkpoint-VM -Name "NetworkBuster-Linux" -SnapshotName "Working-State"
- ```
+### Enable SR-IOV
+Single Root I/O Virtualization (SR-IOV) significantly reduces network latency.
+1. In Hyper-V Manager → Virtual Switch Manager.
+2. Select your switch → Check "Enable SR-IOV".
+3. In VM Settings → Network Adapter → Hardware Acceleration → Check "Enable SR-IOV".
+
+### Nested Virtualization (For Docker)
+If you plan to run Docker *inside* your Linux VM:
+```powershell
+Set-VMProcessor -VMName "NetworkBuster-Linux" -ExposeVirtualizationExtensions $true
+```
---
@@ -367,10 +380,10 @@ ssh -i C:\path\to\key ubuntu@192.168.x.x
1. โ
Enable Hyper-V (restart required)
2. โ
Download Ubuntu ISO
3. โ
Create VM in Hyper-V Manager
-4. โ
Install Ubuntu
-5. โ
Install Node.js & dependencies
-6. โ
Clone project
-7. โ
Test servers
-8. โ
(Optional) Set up Docker
+4. โ
Run `provision-hyperv-vm.ps1` for GPU/Performance
+5. โ
Install Ubuntu
+6. โ
Install Node.js & dependencies
+7. โ
Clone project
+8. โ
Test servers
-**You'll be able to test NetworkBuster on Windows AND Linux!**
+**You now have a high-performance AI-ready Linux testing environment!**
diff --git a/HYPERV-QUICK-START.md b/HYPERV-QUICK-START.md
index 2ec7e8d..c57a2ae 100644
--- a/HYPERV-QUICK-START.md
+++ b/HYPERV-QUICK-START.md
@@ -14,6 +14,8 @@
Enable-WindowsOptionalFeature -FeatureName Hyper-V -Online -All
# After restart, create VM manually (see HYPERV-LINUX-SETUP.md)
+# Then run the Upgrade script:
+.\scripts\provision-hyperv-vm.ps1 -VMName "NetworkBuster-Linux" -EnableGPU -EnableNetworkAcceleration
```
---
@@ -38,9 +40,9 @@ Enable-WindowsOptionalFeature -FeatureName Hyper-V -Online -All
# Update system
sudo apt update && sudo apt upgrade -y
-# Install Node.js 24.x
+# Install Node.js 24.x LTS
curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
-sudo apt install -y nodejs git
+sudo apt install -y nodejs git ethtool
# Clone project
git clone https://github.com/NetworkBuster/networkbuster.net.git
diff --git a/MATERIALS.md b/MATERIALS.md
new file mode 100644
index 0000000..d3e50f7
--- /dev/null
+++ b/MATERIALS.md
@@ -0,0 +1,25 @@
+# Materials
+
+This file documents the materials and properties managed in the recycle procedure.
+
+## Properties
+
+- mixed plastic
+
+## Sterilization & Decontamination Supplies
+
+- Nitrile gloves (various sizes)
+- N95 respirators or PAPRs
+- Safety goggles / face shields
+- Lint-free wipes (low-lint, microfiber)
+- Sterile swabs (foam tipped) for crevices
+- Isopropyl alcohol (70%–90%) in sealed containers
+- Manufacturer-approved optical cleaning fluids (for optics)
+- HEPA portable air purifier (local capture)
+- UV-C lamp (supplementary only; follow safety guidelines)
+- Disposable gowns / coveralls and shoe covers
+- Sealable waste bags and biohazard labels
+
+**Notes:** Consult instrument manufacturer for approved cleaning agents and procedures; when biological contamination is suspected, contact biosafety personnel and do not proceed without authorization.
+
+*Add additional materials/properties as needed.*
diff --git a/NetworkBuster_Git_Shortcuts/git_dashboard.html b/NetworkBuster_Git_Shortcuts/git_dashboard.html
new file mode 100644
index 0000000..90443dd
--- /dev/null
+++ b/NetworkBuster_Git_Shortcuts/git_dashboard.html
@@ -0,0 +1,296 @@
+
+
+
+
+
+
+ NetworkBuster Git Repositories
+
+
+
+
+
+
+
+
+
1
+
๐ฆ Total Repositories
+
+
+
59
+
๐ Total Commits
+
+
+
34
+
๐ง Modified Files
+
+
+
299.98 MB
+
๐พ Total Size
+
+
+
+
+
+
+
+
+
+
+ ๐
+ C:\Users\daypi\networkbuster.net
+
+
+ ๐
+ https://github.com/NetworkBuster/networkbuster.net.git
+
+
+ ๐
+ 59 commits โข 299.98 MB
+
+
+
+
+ 1598d7e - Sync and redeploy: staged changes before redeployment (3 weeks ago)
+
+
+
+
+ 34 modified
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/NetworkBuster_Git_Shortcuts/git_manifest.json b/NetworkBuster_Git_Shortcuts/git_manifest.json
new file mode 100644
index 0000000..42d63d5
--- /dev/null
+++ b/NetworkBuster_Git_Shortcuts/git_manifest.json
@@ -0,0 +1,18 @@
+{
+ "generated": "2026-01-03T06:43:42.158278",
+ "total_repos": 1,
+ "total_commits": 59,
+ "total_size": 314556826,
+ "repositories": [
+ {
+ "name": "networkbuster.net",
+ "path": "C:\\Users\\daypi\\networkbuster.net",
+ "branch": "bigtree",
+ "remote_url": "https://github.com/NetworkBuster/networkbuster.net.git",
+ "last_commit": "1598d7e - Sync and redeploy: staged changes before redeployment (3 weeks ago)",
+ "modified_files": 34,
+ "commit_count": "59",
+ "size": 314556826
+ }
+ ]
+}
\ No newline at end of file
diff --git a/NetworkBuster_Git_Shortcuts/networkbuster.net.bat b/NetworkBuster_Git_Shortcuts/networkbuster.net.bat
new file mode 100644
index 0000000..b6ae834
--- /dev/null
+++ b/NetworkBuster_Git_Shortcuts/networkbuster.net.bat
@@ -0,0 +1,3 @@
+@echo off
+cd /d "C:\Users\daypi\networkbuster.net"
+start "" "%SystemRoot%\explorer.exe" "C:\Users\daypi\networkbuster.net"
diff --git a/Networkbuster.net b/Networkbuster.net
new file mode 160000
index 0000000..c630375
--- /dev/null
+++ b/Networkbuster.net
@@ -0,0 +1 @@
+Subproject commit c6303750f65b00061a694288c7a60f977d5ef51e
diff --git a/PROJECT-SUMMARY.md b/PROJECT-SUMMARY.md
index 386b646..e1dbc31 100644
--- a/PROJECT-SUMMARY.md
+++ b/PROJECT-SUMMARY.md
@@ -262,6 +262,43 @@ c:/Users/daypi/.gemini/antigravity/playground/iridescent-planetary/
- **Maintenance Guide**: Long-term operation support
- **Research Foundation**: Scientific references for validation
+---
+
+## New Project Goal: Device Registration → Neural Network (Priority)
+- **Goal for builders:** Implement reliable new-device registration that captures device identity and telemetry, validates and sanitizes the payload, stores it, and forwards it into the training / inference pipeline (neural network) in a secure, auditable, and testable way.
+- **Why:** Enables automated model training, device-aware decisions, remote provisioning, and closed-loop improvement based on real device data.
+- **High-level acceptance criteria:**
+ - A stable API exists to register new devices and receive a canonical device id.
+ - Device metadata + initial telemetry is persisted in a schema documented in `DEVICE_REGISTRATION_GOAL.md`.
+ - A secure forwarding mechanism (message queue, data pipeline) reliably delivers registration events to the neural network ingestion endpoint (with retries and observability).
+ - Model pipeline acknowledges receipt and publishes processing status; registration shows 'registered', 'queued', 'processed', or 'failed'.
+ - End-to-end test exists covering registration -> pipeline ingestion -> acknowledgement.
+
+> This is a top-level goal for all builders — see `DEVICE_REGISTRATION_GOAL.md` for full spec, wire diagrams, API examples, and implementation notes.
+
+### Quick dev notes (M1 implemented, M2 in progress)
+- POST `/api/devices/register` (prototype) implemented in `api/devices.js`.
+- Persistence: local files stored in `data/devices/` (use `lib/deviceStore.js`).
+- Queue: `device-registrations.v1` stored under `data/queue/device-registrations.v1/` using `lib/messageQueue.js`.
+- Worker: `workers/ingestWorker.js` provided as a simple polling consumer. Run with `node workers/ingestWorker.js`.
+- Test: `node tests/test-device-registration.js` will POST a sample registration (assumes server is running on port 3001).
+
+### M2: Queue publish + consumer (completed)
+- Azure Service Bus integration: Added to `deploy-azure.ps1` with `-SetupServiceBus` flag.
+- Updated `lib/messageQueue.js` to use Azure Service Bus SDK (falls back to files if no connection string).
+- New consumer: `workers/deviceConsumer.js` polls queue and forwards to ingestion endpoint.
+- Mock ingestion: Added `POST /api/ingestion/mock` to server for testing.
+- Run consumer: `npm run worker:device-consumer` (set `INGESTION_ENDPOINT` env var).
+- Deploy consumer: As Container App or Function App after Service Bus setup.
+
+### M3: Ingestion acknowledgement + status transitions (completed)
+- Status transitions: Added `transitionStatus()` with validation (registered → queued → processing → acknowledged/failed).
+- Mock ingestion: Updated to return acknowledgements with confidence scores, simulate failures (10%), and processing delays.
+- Consumer retries: Added exponential backoff retry logic (up to 3 attempts) for failed ingestions.
+- E2E test: Updated to wait for 'acknowledged' status and handle failures.
+- API: Uses validated status transitions in registration endpoint.
+
+
---
## Technical Highlights
diff --git a/PR_NOTE.md b/PR_NOTE.md
new file mode 100644
index 0000000..f156ecd
--- /dev/null
+++ b/PR_NOTE.md
@@ -0,0 +1,32 @@
+PR Notes โ Add Network Boost utilities
+
+Summary:
+This PR adds a cross-platform ``Network Boost`` utility to improve network throughput and configuration for target systems. It includes hardened apply logic and generates robust restore scripts to revert changes.
+
+Files to add to upstream (`Cleanskiier27/Final`):
+- `scripts/network-boost.ps1` (Windows)
+- `scripts/network-boost.sh` (Linux)
+- `docs/NETWORK-BOOST.md` (documentation)
+- `CONTRIBUTORS.md` (contributor entry)
+
+Testing recommendations:
+- Run dry-run and review outputs: (Windows) `powershell -File scripts\network-boost.ps1` (Linux) `bash ./scripts/network-boost.sh`
+- Run apply in a controlled VM and verify `network-boost-restore.*` contents and restore operations.
+- Validate that installer integration is opt-in (checkbox) and uses non-interactive apply with `-Apply -Confirm:$false`.
+
+Security & Safety:
+- Scripts are designed to be reversible and non-destructive; restore scripts are generated with previous values and best-effort commands.
+- Scripts log all operations to `network-boost.log` and recommend reboot where appropriate.
+
+Maintainer notes:
+- If merging, consider adding a small CI job that runs a dry-run, installs PSScriptAnalyzer/shellcheck, and verifies that restore scripts are generated when running apply in a controlled test runner.
+- Optionally add an installer page and an entry in the main docs referencing the new tooling.
+
+---
+
+To apply this contribution automatically to upstream (fork + PR):
+- Use the helper script `scripts/apply-to-upstream.sh` (Linux/macOS) or `scripts/apply-to-upstream.ps1` (Windows).
+- Example (bash): `./scripts/apply-to-upstream.sh --upstream https://github.com/Cleanskiier27/Final.git --fork git@github.com:youruser/Final.git`
+- Example (PowerShell): `.\scripts\apply-to-upstream.ps1 -Upstream 'https://github.com/Cleanskiier27/Final.git' -Fork 'git@github.com:youruser/Final.git'`
+
+The helper clones upstream, creates a branch, copies contribution files, commits, pushes to your fork, and uses `gh` (if available) to open a PR. If `gh` is not available, push to your fork and open a PR manually.
\ No newline at end of file
diff --git a/QUICK-COMMANDS.md b/QUICK-COMMANDS.md
new file mode 100644
index 0000000..f799c3a
--- /dev/null
+++ b/QUICK-COMMANDS.md
@@ -0,0 +1,82 @@
+# NetworkBuster - Quick Command Reference
+# Simple commands to make everything easier
+
+## Batch Files (Double-click or run from CMD)
+
+```cmd
+start.bat - Start all NetworkBuster services
+stop.bat - Stop all services
+status.bat - Show current status
+map.bat - Open network map
+tracer.bat - Open API tracer
+backup.bat - Backup git to D: and K: drives
+thumbnails.bat - Extract and view thumbnails
+```
+
+## PowerShell Functions (Load with: . .\nb.ps1)
+
+```powershell
+nb-start - Start all services
+nb-stop - Stop all services
+nb-status - Show status
+nb-map - Open network map
+nb-tracer - Open API tracer
+nb-mission - Open mission control
+nb-backup - Run git backup
+nb-thumbs - Extract thumbnails
+nb-all - Open all dashboards at once
+nb-help - Show help
+```
+
+## Python Direct Commands
+
+```powershell
+python networkbuster_launcher.py --start # Start everything
+python networkbuster_launcher.py --stop # Stop everything
+python networkbuster_launcher.py --status # Check status
+python network_map_viewer.py # Run map
+python api_tracer.py # Run tracer
+python flash_git_backup.py # Backup git
+python extract_thumbnails.py # Extract thumbnails
+```
+
+## One-Line Quick Starts
+
+```powershell
+# Start and open Universal Launcher
+python networkbuster_launcher.py --start; start http://localhost:7000
+
+# Quick map view
+python network_map_viewer.py; start http://localhost:6000
+
+# Quick API trace
+python api_tracer.py; start http://localhost:8000
+
+# All dashboards
+start http://localhost:3000,http://localhost:5000,http://localhost:6000,http://localhost:7000,http://localhost:8000
+```
+
+## URL Shortcuts
+
+- Main Dashboard: http://localhost:7000
+- Network Map: http://localhost:6000
+- API Tracer: http://localhost:8000
+- Mission Control: http://localhost:5000
+- Web Server: http://localhost:3000
+- API Server: http://localhost:3001
+- Audio Stream: http://localhost:3002
+
+## Desktop Shortcuts (Already Created)
+
+- NetworkBuster.lnk - Main launcher
+- NetworkBuster Map.lnk - Network map viewer
+
+## Start Menu Programs
+
+- Start → Programs → NetworkBuster → (Choose any tool)
+
+## Simplest Usage
+
+**Just double-click: `start.bat`**
+
+That's it! Everything launches automatically.
diff --git a/README.md b/README.md
index 928679b..7c954c9 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,10 @@


+[](https://github.com/networkbuster/networkbuster.net/actions/workflows/test-openai-secret.yml)
+[](https://github.com/networkbuster/networkbuster.net/actions/workflows/smoke-e2e-openai.yml)
+
+
## ๐ฅ Award-Winning Advanced Networking Platform
**NetworkBuster** is the competition-winning advanced networking technology platform for space exploration and lunar operations. Featuring cutting-edge real-time visualization, interactive dashboards, and enterprise-grade automation.
@@ -31,6 +35,11 @@
โ
Git hooks for validation
โ
Mobile-responsive design
+### CI: OpenAI secret validation & E2E smoke test 🔬
+
+We added GitHub Actions workflows to validate that `OPENAI_API_KEY` is set and to perform a safe end-to-end smoke test that starts the app and calls `/api/recycle/recommend`. See the status badges above and the flow diagram in `docs/diagrams/openai-secret-flow.mmd` for details.
+
+
### Competition Results
| Category | Achievement |
|----------|-------------|
@@ -42,6 +51,46 @@
## ๐ Get Started
+### ๐จ Visuals & small renders
+
+- Emoji stack (render): `docs/diagrams/emoji-stack.svg`
+
+#### ๐ผ๏ธ Render diagrams locally
+
+You can render Mermaid `.mmd` sources to SVG and PNG locally with the provided helper script:
+
+```powershell
+# From the repository root
+# - downloads a portable Node 24.x if missing (wait longer with -LongTimeout)
+# - runs mermaid-cli to produce SVGs
+# - installs Puppeteer (Chromium) and converts SVG -> PNG at configurable scale
+.
+.\scripts\render-local.ps1 [-LongTimeout] [-RenderScale <n>]
+```
+
+Options:
+- `-UseNvm -AcceptUAC` — use nvm-windows installer (requires UAC) instead of the portable Node download.
+- `-SkipChromiumDownload` — skip Puppeteer's Chromium download if you already have a compatible Chromium in PATH.
+- `-LongTimeout` — use longer timeouts & retries for downloads/Chromium install (helpful on flaky networks).
+- `-RenderScale <n>` — set PNG scale (default 2, CI uses 4 for hi-res).
+
+Notes & tips:
+- Puppeteer will download Chromium (100+ MB); allow time and network access. ⚠️
+- The script writes PNGs to `docs/diagrams` and lists generated PNG files when finished. ✅
+- For CI rendering we provide `.github/workflows/render-diagrams.yml` which runs on GitHub runners and uploads PNG artifacts.
+
+### Android `antigravity` module
+A small Kotlin Android module skeleton has been added at `android/antigravity/`. It includes Gradle files and a placeholder `MainActivity`. Add `google-services.json` to `android/antigravity/app/` if integrating Firebase (do not commit it; see `.gitignore`).
+
+### Google Cloud SDK helpers
+Scripts added under `scripts/`:
+- `scripts/setup-gcloud-sdk.ps1` โ download and (optionally) install Google Cloud SDK on Windows, and initialize it interactively.
+- `scripts/gcloud-auth.ps1` โ authenticate with a service account JSON and set a project non-interactively.
+- `scripts/gcloud-startup.ps1` — interactive helper to sign in as `cleanskiier27@networkbuster.net`, set project, and enable common APIs (or run non-interactive service-account auth).
+
+
+
+
### View Live Demo
Visit: https://networkbuster-mez5d7bmv-networkbuster.vercel.app
@@ -58,6 +107,8 @@ npm start
| Service | URL |
|---------|-----|
| Main Portal | / |
+
+
| Real-Time Overlay | /overlay |
| Dashboard | /dashboard |
| Blog | /blog |
@@ -98,3 +149,23 @@ npm start
**Last Updated**: December 3, 2025
**Version**: 1.0.0
**Status**: Active Development - Documentation Phase
+
+---
+
+## ๐ฆ Distribution & Installation (Windows)
+
+- Build artifact (ZIP): `npm run dist:zip` — creates `dist/<name>-<version>.zip` with required files.
+- Create desktop launcher: `npm run release:create-shortcut` — creates a shortcut called "NetworkBuster Launcher" on the current user desktop pointing to `start-desktop.bat`.
+- Build NSIS installer: `npm run dist:nsis` — builds an NSIS installer (requires NSIS / makensis in PATH).
+- Start from desktop: Double click the created shortcut or run `npm run start:desktop`.
+
+Notes:
+- The packaging scripts rely on `node`/`npm` being available in PATH and use PowerShell `Compress-Archive` on Windows.
+- For a branded installer include an ICO at `scripts/installer/icon.ico` or place SVG/PNG assets in `scripts/installer/branding/`. You can generate an ICO from `scripts/installer/icon-placeholder.png` using `scripts/installer/convert-icon.ps1` (requires ImageMagick `magick`).
+- An End User License Agreement (`scripts/installer/EULA.txt`) is bundled into the installer and is required.
+- To test locally on Windows see `scripts/test-local-build.ps1` (requires Node, npm, Git, NSIS, and optionally ImageMagick).
+- For CI, add a job that runs `npm run dist:zip`, `npm run dist:nsis` (on windows), archives `dist/` as release artifacts, and tags the release in GitHub.
+
+---
+
+**Contributing:** See `CONTRIBUTING.md` for guidelines on releases and artifact verification.
diff --git a/SECURE_FILES_NOT_TRACKED.md b/SECURE_FILES_NOT_TRACKED.md
new file mode 100644
index 0000000..43dfb6b
--- /dev/null
+++ b/SECURE_FILES_NOT_TRACKED.md
@@ -0,0 +1,8 @@
+# Secure / local files not tracked
+
+The following files contain sensitive or local configuration and are explicitly ignored in `.gitignore`:
+
+- `scripts/dummy-sa.json` (service account / credentials placeholder)
+- `scripts/gcloud-startup.ps1` (local startup script)
+
+If you need to keep a local copy, store it outside the repository or in a secure vault.
diff --git a/SECURITY-CHECK-REPORT.md b/SECURITY-CHECK-REPORT.md
new file mode 100644
index 0000000..3349f3b
--- /dev/null
+++ b/SECURITY-CHECK-REPORT.md
@@ -0,0 +1,69 @@
+# NetworkBuster System Check Report
+**Generated:** January 2, 2026
+
+## ✅ Security Enhancement Complete
+
+### ๐ New Security System Implemented
+Created comprehensive user verification module with:
+- **Multi-layer Authentication** - Username/password with SHA-256 hashing
+- **Access Control Levels** - 5-tier security clearance (Visitor → Root)
+- **Failed Login Protection** - 3 attempts max, 5-minute lockout
+- **Session Management** - Persistent sessions with 24-hour validity
+- **Audit Logging** - All access attempts logged with timestamps
+- **Alert System** - Real-time security event notifications
+
+### ๐ Files Enhanced
+- **security_verification.py** - Core security module (NEW)
+- **drone_flight_system.py** - Now requires Operator clearance (Level 3+)
+- **launch.py** - Integrated security menu option `[s]`
+
+### ๐ก๏ธ Security Features
+| Feature | Status | Details |
+|---------|--------|---------|
+| User Authentication | ✅ Active | SHA-256 hashed passwords |
+| Session Tracking | ✅ Active | JSON-based session files |
+| Access Logging | ✅ Active | `.security/access.log` |
+| Alert System | ✅ Active | `.security/alerts.log` |
+| Account Lockout | ✅ Active | 3 failed attempts = 5 min lock |
+| Level-Based Access | ✅ Active | 5 security clearance levels |
+
+### ๐ Python Files Syntax Check
+
+| File | Status | Issues |
+|------|--------|--------|
+| launch.py | ✅ PASS | No syntax errors |
+| drone_flight_system.py | ✅ PASS | No syntax errors |
+| security_verification.py | ✅ PASS | No syntax errors |
+| mobile_deployment.py | ✅ PASS | No syntax errors |
+| cloud_devices.py | ✅ PASS | No syntax errors |
+| system_health.py | ⚠️ WARN | psutil import (optional dependency) |
+| service_manager.py | ✅ PASS | No syntax errors |
+| auto_startup.py | ✅ PASS | No syntax errors |
+| quick_admin.py | ✅ PASS | No syntax errors |
+| admin_runner.py | ✅ PASS | No syntax errors |
+
+**Total Files Checked:** 10
+**Syntax Errors:** 0
+**Import Warnings:** 1 (psutil - optional)
+
+### ๐ Default Credentials
+- **Username:** admin
+- **Password:** admin123
+- **Security Level:** 4 (Admin)
+- ⚠️ **Change password on first login!**
+
+### ๐ Security Files Location
+```
+.security/
+ ├── users.json           # User database
+ ├── access.log           # Access history
+ ├── alerts.log           # Security alerts
+ └── active_session.json  # Current session
+```
+
+### ๐ Usage
+1. Run `python security_verification.py` for security management
+2. Use `[s]` option in `launch.py` menu
+3. Drone operations now auto-check security clearance
+
+## ✅ All Systems Operational
diff --git a/Untitled-1.txt b/Untitled-1.txt
new file mode 100644
index 0000000..e69de29
diff --git a/VERCEL-SETUP-TODO.md b/VERCEL-SETUP-TODO.md
new file mode 100644
index 0000000..2e98da3
--- /dev/null
+++ b/VERCEL-SETUP-TODO.md
@@ -0,0 +1,55 @@
+# Vercel Domain Setup - TODO
+
+## ๐ Configuration Steps (To Be Completed Later)
+
+### 1. Prerequisites
+- [ ] Vercel account created and authenticated
+- [ ] Domain purchased and DNS accessible
+- [ ] Project deployed to Vercel
+
+### 2. Domain Configuration
+```bash
+# Add domain to Vercel project
+vercel domains add yourdomain.com
+
+# Add www subdomain
+vercel domains add www.yourdomain.com
+```
+
+### 3. DNS Records Required
+| Type | Name | Value | TTL |
+|------|------|-------|-----|
+| A | @ | 76.76.21.21 | 3600 |
+| CNAME | www | cname.vercel-dns.com | 3600 |
+
+### 4. SSL/TLS Configuration
+- Vercel automatically provisions SSL certificates
+- HTTPS enforced by default
+- Certificate auto-renewal enabled
+
+### 5. Environment Variables
+Set in Vercel Dashboard or via CLI:
+```bash
+vercel env add DOMAIN_NAME production
+vercel env add API_URL production
+```
+
+### 6. Custom Domain Script
+Located at: [configure-custom-domain.ps1](configure-custom-domain.ps1)
+
+### 7. Verification Steps
+- [ ] Domain resolves to Vercel IP
+- [ ] HTTPS certificate valid
+- [ ] www redirect works
+- [ ] API endpoints accessible
+
+## ๐ Related Files
+- [CUSTOM-DOMAIN-SETUP.md](CUSTOM-DOMAIN-SETUP.md)
+- [VERCEL-DOMAIN-SETUP-GUIDE.md](VERCEL-DOMAIN-SETUP-GUIDE.md)
+- [configure-custom-domain.ps1](configure-custom-domain.ps1)
+- [vercel.json](vercel.json)
+
+## ๐ Notes
+- Complete this configuration when ready to go live
+- Ensure all security configurations are in place first
+- Test on staging domain before production
diff --git a/__pycache__/security_verification.cpython-314.pyc b/__pycache__/security_verification.cpython-314.pyc
new file mode 100644
index 0000000..c1622c9
Binary files /dev/null and b/__pycache__/security_verification.cpython-314.pyc differ
diff --git a/admin_runner.py b/admin_runner.py
new file mode 100644
index 0000000..94d0c49
--- /dev/null
+++ b/admin_runner.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+NetworkBuster Admin Runner
+Run any script/command with elevated privileges on Windows
+"""
+
+import ctypes
+import sys
+import os
+import subprocess
+from pathlib import Path
+
+PROJECT_PATH = Path(__file__).parent.resolve()
+
+
+def is_admin():
+ """Check if the script is running with administrator privileges."""
+ try:
+ return ctypes.windll.shell32.IsUserAnAdmin()
+ except:
+ return False
+
+
+def run_as_admin(command=None, script=None, wait=True):
+ """
+ Re-run the current script or a specific command as administrator.
+
+ Args:
+ command: Optional command to run (list of strings)
+ script: Optional script path to run
+ wait: Whether to wait for the process to complete
+ """
+ if is_admin():
+ print("โ Already running as Administrator")
+ return True
+
+ if command:
+ # Run a specific command elevated
+ cmd_str = ' '.join(command) if isinstance(command, list) else command
+ params = f'/c {cmd_str}'
+ executable = 'cmd.exe'
+ elif script:
+ # Run a specific script elevated
+ params = f'"{script}"'
+ executable = sys.executable
+ else:
+ # Re-run this script elevated
+ params = ' '.join([f'"{arg}"' for arg in sys.argv])
+ executable = sys.executable
+
+ print(f"โ Requesting Administrator privileges...")
+
+ try:
+ result = ctypes.windll.shell32.ShellExecuteW(
+ None, # Parent window
+ "runas", # Operation (run as admin)
+ executable, # Program
+ params, # Parameters
+ str(PROJECT_PATH), # Working directory
+ 1 if wait else 0 # Show window
+ )
+
+ if result > 32:
+ print("โ Elevated process started successfully")
+ return True
+ else:
+ print(f"โ Failed to elevate (error code: {result})")
+ return False
+ except Exception as e:
+ print(f"โ Error requesting elevation: {e}")
+ return False
+
+
+def run_elevated_command(cmd, capture_output=False):
+ """
+ Run a command that requires admin privileges.
+
+ Args:
+ cmd: Command as string or list
+ capture_output: Whether to capture and return output
+ """
+ if not is_admin():
+ print("โ This command requires Administrator privileges")
+ return run_as_admin(command=cmd)
+
+ if isinstance(cmd, str):
+ cmd = cmd.split()
+
+ try:
+ result = subprocess.run(
+ cmd,
+ capture_output=capture_output,
+ text=True,
+ cwd=PROJECT_PATH
+ )
+ return result if capture_output else result.returncode == 0
+ except Exception as e:
+ print(f"โ Command failed: {e}")
+ return False
+
+
+def main():
+ """Main entry point - demonstrates admin capabilities."""
+ print("=" * 60)
+ print(" NetworkBuster Admin Runner")
+ print("=" * 60)
+ print()
+
+ if is_admin():
+ print("โ Running with Administrator privileges")
+ print()
+
+ # Show what we can do as admin
+ print("Available admin operations:")
+ print(" 1. Manage Windows services")
+ print(" 2. Modify system firewall")
+ print(" 3. Access protected directories")
+ print(" 4. Run elevated PowerShell scripts")
+ print()
+
+ # Example: Check execution policy
+ result = subprocess.run(
+ ["powershell", "-Command", "Get-ExecutionPolicy"],
+ capture_output=True,
+ text=True
+ )
+ print(f"Current Execution Policy: {result.stdout.strip()}")
+
+ else:
+ print("โ Not running as Administrator")
+ print()
+ response = input("Would you like to restart with admin privileges? (y/n): ")
+ if response.lower() == 'y':
+ run_as_admin()
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ai-proxy-gateway.js b/ai-proxy-gateway.js
new file mode 100644
index 0000000..49c898f
--- /dev/null
+++ b/ai-proxy-gateway.js
@@ -0,0 +1,440 @@
+/**
+ * AI Proxy Gateway - Standalone gateway server for AI inference requests
+ * Routes requests from devices to multiple AI providers with authentication,
+ * rate limiting, caching, and usage tracking.
+ *
+ * Run: node ai-proxy-gateway.js
+ * Port: AI_GATEWAY_PORT (default: 3002)
+ */
+
+import express from 'express';
+import crypto from 'crypto';
+
+// Dynamic import for aiProviders and device store
+const aiProviders = await import('./lib/aiProviders.js').then(m => m.default);
+let deviceStore;
+try {
+ deviceStore = await import('./lib/deviceStore.js');
+} catch {
+ deviceStore = {
+ getRegistration: () => null,
+ saveRegistration: () => null
+ };
+}
+
+const app = express();
+const PORT = parseInt(process.env.AI_GATEWAY_PORT || '3002');
+
+// Request logging
+const requestLog = [];
+const MAX_LOG_ENTRIES = 1000;
+
+function logRequest(req, status, duration, provider = null) {
+ const entry = {
+ id: crypto.randomUUID?.() || crypto.randomBytes(8).toString('hex'),
+ timestamp: new Date().toISOString(),
+ method: req.method,
+ path: req.path,
+ deviceId: req.deviceId || 'unknown',
+ provider,
+ status,
+ duration,
+ ip: req.ip || req.connection?.remoteAddress
+ };
+
+ requestLog.unshift(entry);
+ if (requestLog.length > MAX_LOG_ENTRIES) requestLog.pop();
+
+ console.log(`[${entry.timestamp}] ${entry.method} ${entry.path} -> ${status} (${duration}ms) device:${entry.deviceId}`);
+}
+
+// Middleware
+app.use(express.json({ limit: '1mb' }));
+
+// CORS for all origins (devices may come from anywhere)
+app.use((req, res, next) => {
+ res.setHeader('Access-Control-Allow-Origin', '*');
+ res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
+ res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-Device-Id, X-API-Key');
+
+ if (req.method === 'OPTIONS') {
+ res.writeHead(200);
+ return res.end();
+ }
+ next();
+});
+
+// Request timing
+app.use((req, res, next) => {
+ req.startTime = Date.now();
+ next();
+});
+
+// Device authentication middleware
+function authenticateDevice(req, res, next) {
+ const deviceId = req.headers['x-device-id'] || req.query.deviceId;
+ const apiKey = req.headers['x-api-key'] || req.headers['authorization']?.replace('Bearer ', '');
+
+ // API key authentication
+ if (apiKey && (apiKey === process.env.AI_API_KEY || apiKey === process.env.ADMIN_KEY)) {
+ req.deviceId = 'api-key-user';
+ req.authenticated = true;
+ req.isAdmin = apiKey === process.env.ADMIN_KEY;
+ return next();
+ }
+
+ // Device ID authentication
+ if (deviceId) {
+ const device = deviceStore.getRegistration?.(deviceId);
+ if (device) {
+ req.deviceId = deviceId;
+ req.device = device;
+ req.authenticated = true;
+ return next();
+ }
+ // Allow unregistered device IDs if configured
+ if (process.env.AI_ALLOW_UNREGISTERED === 'true') {
+ req.deviceId = deviceId;
+ req.authenticated = false;
+ return next();
+ }
+ }
+
+ // Anonymous access
+ if (process.env.AI_ALLOW_ANONYMOUS === 'true') {
+ req.deviceId = 'anon-' + crypto.randomBytes(4).toString('hex');
+ req.authenticated = false;
+ return next();
+ }
+
+ const duration = Date.now() - req.startTime;
+ logRequest(req, 401, duration);
+ return res.status(401).json({
+ error: 'Authentication required',
+ hint: 'Provide X-Device-Id or X-API-Key header'
+ });
+}
+
+// Rate limit headers
+function addRateLimitHeaders(req, res) {
+ if (req.deviceId) {
+ const info = aiProviders.checkRateLimit(req.deviceId);
+ res.setHeader('X-RateLimit-Limit', process.env.AI_RATE_LIMIT_PER_MINUTE || '60');
+ res.setHeader('X-RateLimit-Remaining', info.remaining);
+ res.setHeader('X-RateLimit-Reset', info.resetIn);
+ }
+}
+
+// ============ ROUTES ============
+
+// Health check (no auth required)
+app.get('/health', (req, res) => {
+ const providers = aiProviders.getAvailableProviders();
+ res.json({
+ status: 'healthy',
+ service: 'ai-proxy-gateway',
+ port: PORT,
+ providers: providers.length,
+ defaultProvider: aiProviders.getDefaultProvider(),
+ uptime: process.uptime(),
+ timestamp: new Date().toISOString()
+ });
+});
+
+// List providers (no auth required)
+app.get('/providers', (req, res) => {
+ res.json({
+ providers: aiProviders.getAvailableProviders(),
+ default: aiProviders.getDefaultProvider()
+ });
+});
+
+// Chat completion
+app.post('/chat', authenticateDevice, async (req, res) => {
+ const startTime = Date.now();
+
+ try {
+ addRateLimitHeaders(req, res);
+
+ const {
+ provider = aiProviders.getDefaultProvider(),
+ messages,
+ model,
+ maxTokens,
+ temperature,
+ useCache = true,
+ stream = false
+ } = req.body;
+
+ if (!messages || !Array.isArray(messages)) {
+ logRequest(req, 400, Date.now() - startTime, provider);
+ return res.status(400).json({ error: 'messages array required' });
+ }
+
+ if (!provider) {
+ logRequest(req, 503, Date.now() - startTime);
+ return res.status(503).json({ error: 'No AI providers configured' });
+ }
+
+ const result = await aiProviders.chat(provider, messages, {
+ model,
+ maxTokens,
+ temperature,
+ deviceId: req.deviceId,
+ useCache,
+ stream
+ });
+
+ if (stream && result instanceof ReadableStream) {
+ res.setHeader('Content-Type', 'text/event-stream');
+ res.setHeader('Cache-Control', 'no-cache');
+ res.setHeader('Connection', 'keep-alive');
+
+ const reader = result.getReader();
+ const decoder = new TextDecoder();
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) break;
+ res.write(value);
+ }
+ } catch (err) {
+ console.error('Streaming error:', err);
+ } finally {
+ res.end();
+ logRequest(req, 200, Date.now() - startTime, provider + ' (streamed)');
+ }
+ return;
+ }
+
+ const tokens = result.usage?.total_tokens || 0;
+ aiProviders.trackUsage(req.deviceId, provider, 'chat', tokens);
+
+ logRequest(req, 200, Date.now() - startTime, provider);
+ res.json({ success: true, ...result });
+
+ } catch (err) {
+ const status = err.message.includes('Rate limit') ? 429 : 500;
+ logRequest(req, status, Date.now() - startTime);
+ res.status(status).json({ error: err.message });
+ }
+});
+
+// Embeddings
+app.post('/embed', authenticateDevice, async (req, res) => {
+ const startTime = Date.now();
+
+ try {
+ addRateLimitHeaders(req, res);
+
+ const {
+ provider = aiProviders.getDefaultProvider(),
+ text,
+ model
+ } = req.body;
+
+ if (!text) {
+ logRequest(req, 400, Date.now() - startTime, provider);
+ return res.status(400).json({ error: 'text required' });
+ }
+
+ const result = await aiProviders.embed(provider, text, {
+ model,
+ deviceId: req.deviceId
+ });
+
+ aiProviders.trackUsage(req.deviceId, provider, 'embed', result.usage?.total_tokens || 0);
+
+ logRequest(req, 200, Date.now() - startTime, provider);
+ res.json({ success: true, ...result });
+
+ } catch (err) {
+ const status = err.message.includes('Rate limit') ? 429 :
+ err.message.includes('does not support') ? 400 : 500;
+ logRequest(req, status, Date.now() - startTime);
+ res.status(status).json({ error: err.message });
+ }
+});
+
+// Image generation
+app.post('/image', authenticateDevice, async (req, res) => {
+ const startTime = Date.now();
+
+ try {
+ addRateLimitHeaders(req, res);
+
+ const {
+ provider = 'openai',
+ prompt,
+ model,
+ size,
+ quality,
+ n
+ } = req.body;
+
+ if (!prompt) {
+ logRequest(req, 400, Date.now() - startTime, provider);
+ return res.status(400).json({ error: 'prompt required' });
+ }
+
+ const result = await aiProviders.generateImage(provider, prompt, {
+ model,
+ size,
+ quality,
+ n,
+ deviceId: req.deviceId
+ });
+
+ aiProviders.trackUsage(req.deviceId, provider, 'image', 0);
+
+ logRequest(req, 200, Date.now() - startTime, provider);
+ res.json({ success: true, ...result });
+
+ } catch (err) {
+ const status = err.message.includes('Rate limit') ? 429 :
+ err.message.includes('does not support') ? 400 : 500;
+ logRequest(req, status, Date.now() - startTime);
+ res.status(status).json({ error: err.message });
+ }
+});
+
+// Device usage
+app.get('/usage', authenticateDevice, (req, res) => {
+ addRateLimitHeaders(req, res);
+ const usage = aiProviders.getDeviceUsage(req.deviceId);
+ const rateInfo = aiProviders.checkRateLimit(req.deviceId);
+
+ res.json({
+ deviceId: req.deviceId,
+ usage,
+ rateLimit: {
+ limit: parseInt(process.env.AI_RATE_LIMIT_PER_MINUTE || '60'),
+ remaining: rateInfo.remaining,
+ resetIn: rateInfo.resetIn
+ }
+ });
+});
+
+// Admin: all usage
+app.get('/usage/all', authenticateDevice, (req, res) => {
+ if (!req.isAdmin) {
+ return res.status(403).json({ error: 'Admin access required' });
+ }
+
+ res.json({
+ usage: aiProviders.getAllUsage(),
+ timestamp: new Date().toISOString()
+ });
+});
+
+// Admin: request logs
+app.get('/logs', authenticateDevice, (req, res) => {
+ if (!req.isAdmin) {
+ return res.status(403).json({ error: 'Admin access required' });
+ }
+
+ const limit = parseInt(req.query.limit || '100');
+ res.json({
+ logs: requestLog.slice(0, limit),
+ total: requestLog.length
+ });
+});
+
+// Gateway status
+app.get('/status', (req, res) => {
+ const providers = aiProviders.getAvailableProviders();
+ const allUsage = aiProviders.getAllUsage();
+
+ let totalRequests = 0;
+ let totalTokens = 0;
+ for (const usage of Object.values(allUsage)) {
+ totalRequests += usage.requests || 0;
+ totalTokens += usage.tokens || 0;
+ }
+
+ res.json({
+ gateway: {
+ status: 'running',
+ port: PORT,
+ uptime: Math.floor(process.uptime()),
+ memory: Math.round(process.memoryUsage().heapUsed / 1024 / 1024) + 'MB'
+ },
+ providers: {
+ available: providers.length,
+ default: aiProviders.getDefaultProvider(),
+ list: providers.map(p => ({ id: p.id, name: p.name, capabilities: p.capabilities }))
+ },
+ stats: {
+ activeDevices: Object.keys(allUsage).length,
+ totalRequests,
+ totalTokens,
+ recentLogs: requestLog.length
+ },
+ config: {
+ rateLimitPerMinute: parseInt(process.env.AI_RATE_LIMIT_PER_MINUTE || '60'),
+ cacheTTL: parseInt(process.env.AI_CACHE_TTL_SECONDS || '300'),
+ allowAnonymous: process.env.AI_ALLOW_ANONYMOUS === 'true',
+ allowUnregistered: process.env.AI_ALLOW_UNREGISTERED === 'true'
+ },
+ timestamp: new Date().toISOString()
+ });
+});
+
+// 404 handler
+app.use((req, res) => {
+ res.status(404).json({
+ error: 'Not found',
+ endpoints: [
+ 'GET /health',
+ 'GET /providers',
+ 'GET /status',
+ 'POST /chat',
+ 'POST /embed',
+ 'POST /image',
+ 'GET /usage',
+ 'GET /usage/all (admin)',
+ 'GET /logs (admin)'
+ ]
+ });
+});
+
+// Error handler
+app.use((err, req, res, next) => {
+ console.error('Gateway error:', err);
+ res.status(500).json({ error: 'Internal gateway error' });
+});
+
+// Start server
+app.listen(PORT, '0.0.0.0', () => {
+ console.log(`
+โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
+โ ๐ค AI Proxy Gateway Started โ
+โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
+โ Port: ${String(PORT).padEnd(44)}โ
+โ Health: http://localhost:${PORT}/health${' '.repeat(24 - String(PORT).length)}โ
+โ Status: http://localhost:${PORT}/status${' '.repeat(24 - String(PORT).length)}โ
+โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
+โ Providers Available: โ`);
+
+ const providers = aiProviders.getAvailableProviders();
+ for (const p of providers) {
+ const caps = Object.entries(p.capabilities)
+ .filter(([, v]) => v)
+ .map(([k]) => k)
+ .join(', ');
+ console.log(`โ โ ${p.name.padEnd(20)} (${caps})`.padEnd(59) + 'โ');
+ }
+
+ if (providers.length === 0) {
+ console.log('โ โ No providers configured - set API keys โ');
+ }
+
+ console.log(`โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
+โ Rate Limit: ${(process.env.AI_RATE_LIMIT_PER_MINUTE || '60') + '/min'.padEnd(42)}โ
+โ Cache TTL: ${(process.env.AI_CACHE_TTL_SECONDS || '300') + 's'.padEnd(43)}โ
+โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
+`);
+});
+
+export default app;
diff --git a/android/antigravity/.github/workflows/build-apk.yml b/android/antigravity/.github/workflows/build-apk.yml
new file mode 100644
index 0000000..06a8364
--- /dev/null
+++ b/android/antigravity/.github/workflows/build-apk.yml
@@ -0,0 +1,53 @@
+name: Build Debug APK
+
+on:
+ push:
+ branches: [ main, master ]
+ workflow_dispatch: {}
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up JDK 17
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '17'
+
+ - name: Set up Android SDK
+ uses: android-actions/setup-android@v2
+ with:
+ api-level: 34
+ components: build-tools;34.0.0
+
+ - name: Install Gradle
+ uses: gradle/gradle-build-action@v2
+ with:
+ # action will provide Gradle on the PATH
+ check-latest: true
+
+ - name: Ensure gradlew exists
+ run: |
+ if [ ! -f "./gradlew" ]; then
+ echo "No gradlew wrapper found. Generating wrapper using Gradle..."
+ gradle wrapper --gradle-version 8.4
+ else
+ echo "gradlew found"
+ fi
+
+ - name: Make gradlew executable
+ run: chmod +x gradlew || true
+
+ - name: Build Debug APK
+ run: ./gradlew assembleDebug --no-daemon
+
+ - name: Upload APK artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: app-debug-apk
+ path: app/build/outputs/apk/debug/app-debug.apk
diff --git a/android/antigravity/README.md b/android/antigravity/README.md
new file mode 100644
index 0000000..7cda8e7
--- /dev/null
+++ b/android/antigravity/README.md
@@ -0,0 +1,8 @@
+Antigravity Android module (placeholder)
+
+- Kotlin app module under `app/`
+- Add `google-services.json` to `app/` if integrating Firebase (do not commit it; see `.gitignore`)
+- Build using Android Studio or Gradle CLI (this repo does not include Android SDK tooling)
+
+To connect to Google Cloud services from this module, use a service account and the
+`gcloud` or `firebase` CLIs; see `scripts/setup-gcloud-sdk.ps1` and `scripts/gcloud-auth.ps1`.
\ No newline at end of file
diff --git a/android/antigravity/app/build.gradle b/android/antigravity/app/build.gradle
new file mode 100644
index 0000000..6e074b5
--- /dev/null
+++ b/android/antigravity/app/build.gradle
@@ -0,0 +1,30 @@
+plugins {
+ id 'com.android.application'
+ id 'kotlin-android'
+}
+
+android {
+ compileSdkVersion 34
+
+ defaultConfig {
+ applicationId "net.networkbuster.antigravity"
+ minSdkVersion 21
+ targetSdkVersion 34
+ versionCode 1
+ versionName "1.0"
+ }
+
+ buildTypes {
+ release {
+ minifyEnabled false
+ proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
+ }
+ }
+}
+
+dependencies {
+ implementation "org.jetbrains.kotlin:kotlin-stdlib:1.8.0"
+ implementation 'androidx.core:core-ktx:1.9.0'
+ implementation 'androidx.appcompat:appcompat:1.6.1'
+ implementation 'com.google.android.material:material:1.8.0'
+}
diff --git a/android/antigravity/app/luna_eu_repo b/android/antigravity/app/luna_eu_repo
new file mode 160000
index 0000000..d5f49a4
--- /dev/null
+++ b/android/antigravity/app/luna_eu_repo
@@ -0,0 +1 @@
+Subproject commit d5f49a43814387efd17213f7d1128f40fe906f14
diff --git a/android/antigravity/app/networkbuster.net_repo b/android/antigravity/app/networkbuster.net_repo
new file mode 160000
index 0000000..db9ed7a
--- /dev/null
+++ b/android/antigravity/app/networkbuster.net_repo
@@ -0,0 +1 @@
+Subproject commit db9ed7a72f7943df4de9c864afd53b35de4e3cdf
diff --git a/android/antigravity/app/networkbuster_nb_repo b/android/antigravity/app/networkbuster_nb_repo
new file mode 160000
index 0000000..7d4d6d7
--- /dev/null
+++ b/android/antigravity/app/networkbuster_nb_repo
@@ -0,0 +1 @@
+Subproject commit 7d4d6d71ffdf30e8ab7b861ef5efff1f8bedf516
diff --git a/android/antigravity/app/networkbuster_net_repo b/android/antigravity/app/networkbuster_net_repo
new file mode 160000
index 0000000..7d4d6d7
--- /dev/null
+++ b/android/antigravity/app/networkbuster_net_repo
@@ -0,0 +1 @@
+Subproject commit 7d4d6d71ffdf30e8ab7b861ef5efff1f8bedf516
diff --git a/android/antigravity/app/proguard-rules.pro b/android/antigravity/app/proguard-rules.pro
new file mode 100644
index 0000000..8ce17ba
--- /dev/null
+++ b/android/antigravity/app/proguard-rules.pro
@@ -0,0 +1,2 @@
+# proguard rules placeholder
+-keep class net.networkbuster.antigravity.** { *; }
\ No newline at end of file
diff --git a/android/antigravity/app/src/main/AndroidManifest.xml b/android/antigravity/app/src/main/AndroidManifest.xml
new file mode 100644
index 0000000..f36bd4b
--- /dev/null
+++ b/android/antigravity/app/src/main/AndroidManifest.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt b/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt
new file mode 100644
index 0000000..e6f4eae
--- /dev/null
+++ b/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt
@@ -0,0 +1,44 @@
+package net.networkbuster.antigravity
+
+import android.content.Intent
+import android.net.Uri
+import android.os.Build
+import android.os.Bundle
+import android.provider.Settings
+import android.widget.Button
+import androidx.appcompat.app.AppCompatActivity
+import androidx.appcompat.app.AlertDialog
+
+class MainActivity : AppCompatActivity() {
+ override fun onCreate(savedInstanceState: Bundle?) {
+ super.onCreate(savedInstanceState)
+ setContentView(R.layout.activity_main)
+
+ val btn = findViewById