diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..6001fc1 --- /dev/null +++ b/.env.example @@ -0,0 +1,21 @@ +# Copy this file to .env and fill in the values. Do NOT commit your real secrets. + +# AI Provider API Keys (set at least one) +OPENAI_API_KEY= +AZURE_OPENAI_ENDPOINT= +AZURE_OPENAI_KEY= +AZURE_OPENAI_DEPLOYMENT=gpt-4o +ANTHROPIC_API_KEY= +GOOGLE_GEMINI_KEY= +CUSTOM_AI_ENDPOINT=http://localhost:11434/v1 +CUSTOM_AI_KEY= + +# AI Gateway Configuration +AI_GATEWAY_PORT=3002 +AI_DEFAULT_PROVIDER=openai +AI_RATE_LIMIT_PER_MINUTE=60 +AI_CACHE_TTL_SECONDS=300 +AI_ALLOW_ANONYMOUS=true +AI_ALLOW_UNREGISTERED=true +AI_API_KEY=your-api-key-for-testing +ADMIN_KEY=your-admin-key diff --git a/.github/cspell/cspell.json b/.github/cspell/cspell.json new file mode 100644 index 0000000..27402b9 --- /dev/null +++ b/.github/cspell/cspell.json @@ -0,0 +1,5 @@ +{ + "version": "0.1", + "language": "en", + "words": ["sterilization","datacentra","NetworkBuster","regolith","PAPR","UV-C"] +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..541298e --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,23 @@ +name: CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + - name: Install dependencies + run: npm ci + - name: Run tests + run: npm test + - name: Run npm audit + run: npm audit --audit-level=moderate || true diff --git a/.github/workflows/integration-device-registration.yml b/.github/workflows/integration-device-registration.yml new file mode 100644 index 0000000..639d90b --- /dev/null +++ b/.github/workflows/integration-device-registration.yml @@ -0,0 +1,54 @@ +name: CI - Device Registration Integration Tests + +on: + push: + branches: [ main, master ] + pull_request: + branches: [ main, master ] + workflow_dispatch: 
{} + +jobs: + integration-tests: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [24.x] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Start server (background) + run: | + nohup node server.js > server.log 2>&1 & + sleep 1 + + - name: Wait for server + run: | + for i in {1..30}; do + if curl -sSf http://localhost:3001/api/health >/dev/null; then + echo "server ready"; exit 0 + fi + sleep 1 + done + echo "Server not ready" && cat server.log && exit 1 + + - name: Run E2E integration test + env: + BASE: http://localhost:3001 + run: npm run test:integration:devices + + - name: Upload server logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: server-log + path: server.log diff --git a/.github/workflows/lfs-build.yml b/.github/workflows/lfs-build.yml new file mode 100644 index 0000000..bf16dc4 --- /dev/null +++ b/.github/workflows/lfs-build.yml @@ -0,0 +1,88 @@ +name: Build LFS rootfs (PoC) + +on: + push: + paths: + - 'os/lfs/**' + workflow_dispatch: + inputs: + build_kernel: + description: 'Build kernel during job? 
("true" or "false")' + required: false + default: 'false' + kernel_version: + description: 'Kernel version to build (e.g., 6.8.13)' + required: false + default: '6.8.13' + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set kernel build flags + run: | + echo "event_name=${{ github.event_name }}" + # If manually dispatched and build_kernel==true, enable kernel build + if [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ "${{ github.event.inputs.build_kernel }}" = "true" ]; then + echo "SKIP_KERNEL=false" >> $GITHUB_ENV + echo "KERNEL_VERSION=${{ github.event.inputs.kernel_version }}" >> $GITHUB_ENV + echo "Kernel build enabled: $KERNEL_VERSION" + else + echo "SKIP_KERNEL=true" >> $GITHUB_ENV + echo "KERNEL_VERSION=${{ github.event.inputs.kernel_version }}" >> $GITHUB_ENV + echo "Kernel build disabled (default)" + fi + + - name: Restore kernel cache + uses: actions/cache@v4 + with: + path: .cache/linux-${{ env.KERNEL_VERSION }} + key: linux-kernel-${{ env.KERNEL_VERSION }}-${{ runner.os }}-v1 + + - name: Build container image + run: | + docker build -t lfs-build -f os/lfs/Dockerfile . 
+ + - name: Run build in container + env: + SKIP_KERNEL: ${{ env.SKIP_KERNEL }} + KERNEL_VERSION: ${{ env.KERNEL_VERSION }} + run: | + mkdir -p os/lfs/output + mkdir -p .cache/linux-${{ env.KERNEL_VERSION }} + docker run --rm -e SKIP_KERNEL="$SKIP_KERNEL" -e KERNEL_VERSION="$KERNEL_VERSION" -e KERNEL_CACHE_DIR="/workspace/kernel-cache" -v "$PWD/.cache/linux-${{ env.KERNEL_VERSION }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build || true + + - name: Upload rootfs artifact + uses: actions/upload-artifact@v4 + with: + name: lfs-rootfs + path: os/lfs/output/rootfs.tar.gz + + - name: Attempt QEMU smoke boot (best-effort) + if: always() + run: | + # install QEMU on runner + sudo apt-get update && sudo apt-get install -y qemu-system-x86 + + # choose kernel: prefer built artifact + if [ -f os/lfs/output/vmlinuz-${{ env.KERNEL_VERSION }} ]; then + KERNEL=$(pwd)/os/lfs/output/vmlinuz-${{ env.KERNEL_VERSION }} + echo "Using built kernel: $KERNEL" + elif ls /boot/vmlinuz-* 1>/dev/null 2>&1 && [ -f os/lfs/output/rootfs.cpio.gz ]; then + KERNEL=$(ls -1 /boot/vmlinuz-* | tail -n1) + echo "Using host kernel: $KERNEL" + else + KERNEL="" + fi + + if [ -n "$KERNEL" ] && [ -f os/lfs/output/rootfs.cpio.gz ]; then + timeout 30s qemu-system-x86_64 -kernel "$KERNEL" -initrd os/lfs/output/rootfs.cpio.gz -nographic -append "console=ttyS0 root=/dev/ram0 rw init=/init" -m 512 -no-reboot || true + else + echo "No kernel + initramfs available on runner โ€” skipping QEMU boot test" + fi + + - name: List artifacts + run: ls -lh os/lfs/output || true diff --git a/.github/workflows/lfs-cache-validate.yml b/.github/workflows/lfs-cache-validate.yml new file mode 100644 index 0000000..dde9b37 --- /dev/null +++ b/.github/workflows/lfs-cache-validate.yml @@ -0,0 +1,63 @@ +name: Validate LFS kernel cache + +on: + workflow_dispatch: + inputs: + kernel_version: + description: 'Kernel version to validate (e.g., 6.8.13)' + required: false + default: '6.8.13' + +jobs: + 
build-and-cache: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Restore cache (initial) + uses: actions/cache@v4 + with: + path: .cache/linux-${{ github.event.inputs.kernel_version }} + key: linux-kernel-${{ github.event.inputs.kernel_version }}-${{ runner.os }}-v1 + + - name: Build container image + run: docker build -t lfs-build -f os/lfs/Dockerfile . + + - name: Run build to populate cache + run: | + mkdir -p .cache/linux-${{ github.event.inputs.kernel_version }} + docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="${{ github.event.inputs.kernel_version }}" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$PWD/.cache/linux-${{ github.event.inputs.kernel_version }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build || true + + verify-cache: + runs-on: ubuntu-latest + needs: build-and-cache + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Restore cache (verify) + uses: actions/cache@v4 + with: + path: .cache/linux-${{ github.event.inputs.kernel_version }} + key: linux-kernel-${{ github.event.inputs.kernel_version }}-${{ runner.os }}-v1 + + - name: Check cached vmlinuz exists + run: | + if [ -f .cache/linux-${{ github.event.inputs.kernel_version }}/vmlinuz-${{ github.event.inputs.kernel_version }} ]; then + echo "Cached kernel found" + else + echo "Cached kernel missing" >&2 + exit 1 + fi + + - name: Run build and check logs for cache usage + run: | + mkdir -p os/lfs/output + docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="${{ github.event.inputs.kernel_version }}" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$PWD/.cache/linux-${{ github.event.inputs.kernel_version }}:/workspace/kernel-cache" -v "$PWD/os/lfs/output:/workspace/output" lfs-build | tee build.log || true + if grep -q "Using cached kernel tarball" build.log || grep -q "Using cached built kernel" build.log; then + echo "Cache used during build" + else + echo "Cache not used (check output)" >&2 + exit 
1 + fi \ No newline at end of file diff --git a/.github/workflows/network-boost-ci.yml b/.github/workflows/network-boost-ci.yml new file mode 100644 index 0000000..b8cb8e3 --- /dev/null +++ b/.github/workflows/network-boost-ci.yml @@ -0,0 +1,52 @@ +name: Network Boost CI + +on: + pull_request: + paths: + - 'contrib/Cleanskiier27-final/**' + - 'scripts/network-boost.*' + +jobs: + lint-and-dryrun-linux: + name: Lint (shellcheck) & Dry-run (Linux) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install ShellCheck + run: sudo apt-get update && sudo apt-get install -y shellcheck + - name: Run ShellCheck on Linux script + run: | + shellcheck contrib/Cleanskiier27-final/scripts/network-boost.sh || true + - name: Run Linux dry-run + run: | + bash contrib/Cleanskiier27-final/scripts/network-boost.sh || true + + lint-and-dryrun-windows: + name: Lint (PSScriptAnalyzer) & Dry-run (Windows) + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - name: Install PSScriptAnalyzer + shell: pwsh + run: | + Install-Module -Name PSScriptAnalyzer -Force -Scope CurrentUser -Confirm:$false + - name: Run PSScriptAnalyzer + shell: pwsh + run: | + Invoke-ScriptAnalyzer -Path .\contrib\Cleanskiier27-final\scripts\network-boost.ps1 -Recurse -Severity Error || true + - name: Windows dry-run + shell: pwsh + run: | + powershell -NoProfile -ExecutionPolicy Bypass -File .\contrib\Cleanskiier27-final\scripts\network-boost.ps1 + + optional-checks: + name: Optional checks (formatters/linter) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run shellcheck on all shell scripts (contrib) + run: | + for f in $(git ls-files 'contrib/**.sh'); do shellcheck "$f" || true; done + - name: Display generated files (for review) + run: | + ls -R contrib/Cleanskiier27-final || true diff --git a/.github/workflows/recycle-ai-demo.yml b/.github/workflows/recycle-ai-demo.yml new file mode 100644 index 0000000..0a6eeea --- /dev/null +++ 
b/.github/workflows/recycle-ai-demo.yml @@ -0,0 +1,23 @@ +name: Recycle AI demo + +on: + workflow_dispatch: + +jobs: + demo: + runs-on: ubuntu-latest + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + steps: + - uses: actions/checkout@v4 + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 24 + - name: Install dependencies + run: npm ci + - name: Run simple recycle API test + run: | + node server.js & + sleep 2 + curl -sS -X POST http://localhost:3001/api/recycle/recommend -H 'Content-Type: application/json' -d '{"items":[{"name":"plastic bottle"}],"location":"94107"}' | jq '.' || true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..84dc9d3 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,81 @@ +name: Build and Release + +on: + push: + tags: + - 'v*.*.*' + workflow_dispatch: {} + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + - name: Install dependencies + run: npm ci + - name: Run dist script + run: npm run dist:zip + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: dist-zip + path: dist/*.zip + + build-windows-installer: + runs-on: windows-latest + needs: build + steps: + - uses: actions/checkout@v4 + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + - name: Install dependencies + run: npm ci + - name: Install NSIS & ImageMagick + run: | + choco install nsis -y + choco install imagemagick -y + - name: Convert icon (ImageMagick) and build + run: | + powershell -ExecutionPolicy Bypass -File scripts/installer/convert-icon.ps1 || echo "convert skipped" + npm run dist:nsis + - name: Upload installer + uses: actions/upload-artifact@v4 + with: + name: dist-installer + path: dist/*Setup.exe + + release: + needs: [build, build-windows-installer] + runs-on: ubuntu-latest + if: startsWith(github.ref, 
'refs/tags/v') + steps: + - uses: actions/checkout@v4 + - name: Create GitHub Release + id: create_release + uses: actions/create-release@v1 + with: + tag_name: ${{ github.ref_name }} + release_name: Release ${{ github.ref_name }} + draft: false + prerelease: false + body: "Automated release created by workflow" + - name: Upload release zip asset + uses: actions/upload-release-asset@v2 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: dist/*.zip + asset_name: ${{ github.repository }}-${{ github.ref_name }}.zip + asset_content_type: application/zip + - name: Upload release installer asset + uses: actions/upload-release-asset@v2 + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: dist/*Setup.exe + asset_name: ${{ github.repository }}-${{ github.ref_name }}-installer.exe + asset_content_type: application/octet-stream \ No newline at end of file diff --git a/.github/workflows/render-diagrams.yml b/.github/workflows/render-diagrams.yml new file mode 100644 index 0000000..1e6648d --- /dev/null +++ b/.github/workflows/render-diagrams.yml @@ -0,0 +1,41 @@ +name: Render diagrams to PNG ๐Ÿ–ผ๏ธ + +on: + workflow_dispatch: {} + push: + branches: + - bigtree + +jobs: + render: + name: Render Mermaid diagrams + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + + - name: Install dependencies + run: npm ci + + - name: Install Puppeteer for rendering + run: npm install puppeteer --no-save + + - name: Render Mermaid to SVG + run: npx -y @mermaid-js/mermaid-cli -i "docs/diagrams/*.mmd" -o docs/diagrams -f svg + + - name: Render SVGs to PNG (hi-res) + run: node scripts/render-svgs.js 4 + + - name: List generated PNGs + run: ls -la docs/diagrams/*.png || true + + - name: Upload PNG artifacts + uses: actions/upload-artifact@v4 + with: + name: diagrams-png + path: docs/diagrams/*.png diff --git 
a/.github/workflows/smoke-e2e-openai.yml b/.github/workflows/smoke-e2e-openai.yml new file mode 100644 index 0000000..eff4e6d --- /dev/null +++ b/.github/workflows/smoke-e2e-openai.yml @@ -0,0 +1,67 @@ +name: Smoke test โ€” OpenAI end-to-end โœ… + +on: + workflow_dispatch: {} + pull_request: + types: [opened, synchronize, reopened] + +permissions: + actions: read + +jobs: + smoke-e2e: + name: E2E smoke test (start server + call /api/recycle/recommend) + runs-on: ubuntu-latest + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + + - name: Install dependencies + run: npm ci + + - name: Start server in background + run: | + nohup npm start > server.log 2>&1 & + echo $! > server.pid + + - name: Wait for server health + run: | + for i in {1..30}; do + STATUS=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/api/health || true) + echo "Attempt $i: health=$STATUS" + if [ "$STATUS" = "200" ]; then + curl -s http://localhost:3001/api/health | jq -r '.status, .uptime' + break + fi + sleep 2 + done + + - name: Perform recycle recommend request + run: | + set -o pipefail + echo '{"items":["plastic bottle"], "location":"test"}' > /tmp/payload.json + HTTP_CODE=$(curl -s -w "%{http_code}" -o /tmp/rec.json -X POST -H "Content-Type: application/json" -d @/tmp/payload.json http://localhost:3001/api/recycle/recommend) + echo "HTTP_CODE=$HTTP_CODE" + cat /tmp/rec.json + if [ "$HTTP_CODE" != "200" ]; then + echo "recommend endpoint failed: $HTTP_CODE" >&2 + exit 1 + fi + OK=$(jq -r '.ok' /tmp/rec.json) + if [ "$OK" != "true" ]; then + echo "recommend returned ok!=true" >&2 + exit 1 + fi + + - name: Cleanup server + if: always() + run: | + if [ -f server.pid ]; then kill $(cat server.pid) || true; fi + pkill -f "node server.js" || true diff --git a/.github/workflows/sterilization-docs.yml b/.github/workflows/sterilization-docs.yml 
new file mode 100644 index 0000000..5000b1e --- /dev/null +++ b/.github/workflows/sterilization-docs.yml @@ -0,0 +1,30 @@ +name: Lint Sterilization Docs + +on: + pull_request: + paths: + - 'docs/**' + - 'templates/**' + - 'MATERIALS.md' + +jobs: + lint_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run Super-Linter (markdown) + uses: github/super-linter@v5 + env: + VALIDATE_MARKDOWN: true + DEFAULT_BRANCH: main + + spellcheck: + runs-on: ubuntu-latest + needs: lint_docs + steps: + - uses: actions/checkout@v4 + - name: Run cspell + uses: check-spelling/action@v0.0.25 + with: + config: .github/cspell/cspell.json + continue-on-error: true diff --git a/.github/workflows/test-ai-robot.yml b/.github/workflows/test-ai-robot.yml new file mode 100644 index 0000000..dda844d --- /dev/null +++ b/.github/workflows/test-ai-robot.yml @@ -0,0 +1,44 @@ +name: Test AI Robot (mock) + +on: + pull_request: + branches: + - '**' + +jobs: + test: + name: AI Robot tests (mock) on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup + if: matrix.os == 'ubuntu-latest' + run: | + chmod +x ./scripts/test-ai-robot.sh + + - name: Run AI Robot tests (Linux) + if: matrix.os == 'ubuntu-latest' + run: | + ./scripts/test-ai-robot.sh --mock --concurrency 3 + + - name: Run AI Robot tests (Windows PowerShell) + if: matrix.os == 'windows-latest' + shell: pwsh + run: | + Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope Process + .\scripts\test-ai-robot.ps1 -Mock -Concurrency 3 + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: ai-robot-test-logs-${{ github.run_id }} + path: | + ./scripts/test-ai-robot.sh || true + ./scripts/test-ai-robot.ps1 || true diff --git a/.github/workflows/test-openai-secret.yml b/.github/workflows/test-openai-secret.yml new file mode 100644 index 0000000..3419c8e --- /dev/null 
+++ b/.github/workflows/test-openai-secret.yml @@ -0,0 +1,40 @@ +name: Test OpenAI secret โœ… + +on: + workflow_dispatch: {} + pull_request: + types: [opened, synchronize, reopened] + +permissions: + actions: read + +jobs: + check-openai-key: + name: Check OPENAI_API_KEY + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Ensure OPENAI_API_KEY secret exists + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + if [ -z "${OPENAI_API_KEY}" ]; then + echo "ERROR: OPENAI_API_KEY is not set in repository secrets" >&2 + exit 1 + fi + echo "OPENAI_API_KEY appears set (will not print the value)" + + - name: Validate OpenAI API key by calling Models endpoint + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + set -o pipefail + STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $OPENAI_API_KEY" https://api.openai.com/v1/models || true) + echo "HTTP status: $STATUS" + if [ "$STATUS" != "200" ]; then + echo "OpenAI API request failed with status $STATUS" >&2 + exit 1 + fi + echo "OpenAI API key validation succeeded (HTTP 200)." 
diff --git a/.gitignore b/.gitignore index 5002c7a..ddbc150 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ node_modules/ +# Local env file (contains secrets) .env .env.local .env.*.local dist/ @@ -7,3 +9,17 @@ build/ *.log .DS_Store .vercel + +# Android: google-services and local.properties +android/antigravity/app/google-services.json +android/antigravity/local.properties + +# Ignore local tool bundles +tools/ + +# LFS build artifacts +os/lfs/output/ + +# Local sensitive scripts (do not commit) +scripts/dummy-sa.json +scripts/gcloud-startup.ps1 diff --git a/.security/active_session.json b/.security/active_session.json new file mode 100644 index 0000000..0109106 --- /dev/null +++ b/.security/active_session.json @@ -0,0 +1,7 @@ +{ + "username": "admin", + "level": 4, + "login_time": "2026-01-02T11:50:53.035271", + "host": "BOOK-KDMJTUA9LB", + "platform": "Windows" +} \ No newline at end of file diff --git a/.security/users.json b/.security/users.json new file mode 100644 index 0000000..e85a1f1 --- /dev/null +++ b/.security/users.json @@ -0,0 +1,9 @@ +{ + "admin": { + "password_hash": "8a6d1f7718c6d64b31d720c8f0c1ee60c9f75c8016b6d95ad86e24a6e325b817", + "level": 4, + "created": "2026-01-02T11:48:37.888238", + "last_login": "2026-01-02T11:50:53.033672", + "mfa_enabled": false + } +} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..2884f57 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,40 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": ".NET: Attach to Process", + "type": "coreclr", + "request": "attach", + "processId": "${command:pickProcess}", + "justMyCode": true + }, + { + "name": ".NET: Launch (console)", + "type": "coreclr", + "request": "launch", + "preLaunchTask": "build", + // Replace the program path below with your project's output DLL if applicable + "program": "${workspaceFolder}/bin/Debug/net7.0/YourApp.dll", + "args": [], + "cwd": 
"${workspaceFolder}", + "stopAtEntry": false, + "console": "integratedTerminal", + "justMyCode": true + }, + { + "name": "Website: Launch preciseliens.com", + "type": "coreclr", + "request": "launch", + "preLaunchTask": "build", + "program": "${workspaceFolder}/bin/Debug/net7.0/Preciseliens.Web.dll", + "args": [], + "cwd": "${workspaceFolder}", + "env": { + "ASPNETCORE_URLS": "https://preciseliens.com;http://localhost:5000" + }, + "stopAtEntry": false, + "console": "integratedTerminal", + "justMyCode": true + } + ] +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 0000000..f489c56 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,14 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "build", + "type": "shell", + "command": "dotnet", + "args": ["build"], + "group": { "kind": "build", "isDefault": true }, + "presentation": { "reveal": "always" }, + "problemMatcher": ["$msCompile"] + } + ] +} \ No newline at end of file diff --git a/ANDREW.ps1.bak b/ANDREW.ps1.bak deleted file mode 100644 index 2dd5633..0000000 --- a/ANDREW.ps1.bak +++ /dev/null @@ -1,212 +0,0 @@ -# ๐Ÿ—ก๏ธ ANDREW - Automated Network Deployment Engine (Azure Ready!) 
-# Master orchestration script for NetworkBuster infrastructure -# Inspired by Andrew's Trials: Tower of Code, Labyrinth of Data, Dragon of Scale, Mirror of Innovation - -param( - [Parameter(Mandatory = $false)] - [ValidateSet("deploy-storage", "deploy-all", "status", "backup", "sync")] - [string]$Task = "status", - - [Parameter(Mandatory = $false)] - [string]$Environment = "production" -) - -# Colors for output -$Colors = @{ - Success = "Green" - Warning = "Yellow" - Error = "Red" - Info = "Cyan" - Trial = "Magenta" -} - -function Write-Trial { - param([string]$Message, [string]$Trial) - Write-Host "[$Trial] $Message" -ForegroundColor $Colors.Trial -} - -function Write-Status { - param([string]$Message, [string]$Status = "Info") - Write-Host $Message -ForegroundColor $Colors[$Status] -} - -# ============================================================================ -# ANDREW'S TRIALS - Infrastructure Deployment Tasks -# ============================================================================ - -function Invoke-StorageDeployment { - Write-Trial "โšก Trial One: Tower of Code - Building the Foundation" "ANDREW" - - $scriptPath = ".\deploy-storage-azure.ps1" - - if (-not (Test-Path $scriptPath)) { - Write-Status "โŒ Deploy script not found at $scriptPath" "Error" - return $false - } - - Write-Status "๐Ÿ”ง Executing Azure Storage deployment..." "Info" - & $scriptPath - - Write-Status "โœ… Tower of Code construction complete!" "Success" - return $true -} - -function Invoke-FullDeployment { - Write-Trial "๐Ÿ—ก๏ธ ANDREW'S FULL QUEST: All Trials Activated" "ANDREW" - - # Trial 1: Storage - Write-Trial "๐ŸŒŸ Trial One: Tower of Code" "ANDREW" - Invoke-StorageDeployment - - # Trial 2: Sync - Write-Trial "๐ŸŒŠ Trial Two: Labyrinth of Data - Synchronizing" "ANDREW" - Write-Status "Syncing repositories..." 
"Info" - git status - - # Trial 3: Backup - Write-Trial "๐Ÿ‰ Trial Three: Dragon of Scale - Creating Backups" "ANDREW" - Invoke-BackupProcedure - - # Trial 4: Status - Write-Trial "๐Ÿชž Trial Four: Mirror of Innovation - Status Check" "ANDREW" - Get-InfrastructureStatus - - Write-Status "๐Ÿ† ANDREW'S QUEST COMPLETE!" "Success" -} - -function Invoke-BackupProcedure { - Write-Status "Creating backup of current state..." "Info" - - $backupDate = Get-Date -Format "yyyyMMdd_HHmmss" - $backupPath = "D:\networkbuster_backup_$backupDate" - - if (-not (Test-Path "D:\")) { - Write-Status "โš ๏ธ D: drive not accessible, skipping backup" "Warning" - return - } - - try { - Copy-Item -Path "." -Destination $backupPath -Recurse -Force - Write-Status "โœ… Backup created: $backupPath" "Success" - } - catch { - Write-Status "โŒ Backup failed: $_" "Error" - } -} - -function Get-InfrastructureStatus { - Write-Status "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" "Info" - Write-Status "๐Ÿ” ANDREW'S INFRASTRUCTURE STATUS" "Info" - Write-Status "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" "Info" - - # Git status - Write-Status "`n๐Ÿ“ฆ Repository Status:" "Info" - git branch -v - git status --short - - # Storage check - Write-Status "`n๐Ÿ’พ Storage Infrastructure:" "Info" - if (Test-Path ".\infra\storage.bicep") { - Write-Status "โœ… Bicep template found" "Success" - Get-Item ".\infra\storage.bicep" | Select-Object Name, Length, LastWriteTime | Format-Table - } - else { - Write-Status "โŒ Bicep template missing" "Error" - } - - # Script check - Write-Status "`n๐Ÿš€ Deployment Scripts:" "Info" - $scripts = @("deploy-storage-azure.ps1", "deploy-storage-azure.sh", "ANDREW.ps1") - foreach ($script in $scripts) { - if (Test-Path ".\$script") { - Write-Status "โœ… $script" "Success" - } - else { - Write-Status "โŒ $script" "Error" - } - 
} - - # Azure CLI check - Write-Status "`nโ˜๏ธ Azure Connectivity:" "Info" - try { - $azVersion = az --version | Select-Object -First 1 - Write-Status "โœ… Azure CLI: $azVersion" "Success" - } - catch { - Write-Status "โš ๏ธ Azure CLI not available (optional)" "Warning" - } - - Write-Status "`nโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" "Info" -} - -function Sync-Repositories { - Write-Trial "๐Ÿ”„ Synchronizing all branches with DATACENTRAL" "ANDREW" - - try { - Write-Status "๐Ÿ“ก Checking current branch..." "Info" - $currentBranch = (git rev-parse --abbrev-ref HEAD) - Write-Status "Current: $currentBranch" "Info" - - Write-Status "`n๐Ÿ“Š All branches:" "Info" - git branch -a - - Write-Status "`n๐Ÿ”€ Fetching from remote..." "Info" - git fetch origin - - Write-Status "โœ… Repository sync complete" "Success" - } - catch { - Write-Status "โŒ Sync failed: $_" "Error" - } -} - -# ============================================================================ -# Main Execution -# ============================================================================ - -Write-Host "`n" -ForegroundColor Black -Write-Host "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Magenta -Write-Host "โ•‘ ๐Ÿ—ก๏ธ ANDREW - Network Deployment Engine ๐Ÿ—ก๏ธ โ•‘" -ForegroundColor Magenta -Write-Host "โ•‘ Automated Deployment for NetworkBuster Infrastructure โ•‘" -ForegroundColor Magenta -Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Magenta -Write-Host "`n" - -Write-Status "โฑ๏ธ Timestamp: $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss')" "Info" -Write-Status "๐ŸŒ Environment: $Environment" "Info" -Write-Status "๐Ÿ“ 
Location: $(Get-Location)" "Info" -Write-Status "๐ŸŽฏ Task: $Task" "Info" -Write-Host "`n" - -switch ($Task) { - "deploy-storage" { - Invoke-StorageDeployment - } - "deploy-all" { - Invoke-FullDeployment - } - "backup" { - Invoke-BackupProcedure - } - "sync" { - Sync-Repositories - } - "status" { - Get-InfrastructureStatus - } - default { - Get-InfrastructureStatus - } -} - -Write-Host "`n" -Write-Status "๐Ÿ ANDREW execution complete" "Success" -Write-Host "`n" - -# Usage examples -Write-Host "๐Ÿ“– ANDREW Usage Examples:" -ForegroundColor Cyan -Write-Host " .\ANDREW.ps1 # Show infrastructure status" -ForegroundColor Gray -Write-Host " .\ANDREW.ps1 -Task deploy-storage # Deploy Azure Storage only" -ForegroundColor Gray -Write-Host " .\ANDREW.ps1 -Task deploy-all # Full deployment (all trials)" -ForegroundColor Gray -Write-Host " .\ANDREW.ps1 -Task backup # Create backup to D: drive" -ForegroundColor Gray -Write-Host " .\ANDREW.ps1 -Task sync # Synchronize with remote" -ForegroundColor Gray -Write-Host "`n" diff --git a/AUTOSTART.bat b/AUTOSTART.bat new file mode 100644 index 0000000..53e7706 --- /dev/null +++ b/AUTOSTART.bat @@ -0,0 +1,32 @@ +@echo off +REM NetworkBuster One-Click Auto-Start +REM Automatically requests permissions and starts everything + +cd /d "%~dp0" + +echo. +echo ========================================== +echo NetworkBuster One-Click Launcher +echo ========================================== +echo. + +REM Check if running as admin +net session >nul 2>&1 +if %errorLevel% neq 0 ( + echo Requesting administrator permissions... + powershell -Command "Start-Process '%~f0' -Verb RunAs" + exit /b +) + +echo Running with administrator privileges... +echo. +echo Starting all services... +echo. + +call .venv\Scripts\activate.bat +python auto_start_service.py + +echo. +echo All services started! +echo Window will close in 3 seconds... 
+timeout /t 3 /nobreak >nul diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..7ccc62a --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +## [Unreleased] +- Packaging scripts added: `scripts/make-release.js` and `scripts/create-shortcut.ps1` +- Added `start-desktop.bat` and `npm` scripts: `dist:zip`, `release:create-shortcut`, `start:desktop` + +## [1.0.1] - YYYY-MM-DD +- Initial production release diff --git a/COMPLETION-ACKNOWLEDGMENT.md b/COMPLETION-ACKNOWLEDGMENT.md new file mode 100644 index 0000000..13f0aff --- /dev/null +++ b/COMPLETION-ACKNOWLEDGMENT.md @@ -0,0 +1,29 @@ +# Completion Acknowledgement โœ… + +**Project:** NetworkBuster + +**Date:** December 17, 2025 + +Thank you to everyone who contributed to the completion and distribution preparation of NetworkBuster. Your work on packaging, CI, and installer tooling made this milestone possible. + +## Completed highlights ๐Ÿ”ง +- Packaging scripts added: `scripts/make-release.js` (ZIP) and `scripts/build-nsis.ps1` (NSIS) +- Desktop shortcuts & launcher: `scripts/create-shortcut.ps1`, `start-desktop.bat` +- Windows installer: `scripts/installer/networkbuster-installer.nsi` +- Installer assets added: `scripts/installer/EULA.txt`, `scripts/installer/icon-placeholder.png`, `scripts/installer/convert-icon.ps1`, and `scripts/generate-icons.ps1` +- Placeholder multi-size icons: `scripts/installer/branding/icons/icon-256.png`, `icon-128.png`, `icon-64.png`, `icon-48.png`, `icon-32.png`, `icon-16.png` +- CI workflows: `.github/workflows/release.yml` and `.github/workflows/ci.yml` +- Comparison helper: `scripts/compare-with-luna.ps1` (clones & diffs Cleanskiier27/luna.eu) +- Documentation updates: `CHANGELOG.md`, README distribution notes + +## Acknowledgements ๐Ÿ™ +- Contributors and reviewers who implemented packaging and CI changes +- The luna.eu project (https://github.com/Cleanskiier27/luna.eu) for 
useful USB packaging and flashing concepts that informed the distribution workflow + +## Next recommended steps โ–ถ๏ธ +1. Validate builds locally (Node/npm/git/NSIS required). +2. Run CI on a test tag (e.g., `git tag v1.0.2 && git push origin --tags`) to verify release artifact and installer upload. +3. Review installer content and add optional assets (icons, EULA, Node portable bundle) if desired. +4. When ready, create the GitHub release and attach artifacts produced by CI. + +If you'd like, I can prepare the installer icon and EULA, or draft a short release note to attach to the GitHub release. Reply with which follow-up you prefer and I'll proceed. \ No newline at end of file diff --git a/DEVICE_REGISTRATION_GOAL.md b/DEVICE_REGISTRATION_GOAL.md new file mode 100644 index 0000000..6b70a2b --- /dev/null +++ b/DEVICE_REGISTRATION_GOAL.md @@ -0,0 +1,88 @@ +# Device Registration โ†’ Neural Network + +**Overview** +This document specifies the new project goal for builders: when a new device registers with NetworkBuster, its registration and initial telemetry must be validated, persisted, and forwarded into the neural network ingestion pipeline for training or inference. This capability enables device-aware models and closed-loop improvements. + +## Goal (one-liner) +Pass every new device registration into the neural network ingestion pipeline reliably, securely, and with full observability. + +## Priority +- **Priority:** High (project-level goal) +- **Owner:** platform / ingestion team (assign on project board) + +## Acceptance Criteria +1. POST /api/devices/register returns canonical device ID and registration status. +2. Registration payload is validated and stored in device registry (persistent DB). Schema is versioned. +3. A registration event is enqueued to an ingestion topic (e.g., Azure Service Bus, Kafka, or Azure Event Grid). +4. 
Neural network ingestion service consumes the event, returns acknowledgement, and registration status is updated in the registry (queued โ†’ processed โ†’ acknowledged or failed). +5. Automated E2E test that simulates a device registration and verifies processed status. +6. Metrics/alerts: registration rate, ingestion queue lag, processing success/failure, and SLA violation alerts. + +## Data model (minimum) +DeviceRegistration { + deviceId (string) // provided by device or generated + hardwareId (string) // device serial/MAC/fingerprint + model (string) // device model + firmwareVersion (string) + location (string | geo-coords) + ts (ISO8601) // registration timestamp + initialTelemetry: { battery, temp, sensors: {...} } // optional +} + +## API Spec (example) +- POST /api/devices/register + - input: DeviceRegistration payload (JSON) + - responses: + - 201 Created { deviceId, status: 'registered' } + - 202 Accepted { deviceId, status: 'queued' } + - 400 Bad Request + +Authentication: API key or OAuth. Rate limit per IP/credential. 
+ +## Ingestion contract +- Message schema must match DeviceRegistration with metadata: {source, version, traceId} +- Messages delivered to topic: `device-registrations.v1` with at-least-once delivery +- Consumer (ingestion microservice) must return processing result to `device-registration-results` topic or call back API to update status + +## Security & Privacy +- Validate and sanitize all fields +- Store sensitive identifiers hashed or encrypted at rest +- Enforce ACLs and authenticated endpoints +- Log access and changes for audits + +## Reliability +- Use a durable queue (retry/backoff policy) +- Implement idempotency keys (deviceId + ts) to avoid duplicate processing +- Provide op metrics and health endpoints + +## Observability +- Traces: Attach a traceId from API -> queue -> ingestion consumer -> model +- Metrics: registration_count, registration_errors, ingestion_lag_seconds, ingestion_success_rate +- Logs: structured logs with correlation IDs + +## Tests +- Unit tests: validation, schema, DB write +- Integration tests: API -> DB -> queue (mock) -> ingestion (mock) +- E2E test: bring up a test ingestion consumer and verify registration processed + +## Implementation suggestions for builders +1. Add `POST /api/devices/register` with JSON schema validation using existing API framework (e.g., express + Joi or equivalent). +2. Persist registrations in a `devices` collection/table with status and audit fields. +3. Use `az acr build` container or existing unix-friendly workers to host ingestion consumer. +4. Publish a message to Azure Service Bus / Kafka topic with schema and trace context. +5. Create a small ingestion worker that reads topic and calls model ingestion REST or gRPC endpoint. +6. Add monitoring dashboards and alerts in observability platform (Log Analytics / Prometheus). 
+ +## Suggested Milestones (for PRs) +- M1 API + DB schema + unit tests +- M2 Queue publish + consumer (test harness) +- M3 Ingestion acknowledgement + status transitions + E2E test +- M4 Security review + production runbook + +## Notes +- Keep the message schema versioned and backward-compatible. +- Document the exact contract in `api/schema/device-registration.json` when ready. + +--- + +Add this file as the canonical specification for builders and link from `PROJECT-SUMMARY.md` and any relevant docs. diff --git a/DUAL-ROUTER-SETUP-GUIDE.md b/DUAL-ROUTER-SETUP-GUIDE.md new file mode 100644 index 0000000..b075c43 --- /dev/null +++ b/DUAL-ROUTER-SETUP-GUIDE.md @@ -0,0 +1,335 @@ +# Dual Router Setup Guide: WiFi 7 Mesh + NetworkBuster.net + +**Setup Date:** January 3, 2026 +**Configuration Type:** Cascaded Dual Router with Custom Domain + +--- + +## Network Topology Overview + +``` +Internet โ†’ WiFi 7 Mesh Router (Primary) โ†’ NetworkBuster Router (Secondary) + 192.168.1.1 192.168.1.100 or 192.168.2.1 +``` + +--- + +## Option A: Same Subnet (Simpler Setup) + +### WiFi 7 Mesh Router (Primary Gateway) + +**IP Configuration:** +- **Router IP:** `192.168.1.1` +- **Subnet Mask:** `255.255.255.0` +- **DHCP Range:** `192.168.1.10` to `192.168.1.99` +- **DNS Primary:** `8.8.8.8` (Google) +- **DNS Secondary:** `1.1.1.1` (Cloudflare) + +**WiFi 7 Settings:** +- **Network Name (SSID):** `YourNetwork-WiFi7` +- **Security:** WPA3-Personal +- **Password:** [Your secure password] +- **Band:** 2.4GHz + 5GHz + 6GHz (tri-band mesh) + +### NetworkBuster Router (Secondary) + +**IP Configuration:** +- **Router IP:** `192.168.1.100` (static, outside DHCP range) +- **Subnet Mask:** `255.255.255.0` +- **Gateway:** `192.168.1.1` (points to WiFi 7 mesh) +- **DHCP:** **DISABLED** (WiFi 7 handles DHCP) +- **DNS:** `192.168.1.1` (forwards to WiFi 7 router) + +**Connection:** +- **Cable:** Connect WiFi 7 mesh LAN port โ†’ NetworkBuster WAN/LAN port +- **Mode:** Bridge/AP mode (disable NAT on 
NetworkBuster) + +--- + +## Option B: Separate Subnet (Advanced - Better Isolation) + +### WiFi 7 Mesh Router (Primary Gateway) + +**IP Configuration:** +- **Router IP:** `192.168.1.1` +- **Subnet Mask:** `255.255.255.0` +- **DHCP Range:** `192.168.1.10` to `192.168.1.254` +- **DNS Primary:** `8.8.8.8` +- **DNS Secondary:** `1.1.1.1` + +### NetworkBuster Router (Secondary Subnet) + +**IP Configuration:** +- **Router IP:** `192.168.2.1` +- **Subnet Mask:** `255.255.255.0` +- **Gateway:** `192.168.1.1` +- **DHCP Range:** `192.168.2.10` to `192.168.2.254` +- **DNS:** `192.168.1.1` or `8.8.8.8` + +**Connection:** +- **Cable:** WiFi 7 mesh LAN โ†’ NetworkBuster WAN port +- **Mode:** Router mode (NAT enabled for subnet isolation) + +**Static Route on WiFi 7 Router:** +``` +Destination: 192.168.2.0/24 +Gateway: 192.168.2.1 +``` + +--- + +## NetworkBuster.net Domain Setup + +### Local DNS Configuration (Internal Network) + +**On WiFi 7 Mesh Router:** + +**Add DNS Host Entries:** +``` +networkbuster.net โ†’ 192.168.1.100 (Option A) or 192.168.2.1 (Option B) +www.networkbuster.net โ†’ 192.168.1.100 or 192.168.2.1 +mission.networkbuster.net โ†’ 192.168.1.100 or 192.168.2.1 +api.networkbuster.net โ†’ 192.168.1.100 or 192.168.2.1 +``` + +**Alternative: Edit Hosts File on All Devices** +- **Windows:** `C:\Windows\System32\drivers\etc\hosts` +- **macOS/Linux:** `/etc/hosts` + +``` +192.168.1.100 networkbuster.net www.networkbuster.net +192.168.1.100 mission.networkbuster.net +192.168.1.100 api.networkbuster.net +``` + +### External DNS (Public Internet Access) + +**If You Own networkbuster.net Domain:** + +**DNS A Records (at your domain registrar):** +``` +Type Name Value TTL +A @ [Your Public IP] 3600 +A www [Your Public IP] 3600 +A mission [Your Public IP] 3600 +A api [Your Public IP] 3600 +``` + +**Dynamic DNS (DDNS) Setup:** +- **Service:** No-IP, DuckDNS, or your router's built-in DDNS +- **Update Interval:** Every 5 minutes +- **Domain:** `yourname.ddns.net` (free) or 
`networkbuster.net` (owned domain) + +--- + +## Port Forwarding Configuration + +**Configure on WiFi 7 Mesh Router:** + +| Service | External Port | Internal IP | Internal Port | Protocol | +|---------------------|---------------|-------------------|---------------|----------| +| Web Server | 3000 | 192.168.1.100 | 3000 | TCP | +| API Server | 3001 | 192.168.1.100 | 3001 | TCP | +| Audio Stream | 3002 | 192.168.1.100 | 3002 | TCP | +| NASA Mission Control| 5000 | 192.168.1.100 | 5000 | TCP | +| HTTP (Web) | 80 | 192.168.1.100 | 3000 | TCP | +| HTTPS (Secure) | 443 | 192.168.1.100 | 443 | TCP | + +**If Using Option B (Separate Subnet):** +- Change Internal IP to `192.168.2.1` in all port forwarding rules + +--- + +## Windows Firewall Rules (NetworkBuster Device) + +**Already Configured:** +```powershell +# Verify existing rules +Get-NetFirewallRule -DisplayName "NetworkBuster*" | Select-Object DisplayName, Enabled +``` + +**Add Mission Control Port:** +```powershell +New-NetFirewallRule -DisplayName "NetworkBuster-MissionControl" ` + -Direction Inbound -LocalPort 5000 -Protocol TCP -Action Allow +``` + +--- + +## Step-by-Step Setup Process + +### Step 1: Configure WiFi 7 Mesh Router +1. Connect to WiFi 7 router at `192.168.1.1` +2. Login to admin panel +3. Set router IP to `192.168.1.1` +4. Enable DHCP: `192.168.1.10` - `192.168.1.99` (Option A) or `192.168.1.10` - `192.168.1.254` (Option B) +5. Set DNS servers: `8.8.8.8` and `1.1.1.1` +6. Enable WiFi 7 (6GHz band) +7. Set WPA3 security + +### Step 2: Configure NetworkBuster Router +1. Connect NetworkBuster to computer temporarily +2. Access router at default IP (usually `192.168.0.1` or `192.168.1.1`) +3. Change router IP to: + - **Option A:** `192.168.1.100` + - **Option B:** `192.168.2.1` +4. Set subnet mask: `255.255.255.0` +5. **Option A:** Disable DHCP server, enable Bridge/AP mode +6. **Option B:** Enable DHCP (`192.168.2.10` - `192.168.2.254`) +7. Save and reboot + +### Step 3: Physical Connection +1. 
Power off both routers +2. Connect Ethernet cable: + - WiFi 7 LAN port โ†’ NetworkBuster WAN port (or LAN port for Option A) +3. Power on WiFi 7 mesh router first (wait 2 minutes) +4. Power on NetworkBuster router (wait 2 minutes) + +### Step 4: Verify Connection +```powershell +# Test connectivity +ping 192.168.1.1 # WiFi 7 router +ping 192.168.1.100 # NetworkBuster (Option A) +ping 8.8.8.8 # Internet + +# Test domain resolution +ping networkbuster.net +``` + +### Step 5: Configure Port Forwarding +1. Login to WiFi 7 router (`192.168.1.1`) +2. Navigate to Port Forwarding / Virtual Servers +3. Add all port forwarding rules from table above +4. Save and apply + +### Step 6: Add Local DNS Entries +1. In WiFi 7 router, find DNS/Hostname settings +2. Add custom hosts: + - `networkbuster.net` โ†’ `192.168.1.100` + - `www.networkbuster.net` โ†’ `192.168.1.100` + - `mission.networkbuster.net` โ†’ `192.168.1.100` + +### Step 7: Test Services +```powershell +# From any device on network +Invoke-WebRequest -Uri "http://networkbuster.net:3000" +Invoke-WebRequest -Uri "http://networkbuster.net:5000" +Invoke-WebRequest -Uri "http://mission.networkbuster.net:5000" +``` + +--- + +## Access URLs (Internal Network) + +**Direct IP Access:** +- Web Server: `http://192.168.1.100:3000` +- API Server: `http://192.168.1.100:3001` +- Audio Stream: `http://192.168.1.100:3002` +- NASA Mission Control: `http://192.168.1.100:5000` + +**Domain Access (After DNS Setup):** +- Web: `http://networkbuster.net:3000` +- Web: `http://www.networkbuster.net:3000` +- Mission Control: `http://mission.networkbuster.net:5000` +- API: `http://api.networkbuster.net:3001` + +**External Access (After Port Forwarding):** +- Web: `http://[YOUR_PUBLIC_IP]:3000` +- Mission Control: `http://[YOUR_PUBLIC_IP]:5000` + +--- + +## Troubleshooting + +### Can't Access NetworkBuster Router +```powershell +# Find router IP +arp -a | Select-String "192.168" + +# Verify route +route print +``` + +### Port Not Accessible 
+```powershell +# Check if port is listening +Get-NetTCPConnection -LocalPort 3000 -State Listen + +# Test firewall rule +Test-NetConnection -ComputerName 192.168.1.100 -Port 3000 +``` + +### Domain Not Resolving +```powershell +# Check DNS resolution +nslookup networkbuster.net + +# Flush DNS cache +ipconfig /flushdns + +# Test direct IP +ping 192.168.1.100 +``` + +### No Internet on NetworkBuster Subnet +```powershell +# Check gateway +ipconfig | Select-String "Gateway" + +# Add static route on WiFi 7 router +# Destination: 192.168.2.0/24 โ†’ Gateway: 192.168.2.1 +``` + +--- + +## Security Recommendations + +1. **Change Default Passwords:** + - WiFi 7 router admin password + - NetworkBuster router admin password + - WiFi network password + +2. **Enable WPA3:** On WiFi 7 mesh for maximum encryption + +3. **Disable WPS:** On both routers (security risk) + +4. **Enable Firewall:** On both routers + +5. **Update Firmware:** Keep both routers updated + +6. **Guest Network:** Use WiFi 7 guest network for IoT devices + +7. 
**VPN:** Consider VPN for external access instead of port forwarding + +--- + +## Recommended Configuration + +**For Best Performance:** Use **Option A** (Same Subnet) +- Simpler setup +- No double NAT issues +- NetworkBuster acts as WiFi access point +- Easier port forwarding + +**For Better Security:** Use **Option B** (Separate Subnet) +- Network isolation +- Separate traffic control +- Better for multiple services +- Easier firewall rules per subnet + +--- + +## Quick Reference + +**WiFi 7 Mesh Router:** `192.168.1.1` +**NetworkBuster Router:** `192.168.1.100` (Option A) or `192.168.2.1` (Option B) +**Domain:** `networkbuster.net` +**Services:** Ports 3000, 3001, 3002, 5000 + +**DNS Servers:** `8.8.8.8` (Primary), `1.1.1.1` (Secondary) +**Subnet Mask:** `255.255.255.0` +**DHCP Range:** `192.168.1.10` - `192.168.1.99` (WiFi 7 only) + +--- + +**Setup Complete!** Your WiFi 7 mesh router and NetworkBuster are now configured for optimal performance with custom domain support. diff --git a/ENVIRONMENT_CHANGES.md b/ENVIRONMENT_CHANGES.md new file mode 100644 index 0000000..535e154 --- /dev/null +++ b/ENVIRONMENT_CHANGES.md @@ -0,0 +1,22 @@ +# Terminal Environment Changes + +## Extension: vscode.git + +Enables the following features: git auth provider + +- `GIT_ASKPASS=c:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\resources\app\extensions\git\dist\askpass.sh` +- `VSCODE_GIT_ASKPASS_NODE=C:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\Code.exe` +- `VSCODE_GIT_ASKPASS_EXTRA_ARGS=` +- `VSCODE_GIT_ASKPASS_MAIN=c:\Users\daypi\AppData\Local\Programs\Microsoft VS Code\resources\app\extensions\git\dist\askpass-main.js` +- `VSCODE_GIT_IPC_HANDLE=\\.\\pipe\vscode-git-b65fb0a601-sock` + +## Extension: GitHub.copilot-chat + +Enables use of `copilot-debug` and `copilot` commands in the terminal + +- 
`PATH=c:\Users\daypi\AppData\Roaming\Code\User\globalStorage\github.copilot-chat\debugCommand;c:\Users\daypi\AppData\Roaming\Code\User\globalStorage\github.copilot-chat\copilotCli;${env:PATH}` + +## Extension: ms-python.python + +- `PYTHONSTARTUP=c:\Users\daypi\AppData\Roaming\Code\User\workspaceStorage\706e51ab7d28f79eab99add937660e6e\ms-python.python\pythonrc.py` +- `PYTHON_BASIC_REPL=1` \ No newline at end of file diff --git a/HYPERV-LINUX-SETUP.md b/HYPERV-LINUX-SETUP.md index ffec06e..f9be320 100644 --- a/HYPERV-LINUX-SETUP.md +++ b/HYPERV-LINUX-SETUP.md @@ -82,6 +82,15 @@ Or search "Hyper-V Manager" in Windows Start menu 8. **Summary:** Click "Finish" +### Step 3.1: GPU Partitioning (Pro Upgrade) +For optimal AI Gateway performance, allow the VM to access your host GPU (GPU-PV). + +Run this in PowerShell (Admin) after creating the VM: +```powershell +# Assign GPU to VM +.\scripts\provision-hyperv-vm.ps1 -VMName "NetworkBuster-Linux" -EnableGPU -EnableNetworkAcceleration +``` + --- ## Step 4: Start VM and Install Ubuntu @@ -119,12 +128,12 @@ Start-VM -Name "NetworkBuster-Linux" # Update system sudo apt update && sudo apt upgrade -y -# Install Node.js 24.x +# Install Node.js 24.x (LTS) curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash - sudo apt install -y nodejs -# Install Git -sudo apt install -y git +# Install Git & Optimization Tools +sudo apt install -y git net-tools ethtool # Verify installations node --version # v24.x @@ -350,15 +359,19 @@ ssh -i C:\path\to\key ubuntu@192.168.x.x --- -## Performance Tips +## Performance Tuning (Pro) -- **Allocate enough resources:** 4GB RAM, 2+ CPU cores -- **Use SSD storage:** VM performance depends on disk -- **Enable nested virtualization:** For Docker-in-Hyper-V -- **Snapshots:** Before major changes - ```powershell - Checkpoint-VM -Name "NetworkBuster-Linux" -SnapshotName "Working-State" - ``` +### Enable SR-IOV +Single Root I/O Virtualization (SR-IOV) significantly reduces network latency. +1. 
In Hyper-V Manager โ†’ Virtual Switch Manager. +2. Select your switch โ†’ Check "Enable SR-IOV". +3. In VM Settings โ†’ Network Adapter โ†’ Hardware Acceleration โ†’ Check "Enable SR-IOV". + +### Nested Virtualization (For Docker) +If you plan to run Docker *inside* your Linux VM: +```powershell +Set-VMProcessor -VMName "NetworkBuster-Linux" -ExposeVirtualizationExtensions $true +``` --- @@ -367,10 +380,10 @@ ssh -i C:\path\to\key ubuntu@192.168.x.x 1. โœ… Enable Hyper-V (restart required) 2. โœ… Download Ubuntu ISO 3. โœ… Create VM in Hyper-V Manager -4. โœ… Install Ubuntu -5. โœ… Install Node.js & dependencies -6. โœ… Clone project -7. โœ… Test servers -8. โœ… (Optional) Set up Docker +4. โœ… Run `provision-hyperv-vm.ps1` for GPU/Performance +5. โœ… Install Ubuntu +6. โœ… Install Node.js & dependencies +7. โœ… Clone project +8. โœ… Test servers -**You'll be able to test NetworkBuster on Windows AND Linux!** +**You now have a high-performance AI-ready Linux testing environment!** diff --git a/HYPERV-QUICK-START.md b/HYPERV-QUICK-START.md index 2ec7e8d..c57a2ae 100644 --- a/HYPERV-QUICK-START.md +++ b/HYPERV-QUICK-START.md @@ -14,6 +14,8 @@ Enable-WindowsOptionalFeature -FeatureName Hyper-V -Online -All # After restart, create VM manually (see HYPERV-LINUX-SETUP.md) +# Then run the Upgrade script: +.\scripts\provision-hyperv-vm.ps1 -VMName "NetworkBuster-Linux" -EnableGPU -EnableNetworkAcceleration ``` --- @@ -38,9 +40,9 @@ Enable-WindowsOptionalFeature -FeatureName Hyper-V -Online -All # Update system sudo apt update && sudo apt upgrade -y -# Install Node.js 24.x +# Install Node.js 24.x LTS curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash - -sudo apt install -y nodejs git +sudo apt install -y nodejs git ethtool # Clone project git clone https://github.com/NetworkBuster/networkbuster.net.git diff --git a/MATERIALS.md b/MATERIALS.md new file mode 100644 index 0000000..d3e50f7 --- /dev/null +++ b/MATERIALS.md @@ -0,0 +1,25 @@ +# Materials + +This file 
documents the materials and properties managed in the recycle procedure. + +## Properties + +- mixed plastic + +## Sterilization & Decontamination Supplies + +- Nitrile gloves (various sizes) +- N95 respirators or PAPRs +- Safety goggles / face shields +- Lint-free wipes (low-lint, microfiber) +- Sterile swabs (foam tipped) for crevices +- Isopropyl alcohol (70%โ€“90%) in sealed containers +- Manufacturer-approved optical cleaning fluids (for optics) +- HEPA portable air purifier (local capture) +- UV-C lamp (supplementary only; follow safety guidelines) +- Disposable gowns / coveralls and shoe covers +- Sealable waste bags and biohazard labels + +**Notes:** Consult instrument manufacturer for approved cleaning agents and procedures; when biological contamination is suspected, contact biosafety personnel and do not proceed without authorization. + +*Add additional materials/properties as needed.* diff --git a/NetworkBuster_Git_Shortcuts/git_dashboard.html b/NetworkBuster_Git_Shortcuts/git_dashboard.html new file mode 100644 index 0000000..90443dd --- /dev/null +++ b/NetworkBuster_Git_Shortcuts/git_dashboard.html @@ -0,0 +1,296 @@ + + + + + + + NetworkBuster Git Repositories + + + +
+
+

๐Ÿ—‚๏ธ NetworkBuster Git Repositories

+

Cloud-Synced Repository Dashboard

+

Generated: 2026-01-03 06:43:42

+
+ +
+
+

1

+

๐Ÿ“ฆ Total Repositories

+
+
+

59

+

๐Ÿ“ Total Commits

+
+
+

34

+

๐Ÿ”ง Modified Files

+
+
+

299.98 MB

+

๐Ÿ’พ Total Size

+
+
+ +
+ +
+
+
๐Ÿ“
+
+

networkbuster.net

+ ๐ŸŒฟ bigtree +
+
+ +
+
+ ๐Ÿ“ + C:\Users\daypi\networkbuster.net +
+
+ ๐Ÿ”— + https://github.com/NetworkBuster/networkbuster.net.git +
+
+ ๐Ÿ“Š + 59 commits โ€ข 299.98 MB +
+
+ +
+ 1598d7e - Sync and redeploy: staged changes before redeployment (3 weeks ago) +
+ +
+ + 34 modified + +
+ + +
+ +
+ + +
+ + + + diff --git a/NetworkBuster_Git_Shortcuts/git_manifest.json b/NetworkBuster_Git_Shortcuts/git_manifest.json new file mode 100644 index 0000000..42d63d5 --- /dev/null +++ b/NetworkBuster_Git_Shortcuts/git_manifest.json @@ -0,0 +1,18 @@ +{ + "generated": "2026-01-03T06:43:42.158278", + "total_repos": 1, + "total_commits": 59, + "total_size": 314556826, + "repositories": [ + { + "name": "networkbuster.net", + "path": "C:\\Users\\daypi\\networkbuster.net", + "branch": "bigtree", + "remote_url": "https://github.com/NetworkBuster/networkbuster.net.git", + "last_commit": "1598d7e - Sync and redeploy: staged changes before redeployment (3 weeks ago)", + "modified_files": 34, + "commit_count": "59", + "size": 314556826 + } + ] +} \ No newline at end of file diff --git a/NetworkBuster_Git_Shortcuts/networkbuster.net.bat b/NetworkBuster_Git_Shortcuts/networkbuster.net.bat new file mode 100644 index 0000000..b6ae834 --- /dev/null +++ b/NetworkBuster_Git_Shortcuts/networkbuster.net.bat @@ -0,0 +1,3 @@ +@echo off +cd /d "C:\Users\daypi\networkbuster.net" +start "" "%SystemRoot%\explorer.exe" "C:\Users\daypi\networkbuster.net" diff --git a/Networkbuster.net b/Networkbuster.net new file mode 160000 index 0000000..c630375 --- /dev/null +++ b/Networkbuster.net @@ -0,0 +1 @@ +Subproject commit c6303750f65b00061a694288c7a60f977d5ef51e diff --git a/PROJECT-SUMMARY.md b/PROJECT-SUMMARY.md index 386b646..e1dbc31 100644 --- a/PROJECT-SUMMARY.md +++ b/PROJECT-SUMMARY.md @@ -262,6 +262,43 @@ c:/Users/daypi/.gemini/antigravity/playground/iridescent-planetary/ - **Maintenance Guide**: Long-term operation support - **Research Foundation**: Scientific references for validation +--- + +## New Project Goal: Device Registration โ†’ Neural Network (Priority) +- **Goal for builders:** Implement reliable new-device registration that captures device identity and telemetry, validates and sanitizes the payload, stores it, and forwards it into the training / inference pipeline (neural network) in 
a secure, auditable, and testable way. +- **Why:** Enables automated model training, device-aware decisions, remote provisioning, and closed-loop improvement based on real device data. +- **High-level acceptance criteria:** + - A stable API exists to register new devices and receive a canonical device id. + - Device metadata + initial telemetry is persisted in a schema documented in `DEVICE_REGISTRATION_GOAL.md`. + - A secure forwarding mechanism (message queue, data pipeline) reliably delivers registration events to the neural network ingestion endpoint (with retries and observability). + - Model pipeline acknowledges receipt and publishes processing status; registration shows 'registered', 'queued', 'processed', or 'failed'. + - End-to-end test exists covering registration -> pipeline ingestion -> acknowledgement. + +> This is a top-level goal for all builders โ€” see `DEVICE_REGISTRATION_GOAL.md` for full spec, wire diagrams, API examples, and implementation notes. + +### Quick dev notes (M1 implemented, M2 in progress) +- POST `/api/devices/register` (prototype) implemented in `api/devices.js`. +- Persistence: local files stored in `data/devices/` (use `lib/deviceStore.js`). +- Queue: `device-registrations.v1` stored under `data/queue/device-registrations.v1/` using `lib/messageQueue.js`. +- Worker: `workers/ingestWorker.js` provided as a simple polling consumer. Run with `node workers/ingestWorker.js`. +- Test: `node tests/test-device-registration.js` will POST a sample registration (assumes server is running on port 3001). + +### M2: Queue publish + consumer (completed) +- Azure Service Bus integration: Added to `deploy-azure.ps1` with `-SetupServiceBus` flag. +- Updated `lib/messageQueue.js` to use Azure Service Bus SDK (falls back to files if no connection string). +- New consumer: `workers/deviceConsumer.js` polls queue and forwards to ingestion endpoint. +- Mock ingestion: Added `POST /api/ingestion/mock` to server for testing. 
+- Run consumer: `npm run worker:device-consumer` (set `INGESTION_ENDPOINT` env var). +- Deploy consumer: As Container App or Function App after Service Bus setup. + +### M3: Ingestion acknowledgement + status transitions (completed) +- Status transitions: Added `transitionStatus()` with validation (registered โ†’ queued โ†’ processing โ†’ acknowledged/failed). +- Mock ingestion: Updated to return acknowledgements with confidence scores, simulate failures (10%), and processing delays. +- Consumer retries: Added exponential backoff retry logic (up to 3 attempts) for failed ingestions. +- E2E test: Updated to wait for 'acknowledged' status and handle failures. +- API: Uses validated status transitions in registration endpoint. + + --- ## Technical Highlights diff --git a/PR_NOTE.md b/PR_NOTE.md new file mode 100644 index 0000000..f156ecd --- /dev/null +++ b/PR_NOTE.md @@ -0,0 +1,32 @@ +PR Notes โ€” Add Network Boost utilities + +Summary: +This PR adds a cross-platform ``Network Boost`` utility to improve network throughput and configuration for target systems. It includes hardened apply logic and generates robust restore scripts to revert changes. + +Files to add to upstream (`Cleanskiier27/Final`): +- `scripts/network-boost.ps1` (Windows) +- `scripts/network-boost.sh` (Linux) +- `docs/NETWORK-BOOST.md` (documentation) +- `CONTRIBUTORS.md` (contributor entry) + +Testing recommendations: +- Run dry-run and review outputs: (Windows) `powershell -File scripts\network-boost.ps1` (Linux) `bash ./scripts/network-boost.sh` +- Run apply in a controlled VM and verify `network-boost-restore.*` contents and restore operations. +- Validate that installer integration is opt-in (checkbox) and uses non-interactive apply with `-Apply -Confirm:$false`. + +Security & Safety: +- Scripts are designed to be reversible and non-destructive; restore scripts are generated with previous values and best-effort commands. 
+- Scripts log all operations to `network-boost.log` and recommend reboot where appropriate. + +Maintainer notes: +- If merging, consider adding a small CI job that runs a dry-run, installs PSScriptAnalyzer/shellcheck, and verifies that restore scripts are generated when running apply in a controlled test runner. +- Optionally add an installer page and an entry in the main docs referencing the new tooling. + +--- + +To apply this contribution automatically to upstream (fork + PR): +- Use the helper script `scripts/apply-to-upstream.sh` (Linux/macOS) or `scripts/apply-to-upstream.ps1` (Windows). +- Example (bash): `./scripts/apply-to-upstream.sh --upstream https://github.com/Cleanskiier27/Final.git --fork git@github.com:youruser/Final.git` +- Example (PowerShell): `.\scripts\apply-to-upstream.ps1 -Upstream 'https://github.com/Cleanskiier27/Final.git' -Fork 'git@github.com:youruser/Final.git'` + +The helper clones upstream, creates a branch, copies contribution files, commits, pushes to your fork, and uses `gh` (if available) to open a PR. If `gh` is not available, push to your fork and open a PR manually. \ No newline at end of file diff --git a/QUICK-COMMANDS.md b/QUICK-COMMANDS.md new file mode 100644 index 0000000..f799c3a --- /dev/null +++ b/QUICK-COMMANDS.md @@ -0,0 +1,82 @@ +# NetworkBuster - Quick Command Reference +# Simple commands to make everything easier + +## Batch Files (Double-click or run from CMD) + +```cmd +start.bat - Start all NetworkBuster services +stop.bat - Stop all services +status.bat - Show current status +map.bat - Open network map +tracer.bat - Open API tracer +backup.bat - Backup git to D: and K: drives +thumbnails.bat - Extract and view thumbnails +``` + +## PowerShell Functions (Load with: . 
.\nb.ps1) + +```powershell +nb-start - Start all services +nb-stop - Stop all services +nb-status - Show status +nb-map - Open network map +nb-tracer - Open API tracer +nb-mission - Open mission control +nb-backup - Run git backup +nb-thumbs - Extract thumbnails +nb-all - Open all dashboards at once +nb-help - Show help +``` + +## Python Direct Commands + +```powershell +python networkbuster_launcher.py --start # Start everything +python networkbuster_launcher.py --stop # Stop everything +python networkbuster_launcher.py --status # Check status +python network_map_viewer.py # Run map +python api_tracer.py # Run tracer +python flash_git_backup.py # Backup git +python extract_thumbnails.py # Extract thumbnails +``` + +## One-Line Quick Starts + +```powershell +# Start and open Universal Launcher +python networkbuster_launcher.py --start; start http://localhost:7000 + +# Quick map view +python network_map_viewer.py; start http://localhost:6000 + +# Quick API trace +python api_tracer.py; start http://localhost:8000 + +# All dashboards +start http://localhost:3000,http://localhost:5000,http://localhost:6000,http://localhost:7000,http://localhost:8000 +``` + +## URL Shortcuts + +- Main Dashboard: http://localhost:7000 +- Network Map: http://localhost:6000 +- API Tracer: http://localhost:8000 +- Mission Control: http://localhost:5000 +- Web Server: http://localhost:3000 +- API Server: http://localhost:3001 +- Audio Stream: http://localhost:3002 + +## Desktop Shortcuts (Already Created) + +- NetworkBuster.lnk - Main launcher +- NetworkBuster Map.lnk - Network map viewer + +## Start Menu Programs + +- Start โ†’ Programs โ†’ NetworkBuster โ†’ (Choose any tool) + +## Simplest Usage + +**Just double-click: `start.bat`** + +That's it! Everything launches automatically. 
diff --git a/README.md b/README.md index 928679b..7c954c9 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ ![Award](https://img.shields.io/badge/award-Innovation%20%26%20Excellence-gold.svg) ![License](https://img.shields.io/badge/license-MIT-blue.svg) +[![OpenAI secret test](https://github.com/networkbuster/networkbuster.net/actions/workflows/test-openai-secret.yml/badge.svg)](https://github.com/networkbuster/networkbuster.net/actions/workflows/test-openai-secret.yml) +[![OpenAI E2E smoke test](https://github.com/networkbuster/networkbuster.net/actions/workflows/smoke-e2e-openai.yml/badge.svg)](https://github.com/networkbuster/networkbuster.net/actions/workflows/smoke-e2e-openai.yml) + + ## ๐Ÿฅ‡ Award-Winning Advanced Networking Platform **NetworkBuster** is the competition-winning advanced networking technology platform for space exploration and lunar operations. Featuring cutting-edge real-time visualization, interactive dashboards, and enterprise-grade automation. @@ -31,6 +35,11 @@ โœ… Git hooks for validation โœ… Mobile-responsive design +### CI: OpenAI secret validation & E2E smoke test ๐Ÿ”ฌ + +We added GitHub Actions workflows to validate that `OPENAI_API_KEY` is set and to perform a safe endโ€‘toโ€‘end smoke test that starts the app and calls `/api/recycle/recommend`. See the status badges above and the flow diagram in `docs/diagrams/openai-secret-flow.mmd` for details. 
+ + ### Competition Results | Category | Achievement | |----------|-------------| @@ -42,6 +51,46 @@ ## ๐Ÿš€ Get Started +### ๐ŸŽจ Visuals & small renders + +- Emoji stack (render): `docs/diagrams/emoji-stack.svg` + +#### ๐Ÿ–ผ๏ธ Render diagrams locally + +You can render Mermaid `.mmd` sources to SVG and PNG locally with the provided helper script: + +```powershell +# From the repository root +# - downloads a portable Node 24.x if missing (wait longer with -LongTimeout) +# - runs mermaid-cli to produce SVGs +# - installs Puppeteer (Chromium) and converts SVG -> PNG at configurable scale +. +.\scripts\render-local.ps1 [-LongTimeout] [-RenderScale ] +``` + +Options: +- `-UseNvm -AcceptUAC` โ€” use nvm-windows installer (requires UAC) instead of the portable Node download. +- `-SkipChromiumDownload` โ€” skip Puppeteer's Chromium download if you already have a compatible Chromium in PATH. +- `-LongTimeout` โ€” use longer timeouts & retries for downloads/Chromium install (helpful on flaky networks). +- `-RenderScale ` โ€” set PNG scale (default 2, CI uses 4 for hi-res). + +Notes & tips: +- Puppeteer will download Chromium (100+ MB); allow time and network access. โš ๏ธ +- The script writes PNGs to `docs/diagrams` and lists generated PNG files when finished. โœ… +- For CI rendering we provide `.github/workflows/render-diagrams.yml` which runs on GitHub runners and uploads PNG artifacts. + +### Android `antigravity` module +A small Kotlin Android module skeleton has been added at `android/antigravity/`. It includes Gradle files and a placeholder `MainActivity`. Add `google-services.json` to `android/antigravity/app/` if integrating Firebase (do not commit it; see `.gitignore`). + +### Google Cloud SDK helpers +Scripts added under `scripts/`: +- `scripts/setup-gcloud-sdk.ps1` โ€” download and (optionally) install Google Cloud SDK on Windows, and initialize it interactively. 
+- `scripts/gcloud-auth.ps1` โ€” authenticate with a service account JSON and set a project non-interactively. +- `scripts/gcloud-startup.ps1` โ€” interactive helper to sign in as `ceanskiier27@networkbuster.net`, set project, and enable common APIs (or run non-interactive service-account auth). + + + + ### View Live Demo Visit: https://networkbuster-mez5d7bmv-networkbuster.vercel.app @@ -58,6 +107,8 @@ npm start | Service | URL | |---------|-----| | Main Portal | / | + +![Emoji stack render](docs/diagrams/emoji-stack.svg) | Real-Time Overlay | /overlay | | Dashboard | /dashboard | | Blog | /blog | @@ -98,3 +149,23 @@ npm start **Last Updated**: December 3, 2025 **Version**: 1.0.0 **Status**: Active Development - Documentation Phase + +--- + +## ๐Ÿ“ฆ Distribution & Installation (Windows) + +- Build artifact (ZIP): `npm run dist:zip` โ€” creates `dist/-.zip` with required files. +- Create desktop launcher: `npm run release:create-shortcut` โ€” creates a shortcut called "NetworkBuster Launcher" on the current user desktop pointing to `start-desktop.bat`. +- Build NSIS installer: `npm run dist:nsis` โ€” builds an NSIS installer (requires NSIS / makensis in PATH). +- Start from desktop: Double click the created shortcut or run `npm run start:desktop`. + +Notes: +- The packaging scripts rely on `node`/`npm` being available in PATH and use PowerShell `Compress-Archive` on Windows. +- For a branded installer include an ICO at `scripts/installer/icon.ico` or place SVG/PNG assets in `scripts/installer/branding/`. You can generate an ICO from `scripts/installer/icon-placeholder.png` using `scripts/installer/convert-icon.ps1` (requires ImageMagick `magick`). +- An End User License Agreement (`scripts/installer/EULA.txt`) is bundled into the installer and is required. +- To test locally on Windows see `scripts/test-local-build.ps1` (requires Node, npm, Git, NSIS, and optionally ImageMagick). 
+- For CI, add a job that runs `npm run dist:zip`, `npm run dist:nsis` (on windows), archives `dist/` as release artifacts, and tags the release in GitHub. + +--- + +**Contributing:** See `CONTRIBUTING.md` for guidelines on releases and artifact verification. diff --git a/SECURE_FILES_NOT_TRACKED.md b/SECURE_FILES_NOT_TRACKED.md new file mode 100644 index 0000000..43dfb6b --- /dev/null +++ b/SECURE_FILES_NOT_TRACKED.md @@ -0,0 +1,8 @@ +# Secure / local files not tracked + +The following files contain sensitive or local configuration and are explicitly ignored in `.gitignore`: + +- `scripts/dummy-sa.json` (service account / credentials placeholder) +- `scripts/gcloud-startup.ps1` (local startup script) + +If you need to keep a local copy, store it outside the repository or in a secure vault. diff --git a/SECURITY-CHECK-REPORT.md b/SECURITY-CHECK-REPORT.md new file mode 100644 index 0000000..3349f3b --- /dev/null +++ b/SECURITY-CHECK-REPORT.md @@ -0,0 +1,69 @@ +# NetworkBuster System Check Report +**Generated:** January 2, 2026 + +## โœ… Security Enhancement Complete + +### ๐Ÿ” New Security System Implemented +Created comprehensive user verification module with: +- **Multi-layer Authentication** - Username/password with SHA-256 hashing +- **Access Control Levels** - 5-tier security clearance (Visitor โ†’ Root) +- **Failed Login Protection** - 3 attempts max, 5-minute lockout +- **Session Management** - Persistent sessions with 24-hour validity +- **Audit Logging** - All access attempts logged with timestamps +- **Alert System** - Real-time security event notifications + +### ๐Ÿ“‚ Files Enhanced +- **security_verification.py** - Core security module (NEW) +- **drone_flight_system.py** - Now requires Operator clearance (Level 3+) +- **launch.py** - Integrated security menu option `[s]` + +### ๐Ÿ›ก๏ธ Security Features +| Feature | Status | Details | +|---------|--------|---------| +| User Authentication | โœ… Active | SHA-256 hashed passwords | +| Session Tracking | 
โœ… Active | JSON-based session files | +| Access Logging | โœ… Active | `.security/access.log` | +| Alert System | โœ… Active | `.security/alerts.log` | +| Account Lockout | โœ… Active | 3 failed attempts = 5 min lock | +| Level-Based Access | โœ… Active | 5 security clearance levels | + +### ๐Ÿ“‹ Python Files Syntax Check + +| File | Status | Issues | +|------|--------|--------| +| launch.py | โœ… PASS | No syntax errors | +| drone_flight_system.py | โœ… PASS | No syntax errors | +| security_verification.py | โœ… PASS | No syntax errors | +| mobile_deployment.py | โœ… PASS | No syntax errors | +| cloud_devices.py | โœ… PASS | No syntax errors | +| system_health.py | โš ๏ธ WARN | psutil import (optional dependency) | +| service_manager.py | โœ… PASS | No syntax errors | +| auto_startup.py | โœ… PASS | No syntax errors | +| quick_admin.py | โœ… PASS | No syntax errors | +| admin_runner.py | โœ… PASS | No syntax errors | + +**Total Files Checked:** 10 +**Syntax Errors:** 0 +**Import Warnings:** 1 (psutil - optional) + +### ๐Ÿ”‘ Default Credentials +- **Username:** admin +- **Password:** admin123 +- **Security Level:** 4 (Admin) +- โš ๏ธ **Change password on first login!** + +### ๐Ÿ“ Security Files Location +``` +.security/ + โ”œโ”€โ”€ users.json # User database + โ”œโ”€โ”€ access.log # Access history + โ”œโ”€โ”€ alerts.log # Security alerts + โ””โ”€โ”€ active_session.json # Current session +``` + +### ๐Ÿš€ Usage +1. Run `python security_verification.py` for security management +2. Use `[s]` option in `launch.py` menu +3. Drone operations now auto-check security clearance + +## โœ… All Systems Operational diff --git a/Untitled-1.txt b/Untitled-1.txt new file mode 100644 index 0000000..e69de29 diff --git a/VERCEL-SETUP-TODO.md b/VERCEL-SETUP-TODO.md new file mode 100644 index 0000000..2e98da3 --- /dev/null +++ b/VERCEL-SETUP-TODO.md @@ -0,0 +1,55 @@ +# Vercel Domain Setup - TODO + +## ๐Ÿ“‹ Configuration Steps (To Be Completed Later) + +### 1. 
Prerequisites +- [ ] Vercel account created and authenticated +- [ ] Domain purchased and DNS accessible +- [ ] Project deployed to Vercel + +### 2. Domain Configuration +```bash +# Add domain to Vercel project +vercel domains add yourdomain.com + +# Add www subdomain +vercel domains add www.yourdomain.com +``` + +### 3. DNS Records Required +| Type | Name | Value | TTL | +|------|------|-------|-----| +| A | @ | 76.76.21.21 | 3600 | +| CNAME | www | cname.vercel-dns.com | 3600 | + +### 4. SSL/TLS Configuration +- Vercel automatically provisions SSL certificates +- HTTPS enforced by default +- Certificate auto-renewal enabled + +### 5. Environment Variables +Set in Vercel Dashboard or via CLI: +```bash +vercel env add DOMAIN_NAME production +vercel env add API_URL production +``` + +### 6. Custom Domain Script +Located at: [configure-custom-domain.ps1](configure-custom-domain.ps1) + +### 7. Verification Steps +- [ ] Domain resolves to Vercel IP +- [ ] HTTPS certificate valid +- [ ] www redirect works +- [ ] API endpoints accessible + +## ๐Ÿ”— Related Files +- [CUSTOM-DOMAIN-SETUP.md](CUSTOM-DOMAIN-SETUP.md) +- [VERCEL-DOMAIN-SETUP-GUIDE.md](VERCEL-DOMAIN-SETUP-GUIDE.md) +- [configure-custom-domain.ps1](configure-custom-domain.ps1) +- [vercel.json](vercel.json) + +## ๐Ÿ“Œ Notes +- Complete this configuration when ready to go live +- Ensure all security configurations are in place first +- Test on staging domain before production diff --git a/__pycache__/security_verification.cpython-314.pyc b/__pycache__/security_verification.cpython-314.pyc new file mode 100644 index 0000000..c1622c9 Binary files /dev/null and b/__pycache__/security_verification.cpython-314.pyc differ diff --git a/admin_runner.py b/admin_runner.py new file mode 100644 index 0000000..94d0c49 --- /dev/null +++ b/admin_runner.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Admin Runner +Run any script/command with elevated privileges on Windows +""" + +import ctypes +import sys 
+import os +import subprocess +from pathlib import Path + +PROJECT_PATH = Path(__file__).parent.resolve() + + +def is_admin(): + """Check if the script is running with administrator privileges.""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + + +def run_as_admin(command=None, script=None, wait=True): + """ + Re-run the current script or a specific command as administrator. + + Args: + command: Optional command to run (list of strings) + script: Optional script path to run + wait: Whether to wait for the process to complete + """ + if is_admin(): + print("โœ“ Already running as Administrator") + return True + + if command: + # Run a specific command elevated + cmd_str = ' '.join(command) if isinstance(command, list) else command + params = f'/c {cmd_str}' + executable = 'cmd.exe' + elif script: + # Run a specific script elevated + params = f'"{script}"' + executable = sys.executable + else: + # Re-run this script elevated + params = ' '.join([f'"{arg}"' for arg in sys.argv]) + executable = sys.executable + + print(f"โ†‘ Requesting Administrator privileges...") + + try: + result = ctypes.windll.shell32.ShellExecuteW( + None, # Parent window + "runas", # Operation (run as admin) + executable, # Program + params, # Parameters + str(PROJECT_PATH), # Working directory + 1 if wait else 0 # Show window + ) + + if result > 32: + print("โœ“ Elevated process started successfully") + return True + else: + print(f"โœ— Failed to elevate (error code: {result})") + return False + except Exception as e: + print(f"โœ— Error requesting elevation: {e}") + return False + + +def run_elevated_command(cmd, capture_output=False): + """ + Run a command that requires admin privileges. 
+ + Args: + cmd: Command as string or list + capture_output: Whether to capture and return output + """ + if not is_admin(): + print("โš  This command requires Administrator privileges") + return run_as_admin(command=cmd) + + if isinstance(cmd, str): + cmd = cmd.split() + + try: + result = subprocess.run( + cmd, + capture_output=capture_output, + text=True, + cwd=PROJECT_PATH + ) + return result if capture_output else result.returncode == 0 + except Exception as e: + print(f"โœ— Command failed: {e}") + return False + + +def main(): + """Main entry point - demonstrates admin capabilities.""" + print("=" * 60) + print(" NetworkBuster Admin Runner") + print("=" * 60) + print() + + if is_admin(): + print("โœ“ Running with Administrator privileges") + print() + + # Show what we can do as admin + print("Available admin operations:") + print(" 1. Manage Windows services") + print(" 2. Modify system firewall") + print(" 3. Access protected directories") + print(" 4. Run elevated PowerShell scripts") + print() + + # Example: Check execution policy + result = subprocess.run( + ["powershell", "-Command", "Get-ExecutionPolicy"], + capture_output=True, + text=True + ) + print(f"Current Execution Policy: {result.stdout.strip()}") + + else: + print("โš  Not running as Administrator") + print() + response = input("Would you like to restart with admin privileges? (y/n): ") + if response.lower() == 'y': + run_as_admin() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/ai-proxy-gateway.js b/ai-proxy-gateway.js new file mode 100644 index 0000000..49c898f --- /dev/null +++ b/ai-proxy-gateway.js @@ -0,0 +1,440 @@ +/** + * AI Proxy Gateway - Standalone gateway server for AI inference requests + * Routes requests from devices to multiple AI providers with authentication, + * rate limiting, caching, and usage tracking. 
+ * + * Run: node ai-proxy-gateway.js + * Port: AI_GATEWAY_PORT (default: 3002) + */ + +import express from 'express'; +import crypto from 'crypto'; + +// Dynamic import for aiProviders and device store +const aiProviders = await import('./lib/aiProviders.js').then(m => m.default); +let deviceStore; +try { + deviceStore = await import('./lib/deviceStore.js'); +} catch { + deviceStore = { + getRegistration: () => null, + saveRegistration: () => null + }; +} + +const app = express(); +const PORT = parseInt(process.env.AI_GATEWAY_PORT || '3002'); + +// Request logging +const requestLog = []; +const MAX_LOG_ENTRIES = 1000; + +function logRequest(req, status, duration, provider = null) { + const entry = { + id: crypto.randomUUID?.() || crypto.randomBytes(8).toString('hex'), + timestamp: new Date().toISOString(), + method: req.method, + path: req.path, + deviceId: req.deviceId || 'unknown', + provider, + status, + duration, + ip: req.ip || req.connection?.remoteAddress + }; + + requestLog.unshift(entry); + if (requestLog.length > MAX_LOG_ENTRIES) requestLog.pop(); + + console.log(`[${entry.timestamp}] ${entry.method} ${entry.path} -> ${status} (${duration}ms) device:${entry.deviceId}`); +} + +// Middleware +app.use(express.json({ limit: '1mb' })); + +// CORS for all origins (devices may come from anywhere) +app.use((req, res, next) => { + res.setHeader('Access-Control-Allow-Origin', '*'); + res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS'); + res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-Device-Id, X-API-Key'); + + if (req.method === 'OPTIONS') { + res.writeHead(200); + return res.end(); + } + next(); +}); + +// Request timing +app.use((req, res, next) => { + req.startTime = Date.now(); + next(); +}); + +// Device authentication middleware +function authenticateDevice(req, res, next) { + const deviceId = req.headers['x-device-id'] || req.query.deviceId; + const apiKey = req.headers['x-api-key'] || 
req.headers['authorization']?.replace('Bearer ', ''); + + // API key authentication + if (apiKey && (apiKey === process.env.AI_API_KEY || apiKey === process.env.ADMIN_KEY)) { + req.deviceId = 'api-key-user'; + req.authenticated = true; + req.isAdmin = apiKey === process.env.ADMIN_KEY; + return next(); + } + + // Device ID authentication + if (deviceId) { + const device = deviceStore.getRegistration?.(deviceId); + if (device) { + req.deviceId = deviceId; + req.device = device; + req.authenticated = true; + return next(); + } + // Allow unregistered device IDs if configured + if (process.env.AI_ALLOW_UNREGISTERED === 'true') { + req.deviceId = deviceId; + req.authenticated = false; + return next(); + } + } + + // Anonymous access + if (process.env.AI_ALLOW_ANONYMOUS === 'true') { + req.deviceId = 'anon-' + crypto.randomBytes(4).toString('hex'); + req.authenticated = false; + return next(); + } + + const duration = Date.now() - req.startTime; + logRequest(req, 401, duration); + return res.status(401).json({ + error: 'Authentication required', + hint: 'Provide X-Device-Id or X-API-Key header' + }); +} + +// Rate limit headers +function addRateLimitHeaders(req, res) { + if (req.deviceId) { + const info = aiProviders.checkRateLimit(req.deviceId); + res.setHeader('X-RateLimit-Limit', process.env.AI_RATE_LIMIT_PER_MINUTE || '60'); + res.setHeader('X-RateLimit-Remaining', info.remaining); + res.setHeader('X-RateLimit-Reset', info.resetIn); + } +} + +// ============ ROUTES ============ + +// Health check (no auth required) +app.get('/health', (req, res) => { + const providers = aiProviders.getAvailableProviders(); + res.json({ + status: 'healthy', + service: 'ai-proxy-gateway', + port: PORT, + providers: providers.length, + defaultProvider: aiProviders.getDefaultProvider(), + uptime: process.uptime(), + timestamp: new Date().toISOString() + }); +}); + +// List providers (no auth required) +app.get('/providers', (req, res) => { + res.json({ + providers: 
aiProviders.getAvailableProviders(), + default: aiProviders.getDefaultProvider() + }); +}); + +// Chat completion +app.post('/chat', authenticateDevice, async (req, res) => { + const startTime = Date.now(); + + try { + addRateLimitHeaders(req, res); + + const { + provider = aiProviders.getDefaultProvider(), + messages, + model, + maxTokens, + temperature, + useCache = true, + stream = false + } = req.body; + + if (!messages || !Array.isArray(messages)) { + logRequest(req, 400, Date.now() - startTime, provider); + return res.status(400).json({ error: 'messages array required' }); + } + + if (!provider) { + logRequest(req, 503, Date.now() - startTime); + return res.status(503).json({ error: 'No AI providers configured' }); + } + + const result = await aiProviders.chat(provider, messages, { + model, + maxTokens, + temperature, + deviceId: req.deviceId, + useCache, + stream + }); + + if (stream && result instanceof ReadableStream) { + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + + const reader = result.getReader(); + const decoder = new TextDecoder(); + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + res.write(value); + } + } catch (err) { + console.error('Streaming error:', err); + } finally { + res.end(); + logRequest(req, 200, Date.now() - startTime, provider + ' (streamed)'); + } + return; + } + + const tokens = result.usage?.total_tokens || 0; + aiProviders.trackUsage(req.deviceId, provider, 'chat', tokens); + + logRequest(req, 200, Date.now() - startTime, provider); + res.json({ success: true, ...result }); + + } catch (err) { + const status = err.message.includes('Rate limit') ? 
429 : 500; + logRequest(req, status, Date.now() - startTime); + res.status(status).json({ error: err.message }); + } +}); + +// Embeddings +app.post('/embed', authenticateDevice, async (req, res) => { + const startTime = Date.now(); + + try { + addRateLimitHeaders(req, res); + + const { + provider = aiProviders.getDefaultProvider(), + text, + model + } = req.body; + + if (!text) { + logRequest(req, 400, Date.now() - startTime, provider); + return res.status(400).json({ error: 'text required' }); + } + + const result = await aiProviders.embed(provider, text, { + model, + deviceId: req.deviceId + }); + + aiProviders.trackUsage(req.deviceId, provider, 'embed', result.usage?.total_tokens || 0); + + logRequest(req, 200, Date.now() - startTime, provider); + res.json({ success: true, ...result }); + + } catch (err) { + const status = err.message.includes('Rate limit') ? 429 : + err.message.includes('does not support') ? 400 : 500; + logRequest(req, status, Date.now() - startTime); + res.status(status).json({ error: err.message }); + } +}); + +// Image generation +app.post('/image', authenticateDevice, async (req, res) => { + const startTime = Date.now(); + + try { + addRateLimitHeaders(req, res); + + const { + provider = 'openai', + prompt, + model, + size, + quality, + n + } = req.body; + + if (!prompt) { + logRequest(req, 400, Date.now() - startTime, provider); + return res.status(400).json({ error: 'prompt required' }); + } + + const result = await aiProviders.generateImage(provider, prompt, { + model, + size, + quality, + n, + deviceId: req.deviceId + }); + + aiProviders.trackUsage(req.deviceId, provider, 'image', 0); + + logRequest(req, 200, Date.now() - startTime, provider); + res.json({ success: true, ...result }); + + } catch (err) { + const status = err.message.includes('Rate limit') ? 429 : + err.message.includes('does not support') ? 
400 : 500; + logRequest(req, status, Date.now() - startTime); + res.status(status).json({ error: err.message }); + } +}); + +// Device usage +app.get('/usage', authenticateDevice, (req, res) => { + addRateLimitHeaders(req, res); + const usage = aiProviders.getDeviceUsage(req.deviceId); + const rateInfo = aiProviders.checkRateLimit(req.deviceId); + + res.json({ + deviceId: req.deviceId, + usage, + rateLimit: { + limit: parseInt(process.env.AI_RATE_LIMIT_PER_MINUTE || '60'), + remaining: rateInfo.remaining, + resetIn: rateInfo.resetIn + } + }); +}); + +// Admin: all usage +app.get('/usage/all', authenticateDevice, (req, res) => { + if (!req.isAdmin) { + return res.status(403).json({ error: 'Admin access required' }); + } + + res.json({ + usage: aiProviders.getAllUsage(), + timestamp: new Date().toISOString() + }); +}); + +// Admin: request logs +app.get('/logs', authenticateDevice, (req, res) => { + if (!req.isAdmin) { + return res.status(403).json({ error: 'Admin access required' }); + } + + const limit = parseInt(req.query.limit || '100'); + res.json({ + logs: requestLog.slice(0, limit), + total: requestLog.length + }); +}); + +// Gateway status +app.get('/status', (req, res) => { + const providers = aiProviders.getAvailableProviders(); + const allUsage = aiProviders.getAllUsage(); + + let totalRequests = 0; + let totalTokens = 0; + for (const usage of Object.values(allUsage)) { + totalRequests += usage.requests || 0; + totalTokens += usage.tokens || 0; + } + + res.json({ + gateway: { + status: 'running', + port: PORT, + uptime: Math.floor(process.uptime()), + memory: Math.round(process.memoryUsage().heapUsed / 1024 / 1024) + 'MB' + }, + providers: { + available: providers.length, + default: aiProviders.getDefaultProvider(), + list: providers.map(p => ({ id: p.id, name: p.name, capabilities: p.capabilities })) + }, + stats: { + activeDevices: Object.keys(allUsage).length, + totalRequests, + totalTokens, + recentLogs: requestLog.length + }, + config: { + 
rateLimitPerMinute: parseInt(process.env.AI_RATE_LIMIT_PER_MINUTE || '60'), + cacheTTL: parseInt(process.env.AI_CACHE_TTL_SECONDS || '300'), + allowAnonymous: process.env.AI_ALLOW_ANONYMOUS === 'true', + allowUnregistered: process.env.AI_ALLOW_UNREGISTERED === 'true' + }, + timestamp: new Date().toISOString() + }); +}); + +// 404 handler +app.use((req, res) => { + res.status(404).json({ + error: 'Not found', + endpoints: [ + 'GET /health', + 'GET /providers', + 'GET /status', + 'POST /chat', + 'POST /embed', + 'POST /image', + 'GET /usage', + 'GET /usage/all (admin)', + 'GET /logs (admin)' + ] + }); +}); + +// Error handler +app.use((err, req, res, next) => { + console.error('Gateway error:', err); + res.status(500).json({ error: 'Internal gateway error' }); +}); + +// Start server +app.listen(PORT, '0.0.0.0', () => { + console.log(` +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ ๐Ÿค– AI Proxy Gateway Started โ•‘ +โ• โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•ฃ +โ•‘ Port: ${String(PORT).padEnd(44)}โ•‘ +โ•‘ Health: http://localhost:${PORT}/health${' '.repeat(24 - String(PORT).length)}โ•‘ +โ•‘ Status: http://localhost:${PORT}/status${' '.repeat(24 - String(PORT).length)}โ•‘ +โ• โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•ฃ +โ•‘ Providers Available: โ•‘`); + + const providers = aiProviders.getAvailableProviders(); + for (const p of providers) { + const caps = Object.entries(p.capabilities) + .filter(([, v]) => v) + .map(([k]) => k) + .join(', '); + console.log(`โ•‘ โœ“ ${p.name.padEnd(20)} (${caps})`.padEnd(59) + 'โ•‘'); + } + + if (providers.length === 0) { + 
console.log('โ•‘ โš  No providers configured - set API keys โ•‘'); + } + + console.log(`โ• โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•ฃ +โ•‘ Rate Limit: ${(process.env.AI_RATE_LIMIT_PER_MINUTE || '60') + '/min'.padEnd(42)}โ•‘ +โ•‘ Cache TTL: ${(process.env.AI_CACHE_TTL_SECONDS || '300') + 's'.padEnd(43)}โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +`); +}); + +export default app; diff --git a/android/antigravity/.github/workflows/build-apk.yml b/android/antigravity/.github/workflows/build-apk.yml new file mode 100644 index 0000000..06a8364 --- /dev/null +++ b/android/antigravity/.github/workflows/build-apk.yml @@ -0,0 +1,53 @@ +name: Build Debug APK + +on: + push: + branches: [ main, master ] + workflow_dispatch: {} + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up JDK 17 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '17' + + - name: Set up Android SDK + uses: android-actions/setup-android@v2 + with: + api-level: 34 + components: build-tools;34.0.0 + + - name: Install Gradle + uses: gradle/gradle-build-action@v2 + with: + # action will provide Gradle on the PATH + check-latest: true + + - name: Ensure gradlew exists + run: | + if [ ! -f "./gradlew" ]; then + echo "No gradlew wrapper found. Generating wrapper using Gradle..." 
+ gradle wrapper --gradle-version 8.4 + else + echo "gradlew found" + fi + + - name: Make gradlew executable + run: chmod +x gradlew || true + + - name: Build Debug APK + run: ./gradlew assembleDebug --no-daemon + + - name: Upload APK artifact + uses: actions/upload-artifact@v4 + with: + name: app-debug-apk + path: app/build/outputs/apk/debug/app-debug.apk diff --git a/android/antigravity/README.md b/android/antigravity/README.md new file mode 100644 index 0000000..7cda8e7 --- /dev/null +++ b/android/antigravity/README.md @@ -0,0 +1,8 @@ +Antigravity Android module (placeholder) + +- Kotlin app module under `app/` +- Add `google-services.json` to `app/` if integrating Firebase (do not commit it; see `.gitignore`) +- Build using Android Studio or Gradle CLI (this repo does not include Android SDK tooling) + +To connect to Google Cloud services from this module, use a service account and the +`gcloud` or `firebase` CLIs; see `scripts/setup-gcloud-sdk.ps1` and `scripts/gcloud-auth.ps1`. \ No newline at end of file diff --git a/android/antigravity/app/build.gradle b/android/antigravity/app/build.gradle new file mode 100644 index 0000000..6e074b5 --- /dev/null +++ b/android/antigravity/app/build.gradle @@ -0,0 +1,30 @@ +plugins { + id 'com.android.application' + id 'kotlin-android' +} + +android { + compileSdkVersion 34 + + defaultConfig { + applicationId "net.networkbuster.antigravity" + minSdkVersion 21 + targetSdkVersion 34 + versionCode 1 + versionName "1.0" + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + } +} + +dependencies { + implementation "org.jetbrains.kotlin:kotlin-stdlib:1.8.0" + implementation 'androidx.core:core-ktx:1.9.0' + implementation 'androidx.appcompat:appcompat:1.6.1' + implementation 'com.google.android.material:material:1.8.0' +} diff --git a/android/antigravity/app/luna_eu_repo b/android/antigravity/app/luna_eu_repo new file mode 160000 
index 0000000..d5f49a4 --- /dev/null +++ b/android/antigravity/app/luna_eu_repo @@ -0,0 +1 @@ +Subproject commit d5f49a43814387efd17213f7d1128f40fe906f14 diff --git a/android/antigravity/app/networkbuster.net_repo b/android/antigravity/app/networkbuster.net_repo new file mode 160000 index 0000000..db9ed7a --- /dev/null +++ b/android/antigravity/app/networkbuster.net_repo @@ -0,0 +1 @@ +Subproject commit db9ed7a72f7943df4de9c864afd53b35de4e3cdf diff --git a/android/antigravity/app/networkbuster_nb_repo b/android/antigravity/app/networkbuster_nb_repo new file mode 160000 index 0000000..7d4d6d7 --- /dev/null +++ b/android/antigravity/app/networkbuster_nb_repo @@ -0,0 +1 @@ +Subproject commit 7d4d6d71ffdf30e8ab7b861ef5efff1f8bedf516 diff --git a/android/antigravity/app/networkbuster_net_repo b/android/antigravity/app/networkbuster_net_repo new file mode 160000 index 0000000..7d4d6d7 --- /dev/null +++ b/android/antigravity/app/networkbuster_net_repo @@ -0,0 +1 @@ +Subproject commit 7d4d6d71ffdf30e8ab7b861ef5efff1f8bedf516 diff --git a/android/antigravity/app/proguard-rules.pro b/android/antigravity/app/proguard-rules.pro new file mode 100644 index 0000000..8ce17ba --- /dev/null +++ b/android/antigravity/app/proguard-rules.pro @@ -0,0 +1,2 @@ +# proguard rules placeholder +-keep class net.networkbuster.antigravity.** { *; } \ No newline at end of file diff --git a/android/antigravity/app/src/main/AndroidManifest.xml b/android/antigravity/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000..f36bd4b --- /dev/null +++ b/android/antigravity/app/src/main/AndroidManifest.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt b/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt new file mode 100644 index 0000000..e6f4eae --- /dev/null +++ 
b/android/antigravity/app/src/main/java/net/networkbuster/antigravity/MainActivity.kt @@ -0,0 +1,44 @@ +package net.networkbuster.antigravity + +import android.content.Intent +import android.net.Uri +import android.os.Build +import android.os.Bundle +import android.provider.Settings +import android.widget.Button +import androidx.appcompat.app.AppCompatActivity +import androidx.appcompat.app.AlertDialog + +class MainActivity : AppCompatActivity() { + override fun onCreate(savedInstanceState: Bundle?) { + super.onCreate(savedInstanceState) + setContentView(R.layout.activity_main) + + val btn = findViewById + + + + + +
+
โ•โ•โ• RECENT API TRACES (Last 50) โ•โ•โ•
+
+ + + + + + + + + + + + +
TIMESTAMPSERVICEMETHODENDPOINTSTATUSTIME
+
+
+ +
+
โ•โ•โ• REGISTERED API ENDPOINTS โ•โ•โ•
+
+
+
+
+ + + + +""" + +@app.route('/') +def index(): + return render_template_string(TRACER_HTML) + +@app.route('/api/trace') +def get_traces(): + return jsonify({ + 'traces': traces, + 'count': len(traces), + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/api/trace/service/') +def get_service_traces(service): + service_traces = [t for t in traces if t['service'] == service] + return jsonify({ + 'service': service, + 'traces': service_traces, + 'count': len(service_traces) + }) + +@app.route('/api/endpoints') +def get_endpoints(): + return jsonify({ + 'endpoints': API_ENDPOINTS, + 'total': sum(len(config['endpoints']) for config in API_ENDPOINTS.values()) + }) + +@app.route('/api/stats') +def get_stats(): + return jsonify({ + 'stats': dict(stats), + 'total_traces': len(traces), + 'services': len(API_ENDPOINTS), + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/api/trace/clear', methods=['POST']) +def clear_traces(): + global traces, stats + traces = [] + stats = defaultdict(lambda: {'calls': 0, 'success': 0, 'failure': 0, 'avg_time': 0, 'total_time': 0}) + return jsonify({'success': True, 'message': 'Traces cleared'}) + +@app.route('/health') +def health(): + return jsonify({ + 'status': 'healthy', + 'service': 'api-tracer', + 'traces': len(traces), + 'timestamp': datetime.now().isoformat() + }) + +if __name__ == '__main__': + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster - API Endpoint Tracer Module โ•‘ +โ•‘ Real-time API monitoring and tracing โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + # Start auto-trace thread + trace_thread = threading.Thread(target=auto_trace_loop, daemon=True) + trace_thread.start() + print("๐Ÿ” 
Auto-trace thread started (10s interval)") + + print("๐Ÿš€ Starting API Tracer on http://localhost:8000") + print("โšก Monitoring all NetworkBuster API endpoints") + print("") + + app.run(host='0.0.0.0', port=8000, debug=False) diff --git a/auth-ui/v750/server.js b/auth-ui/v750/server.js index 4137920..28dda08 100644 --- a/auth-ui/v750/server.js +++ b/auth-ui/v750/server.js @@ -33,6 +33,57 @@ app.use((req, res, next) => { const users = new Map(); const sessions = new Map(); +// Feature flag: enable/disable auth endpoints +const AUTH_ENABLED = process.env.AUTH_ENABLED === 'true'; + +if (!AUTH_ENABLED) { + console.log('โš ๏ธ Auth UI disabled (AUTH_ENABLED != "true") - running minimal service'); + + // Minimal health endpoints and a single root page + app.get('/health', (req, res) => { + res.json({ + status: 'disabled', + service: 'auth-ui-v750', + message: 'Authentication endpoints are disabled', + timestamp: new Date().toISOString() + }); + }); + + app.get('/api/health', (req, res) => { + res.json({ + status: 'disabled', + version: 'v750' + }); + }); + + app.get('/', (req, res) => { + res.sendFile(path.join(__dirname, 'index.html')); + }); + + // All auth routes respond with 503 (service unavailable) + app.use('/api/auth', (req, res) => { + res.status(503).json({ + success: false, + message: 'Authentication is disabled by configuration' + }); + }); + + app.use((req, res) => { + res.status(404).json({ + success: false, + message: 'Not found or disabled', + path: req.path + }); + }); + + app.listen(PORT, () => { + console.log(`Auth UI (minimal) listening at http://localhost:${PORT}`); + }); + + // Stop further route registration + return; +} + // Health check app.get('/health', (req, res) => { res.json({ diff --git a/auto_start_service.py b/auto_start_service.py new file mode 100644 index 0000000..b715e9b --- /dev/null +++ b/auto_start_service.py @@ -0,0 +1,134 @@ +""" +NetworkBuster Auto-Start Service +Runs in background and auto-starts services on trigger events 
+""" + +import os +import sys +import time +import subprocess +import psutil +from pathlib import Path +import ctypes + +def is_admin(): + """Check if running with admin privileges""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + +def check_port(port): + """Check if port is in use""" + for conn in psutil.net_connections(): + if conn.laddr.port == port and conn.status == 'LISTEN': + return True + return False + +def start_service(service_name, command, port): + """Start a service if not already running""" + if check_port(port): + print(f"โœ… {service_name} already running on port {port}") + return True + + try: + print(f"๐Ÿš€ Starting {service_name}...") + subprocess.Popen(command, shell=True, creationflags=subprocess.CREATE_NEW_CONSOLE) + time.sleep(2) + + if check_port(port): + print(f"โœ… {service_name} started successfully") + return True + else: + print(f"โš ๏ธ {service_name} may be starting...") + return False + except Exception as e: + print(f"โŒ Failed to start {service_name}: {e}") + return False + +def auto_start_all(): + """Automatically start all NetworkBuster services""" + print("\nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—") + print("โ•‘ NetworkBuster Auto-Start Service โ•‘") + print("โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n") + + project_dir = Path(__file__).parent + python_exe = project_dir / ".venv" / "Scripts" / "python.exe" + + services = [ + { + 'name': 'Web Server', + 'command': f'node "{project_dir}/server-universal.js"', + 'port': 3000, + 'delay': 0 + }, + { + 'name': 'API Server', + 'command': f'cd "{project_dir}/api" && node server-universal.js', + 'port': 3001, + 'delay': 2 + }, + { + 'name': 'Audio Stream', + 
'command': f'node "{project_dir}/server-audio.js"', + 'port': 3002, + 'delay': 2 + }, + { + 'name': 'NetworkBuster AI', + 'command': f'"{python_exe}" "{project_dir}/networkbuster_ai.py"', + 'port': 4000, + 'delay': 2 + }, + { + 'name': 'Mission Control', + 'command': f'"{python_exe}" "{project_dir}/nasa_home_base.py"', + 'port': 5000, + 'delay': 2 + }, + { + 'name': 'Network Map', + 'command': f'"{python_exe}" "{project_dir}/network_map_viewer.py"', + 'port': 6000, + 'delay': 2 + }, + { + 'name': 'Universal Launcher', + 'command': f'"{python_exe}" "{project_dir}/universal_launcher.py"', + 'port': 7000, + 'delay': 2 + }, + { + 'name': 'API Tracer', + 'command': f'"{python_exe}" "{project_dir}/api_tracer.py"', + 'port': 8000, + 'delay': 2 + } + ] + + started = 0 + for service in services: + time.sleep(service['delay']) + if start_service(service['name'], service['command'], service['port']): + started += 1 + + print(f"\nโœ… Auto-start complete: {started}/{len(services)} services running") + + # Open dashboard after startup + time.sleep(3) + print("\n๐ŸŒ Opening Universal Launcher...") + subprocess.Popen('start http://localhost:7000', shell=True) + + return started + +if __name__ == '__main__': + # Check for admin if needed + if len(sys.argv) > 1 and sys.argv[1] == '--admin' and not is_admin(): + print("โš ๏ธ Requesting administrator privileges...") + ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1) + sys.exit(0) + + auto_start_all() + + print("\nโœจ Press Enter to exit...") + input() diff --git a/auto_startup.py b/auto_startup.py new file mode 100644 index 0000000..3969725 --- /dev/null +++ b/auto_startup.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Auto-Startup Manager +Configure automatic startup of services on Windows boot +""" + +import ctypes +import subprocess +import sys +import os +import winreg +from pathlib import Path + +PROJECT_PATH = Path(__file__).parent.resolve() + + +def is_admin(): 
+ """Check if running as administrator.""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + + +def run_as_admin(): + """Restart script with admin privileges.""" + if is_admin(): + return True + + print("โ†‘ Requesting Administrator privileges...") + ctypes.windll.shell32.ShellExecuteW( + None, "runas", sys.executable, + ' '.join([f'"{arg}"' for arg in sys.argv]), + str(PROJECT_PATH), 1 + ) + sys.exit(0) + + +def run_powershell(command): + """Run PowerShell command.""" + result = subprocess.run( + ["powershell", "-NoProfile", "-ExecutionPolicy", "Bypass", "-Command", command], + capture_output=True, + text=True + ) + return result + + +class AutoStartupManager: + """Manage auto-startup configurations.""" + + def __init__(self): + self.task_prefix = "NetworkBuster" + self.startup_scripts = { + "servers": { + "name": "NetworkBuster-Servers", + "description": "Start NetworkBuster Web, API, and Audio servers", + "command": f'node "{PROJECT_PATH / "start-servers.js"}"', + "working_dir": str(PROJECT_PATH) + }, + "health": { + "name": "NetworkBuster-HealthMonitor", + "description": "Start NetworkBuster health monitoring", + "command": f'python "{PROJECT_PATH / "system_health.py"}" --monitor 60', + "working_dir": str(PROJECT_PATH) + }, + "power": { + "name": "NetworkBuster-PowerManager", + "description": "Start NetworkBuster power management", + "command": f'node "{PROJECT_PATH / "power-manager.js"}"', + "working_dir": str(PROJECT_PATH) + } + } + + def create_startup_task(self, task_key, run_at_logon=True, run_as_admin_task=False): + """Create a Windows Task Scheduler task for auto-startup.""" + if task_key not in self.startup_scripts: + print(f"โœ— Unknown task: {task_key}") + return False + + task = self.startup_scripts[task_key] + + # Build the PowerShell command to create the task + trigger_type = "AtLogon" if run_at_logon else "AtStartup" + run_level = "Highest" if run_as_admin_task else "Limited" + + ps_script = f''' +$taskName = 
"{task['name']}" +$description = "{task['description']}" + +# Remove existing task if present +Unregister-ScheduledTask -TaskName $taskName -Confirm:$false -ErrorAction SilentlyContinue + +# Create action +$action = New-ScheduledTaskAction -Execute "cmd.exe" -Argument '/c {task["command"]}' -WorkingDirectory "{task['working_dir']}" + +# Create trigger +$trigger = New-ScheduledTaskTrigger -{trigger_type} + +# Create settings +$settings = New-ScheduledTaskSettingsSet -AllowStartIfOnBatteries -DontStopIfGoingOnBatteries -StartWhenAvailable -ExecutionTimeLimit (New-TimeSpan -Hours 0) + +# Create principal +$principal = New-ScheduledTaskPrincipal -UserId "$env:USERNAME" -LogonType Interactive -RunLevel {run_level} + +# Register task +$task = Register-ScheduledTask -TaskName $taskName -Description $description -Action $action -Trigger $trigger -Settings $settings -Principal $principal + +if ($task) {{ + Write-Output "SUCCESS: Task '$taskName' created" +}} else {{ + Write-Output "FAILED: Could not create task" +}} +''' + + print(f"๐Ÿ“Œ Creating startup task: {task['name']}") + result = run_powershell(ps_script) + + if "SUCCESS" in result.stdout: + print(f"โœ“ Task created: {task['name']}") + print(f" Trigger: {trigger_type}") + print(f" Run Level: {run_level}") + return True + else: + print(f"โœ— Failed: {result.stderr or result.stdout}") + return False + + def remove_startup_task(self, task_key): + """Remove a startup task.""" + if task_key not in self.startup_scripts: + print(f"โœ— Unknown task: {task_key}") + return False + + task_name = self.startup_scripts[task_key]["name"] + + result = run_powershell(f'Unregister-ScheduledTask -TaskName "{task_name}" -Confirm:$false') + + if result.returncode == 0: + print(f"โœ“ Task removed: {task_name}") + return True + else: + print(f"โš  Task not found or already removed: {task_name}") + return False + + def list_tasks(self): + """List all NetworkBuster scheduled tasks.""" + print("\n๐Ÿ“‹ NetworkBuster Scheduled Tasks:") + 
print("-" * 60) + + result = run_powershell(''' +Get-ScheduledTask | Where-Object {$_.TaskName -like "NetworkBuster*"} | ForEach-Object { + $info = Get-ScheduledTaskInfo -TaskName $_.TaskName -ErrorAction SilentlyContinue + [PSCustomObject]@{ + Name = $_.TaskName + State = $_.State + LastRun = if ($info.LastRunTime) { $info.LastRunTime.ToString("yyyy-MM-dd HH:mm") } else { "Never" } + NextRun = if ($info.NextRunTime) { $info.NextRunTime.ToString("yyyy-MM-dd HH:mm") } else { "N/A" } + } +} | Format-Table -AutoSize +''') + + if result.stdout.strip(): + print(result.stdout) + else: + print(" No NetworkBuster tasks found") + print("\n Available tasks to create:") + for key, task in self.startup_scripts.items(): + print(f" - {key}: {task['description']}") + + def run_task_now(self, task_key): + """Manually run a scheduled task.""" + if task_key not in self.startup_scripts: + print(f"โœ— Unknown task: {task_key}") + return False + + task_name = self.startup_scripts[task_key]["name"] + + result = run_powershell(f'Start-ScheduledTask -TaskName "{task_name}"') + + if result.returncode == 0: + print(f"โœ“ Task started: {task_name}") + return True + else: + print(f"โœ— Failed to start task: {result.stderr}") + return False + + def add_to_registry_startup(self, name, command): + """Add program to Windows Registry startup (current user).""" + try: + key = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Run", + 0, + winreg.KEY_SET_VALUE + ) + winreg.SetValueEx(key, name, 0, winreg.REG_SZ, command) + winreg.CloseKey(key) + print(f"โœ“ Added to Registry startup: {name}") + return True + except Exception as e: + print(f"โœ— Failed to add to Registry: {e}") + return False + + def remove_from_registry_startup(self, name): + """Remove program from Windows Registry startup.""" + try: + key = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Run", + 0, + winreg.KEY_SET_VALUE + ) + winreg.DeleteValue(key, 
name) + winreg.CloseKey(key) + print(f"โœ“ Removed from Registry startup: {name}") + return True + except FileNotFoundError: + print(f"โš  Entry not found: {name}") + return False + except Exception as e: + print(f"โœ— Failed to remove from Registry: {e}") + return False + + def create_startup_batch(self): + """Create a batch file for startup folder.""" + batch_content = f'''@echo off +title NetworkBuster Auto-Start +cd /d "{PROJECT_PATH}" + +echo Starting NetworkBuster Services... +echo. + +:: Start servers in background +start "NetworkBuster Servers" /min cmd /c "node start-servers.js" + +:: Wait a moment +timeout /t 5 /nobreak > nul + +echo NetworkBuster services started! +echo. +echo Close this window or it will close in 10 seconds... +timeout /t 10 +''' + + batch_file = PROJECT_PATH / "networkbuster-autostart.bat" + with open(batch_file, "w") as f: + f.write(batch_content) + + print(f"โœ“ Created startup batch: {batch_file}") + print("\n To add to startup folder, run:") + print(f' copy "{batch_file}" "%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\"') + + return batch_file + + def setup_all(self, elevated=False): + """Set up all auto-startup configurations.""" + print("\n๐Ÿš€ Setting up NetworkBuster Auto-Startup") + print("=" * 60) + + if elevated and not is_admin(): + run_as_admin() + + # Create scheduled tasks + print("\n[1/3] Creating scheduled tasks...") + self.create_startup_task("servers", run_at_logon=True, run_as_admin_task=elevated) + + # Create startup batch + print("\n[2/3] Creating startup batch file...") + self.create_startup_batch() + + # List created tasks + print("\n[3/3] Verifying setup...") + self.list_tasks() + + print("\nโœ“ Auto-startup setup complete!") + + +def main(): + """Main menu.""" + manager = AutoStartupManager() + + print("=" * 60) + print(" NetworkBuster Auto-Startup Manager") + print("=" * 60) + + admin_status = "โœ“ Administrator" if is_admin() else "โš  Standard User" + print(f" Status: {admin_status}") + + 
while True: + print("\n๐Ÿ“‹ Menu:") + print(" 1. List scheduled tasks") + print(" 2. Create server startup task") + print(" 3. Create health monitor task") + print(" 4. Remove a task") + print(" 5. Run task now") + print(" 6. Create startup batch file") + print(" 7. Setup all (recommended)") + print(" 8. Exit") + print() + + choice = input("Select option (1-8): ").strip() + + if choice == "1": + manager.list_tasks() + elif choice == "2": + elevated = input("Run as admin? (y/n): ").lower() == 'y' + manager.create_startup_task("servers", run_as_admin_task=elevated) + elif choice == "3": + manager.create_startup_task("health") + elif choice == "4": + print("Available: servers, health, power") + task = input("Task to remove: ").strip() + manager.remove_startup_task(task) + elif choice == "5": + print("Available: servers, health, power") + task = input("Task to run: ").strip() + manager.run_task_now(task) + elif choice == "6": + manager.create_startup_batch() + elif choice == "7": + elevated = input("Setup with admin privileges? (y/n): ").lower() == 'y' + manager.setup_all(elevated) + elif choice == "8": + print("๐Ÿ‘‹ Goodbye!") + break + else: + print("Invalid option") + + +if __name__ == "__main__": + main() diff --git a/backup.bat b/backup.bat new file mode 100644 index 0000000..ac40e80 --- /dev/null +++ b/backup.bat @@ -0,0 +1,11 @@ +@echo off +REM Quick Git Backup + +cd /d "%~dp0" + +echo Backing up to D: and K: drives... 
+ +call .venv\Scripts\activate.bat +python flash_git_backup.py + +pause diff --git a/challengerepo/real-time-overlay/package-lock.json b/challengerepo/real-time-overlay/package-lock.json index bd5ed9e..3fd5440 100644 --- a/challengerepo/real-time-overlay/package-lock.json +++ b/challengerepo/real-time-overlay/package-lock.json @@ -31,6 +31,7 @@ "eslint-plugin-react": "^7.34.2", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", + "terser": "^5.44.1", "vite": "^5.3.1" } }, @@ -77,6 +78,7 @@ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -865,6 +867,17 @@ "node": ">=6.0.0" } }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.11.tgz", + "integrity": "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -1064,6 +1077,7 @@ "resolved": "https://registry.npmjs.org/@react-three/fiber/-/fiber-8.18.0.tgz", "integrity": "sha512-FYZZqD0UUHUswKz3LQl2Z7H24AhD14XGTsIRw3SJaXUxyfVMi+1yiZGmqTcPt/CkPpdU7rrxqcyQ1zJE5DjvIQ==", "license": "MIT", + "peer": true, "dependencies": { "@babel/runtime": "^7.17.8", "@types/react-reconciler": "^0.26.7", @@ -1133,9 +1147,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz", - "integrity": 
"sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz", + "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==", "cpu": [ "arm" ], @@ -1147,9 +1161,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz", - "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz", + "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==", "cpu": [ "arm64" ], @@ -1161,9 +1175,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz", - "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz", + "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==", "cpu": [ "arm64" ], @@ -1175,9 +1189,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz", - "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz", + "integrity": 
"sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==", "cpu": [ "x64" ], @@ -1189,9 +1203,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz", - "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz", + "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==", "cpu": [ "arm64" ], @@ -1203,9 +1217,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz", - "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz", + "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==", "cpu": [ "x64" ], @@ -1217,9 +1231,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz", - "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz", + "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==", "cpu": [ "arm" ], @@ -1231,9 +1245,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.53.3", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz", - "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz", + "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==", "cpu": [ "arm" ], @@ -1245,9 +1259,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz", - "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz", + "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==", "cpu": [ "arm64" ], @@ -1259,9 +1273,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz", - "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz", + "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==", "cpu": [ "arm64" ], @@ -1273,9 +1287,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz", - "integrity": 
"sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz", + "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==", "cpu": [ "loong64" ], @@ -1287,9 +1301,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz", - "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz", + "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==", "cpu": [ "ppc64" ], @@ -1301,9 +1315,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz", - "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz", + "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==", "cpu": [ "riscv64" ], @@ -1315,9 +1329,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz", - "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==", + "version": "4.54.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz", + "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==", "cpu": [ "riscv64" ], @@ -1329,9 +1343,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz", - "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz", + "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==", "cpu": [ "s390x" ], @@ -1343,9 +1357,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz", - "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz", + "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==", "cpu": [ "x64" ], @@ -1357,9 +1371,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz", - "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz", + "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==", "cpu": [ "x64" ], @@ 
-1371,9 +1385,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz", - "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz", + "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==", "cpu": [ "arm64" ], @@ -1385,9 +1399,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz", - "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz", + "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==", "cpu": [ "arm64" ], @@ -1399,9 +1413,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz", - "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz", + "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==", "cpu": [ "ia32" ], @@ -1413,9 +1427,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz", - 
"integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz", + "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==", "cpu": [ "x64" ], @@ -1427,9 +1441,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz", - "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz", + "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==", "cpu": [ "x64" ], @@ -1584,6 +1598,7 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.27.tgz", "integrity": "sha512-cisd7gxkzjBKU2GgdYrTdtQx1SORymWyaAFhaxQPK9bYO9ot3Y5OikQRvY0VYQtvwjeQnizCINJAenh/V7MK2w==", "license": "MIT", + "peer": true, "dependencies": { "@types/prop-types": "*", "csstype": "^3.2.2" @@ -1619,6 +1634,7 @@ "resolved": "https://registry.npmjs.org/@types/three/-/three-0.182.0.tgz", "integrity": "sha512-WByN9V3Sbwbe2OkWuSGyoqQO8Du6yhYaXtXLoA5FkKTUJorZ+yOHBZ35zUUPQXlAKABZmbYp5oAqpA4RBjtJ/Q==", "license": "MIT", + "peer": true, "dependencies": { "@dimforge/rapier3d-compat": "~0.12.0", "@tweenjs/tween.js": "~23.1.3", @@ -1693,6 +1709,7 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -1934,9 +1951,9 @@ } }, "node_modules/autoprefixer": { - "version": "10.4.22", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.22.tgz", - "integrity": 
"sha512-ARe0v/t9gO28Bznv6GgqARmVqcWOV3mfgUPn9becPHMiD3o9BwlRgaeccZnwTpZ7Zwqrm+c1sUSsMxIzQzc8Xg==", + "version": "10.4.23", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", + "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", "funding": [ { "type": "opencollective", @@ -1953,10 +1970,9 @@ ], "license": "MIT", "dependencies": { - "browserslist": "^4.27.0", - "caniuse-lite": "^1.0.30001754", + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001760", "fraction.js": "^5.3.4", - "normalize-range": "^0.1.2", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, @@ -2014,9 +2030,9 @@ "license": "MIT" }, "node_modules/baseline-browser-mapping": { - "version": "2.9.7", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz", - "integrity": "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg==", + "version": "2.9.11", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", + "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", "license": "Apache-2.0", "bin": { "baseline-browser-mapping": "dist/cli.js" @@ -2085,6 +2101,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -2123,6 +2140,13 @@ "ieee754": "^1.2.1" } }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -2202,9 +2226,9 @@ } }, "node_modules/caniuse-lite": { - "version": 
"1.0.30001760", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz", - "integrity": "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw==", + "version": "1.0.30001761", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001761.tgz", + "integrity": "sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==", "funding": [ { "type": "opencollective", @@ -2934,6 +2958,7 @@ "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -3031,9 +3056,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.24.tgz", - "integrity": "sha512-nLHIW7TEq3aLrEYWpVaJ1dRgFR+wLDPN8e8FpYAql/bMV2oBEfC37K0gLEGgv9fy66juNShSMV8OkTqzltcG/w==", + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -3212,9 +3237,9 @@ "license": "MIT" }, "node_modules/fastq": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", - "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", "license": "ISC", "dependencies": { "reusify": "^1.0.4" @@ -4255,6 +4280,7 @@ "resolved": 
"https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "license": "MIT", + "peer": true, "bin": { "jiti": "bin/jiti.js" } @@ -4355,7 +4381,8 @@ "version": "1.9.4", "resolved": "https://registry.npmjs.org/leaflet/-/leaflet-1.9.4.tgz", "integrity": "sha512-nxS1ynzJOmOlHp+iL3FyWqK89GtNL8U8rvlMOsQdTTssxZwCXh8N2NB3GDQOL+YR3XnWyZAxwQixURb+FA74PA==", - "license": "BSD-2-Clause" + "license": "BSD-2-Clause", + "peer": true }, "node_modules/levn": { "version": "0.4.1", @@ -4601,15 +4628,6 @@ "node": ">=0.10.0" } }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -4917,6 +4935,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -5146,6 +5165,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0" }, @@ -5170,6 +5190,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" @@ -5447,9 +5468,9 @@ } }, "node_modules/rollup": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz", - "integrity": 
"sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz", + "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==", "dev": true, "license": "MIT", "dependencies": { @@ -5463,28 +5484,28 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.53.3", - "@rollup/rollup-android-arm64": "4.53.3", - "@rollup/rollup-darwin-arm64": "4.53.3", - "@rollup/rollup-darwin-x64": "4.53.3", - "@rollup/rollup-freebsd-arm64": "4.53.3", - "@rollup/rollup-freebsd-x64": "4.53.3", - "@rollup/rollup-linux-arm-gnueabihf": "4.53.3", - "@rollup/rollup-linux-arm-musleabihf": "4.53.3", - "@rollup/rollup-linux-arm64-gnu": "4.53.3", - "@rollup/rollup-linux-arm64-musl": "4.53.3", - "@rollup/rollup-linux-loong64-gnu": "4.53.3", - "@rollup/rollup-linux-ppc64-gnu": "4.53.3", - "@rollup/rollup-linux-riscv64-gnu": "4.53.3", - "@rollup/rollup-linux-riscv64-musl": "4.53.3", - "@rollup/rollup-linux-s390x-gnu": "4.53.3", - "@rollup/rollup-linux-x64-gnu": "4.53.3", - "@rollup/rollup-linux-x64-musl": "4.53.3", - "@rollup/rollup-openharmony-arm64": "4.53.3", - "@rollup/rollup-win32-arm64-msvc": "4.53.3", - "@rollup/rollup-win32-ia32-msvc": "4.53.3", - "@rollup/rollup-win32-x64-gnu": "4.53.3", - "@rollup/rollup-win32-x64-msvc": "4.53.3", + "@rollup/rollup-android-arm-eabi": "4.54.0", + "@rollup/rollup-android-arm64": "4.54.0", + "@rollup/rollup-darwin-arm64": "4.54.0", + "@rollup/rollup-darwin-x64": "4.54.0", + "@rollup/rollup-freebsd-arm64": "4.54.0", + "@rollup/rollup-freebsd-x64": "4.54.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.54.0", + "@rollup/rollup-linux-arm-musleabihf": "4.54.0", + "@rollup/rollup-linux-arm64-gnu": "4.54.0", + "@rollup/rollup-linux-arm64-musl": "4.54.0", + "@rollup/rollup-linux-loong64-gnu": "4.54.0", + "@rollup/rollup-linux-ppc64-gnu": "4.54.0", + 
"@rollup/rollup-linux-riscv64-gnu": "4.54.0", + "@rollup/rollup-linux-riscv64-musl": "4.54.0", + "@rollup/rollup-linux-s390x-gnu": "4.54.0", + "@rollup/rollup-linux-x64-gnu": "4.54.0", + "@rollup/rollup-linux-x64-musl": "4.54.0", + "@rollup/rollup-openharmony-arm64": "4.54.0", + "@rollup/rollup-win32-arm64-msvc": "4.54.0", + "@rollup/rollup-win32-ia32-msvc": "4.54.0", + "@rollup/rollup-win32-x64-gnu": "4.54.0", + "@rollup/rollup-win32-x64-msvc": "4.54.0", "fsevents": "~2.3.2" } }, @@ -5737,6 +5758,16 @@ "integrity": "sha512-qSE2I4AngLQG7BXqoZj51jokT4WUXe8mOBrvfOXpci8+6Yu44+/dD5zqDpOx3Ux792eamTd2lLcI8jqFntk/lg==", "license": "MIT" }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -5746,6 +5777,17 @@ "node": ">=0.10.0" } }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, "node_modules/stats-gl": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/stats-gl/-/stats-gl-2.4.2.tgz", @@ -6023,6 +6065,32 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/terser": { + "version": "5.44.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.44.1.tgz", + "integrity": "sha512-t/R3R/n0MSwnnazuPpPNVO60LX0SKL45pyl9YlvxIdkH0Of7D5qM2EVe+yASRIlY5pZ73nclYJfNANGWPwFDZw==", + "dev": true, + "license": 
"BSD-2-Clause", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.15.0", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "license": "MIT" + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -6055,7 +6123,8 @@ "version": "0.165.0", "resolved": "https://registry.npmjs.org/three/-/three-0.165.0.tgz", "integrity": "sha512-cc96IlVYGydeceu0e5xq70H8/yoVT/tXBxV/W8A/U6uOq7DXc4/s1Mkmnu6SqoYGhSRWWYFOhVwvq6V0VtbplA==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/three-mesh-bvh": { "version": "0.7.8", @@ -6134,6 +6203,7 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -6356,9 +6426,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz", - "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", "funding": [ { "type": "opencollective", @@ -6400,6 +6470,7 @@ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", "integrity": 
"sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", "license": "MIT", + "peer": true, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } @@ -6447,6 +6518,7 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", diff --git a/challengerepo/real-time-overlay/src/App.jsx b/challengerepo/real-time-overlay/src/App.jsx index 2aca49e..1128e9b 100644 --- a/challengerepo/real-time-overlay/src/App.jsx +++ b/challengerepo/real-time-overlay/src/App.jsx @@ -1,6 +1,7 @@ import { useState } from 'react' import AvatarWorld from './components/AvatarWorld' import SatelliteMap from './components/SatelliteMap' +import SatGPU from './components/SatGPU' import CameraFeed from './components/CameraFeed' import ConnectionGraph from './components/ConnectionGraph' import ImmersiveReader from './components/ImmersiveReader' @@ -32,7 +33,7 @@ function App() { {/* Main Content Grid */}
- {/* Left Column: Camera Feeds */} + {/* Left Column: Camera Feeds + GPU Stats */}
@@ -44,10 +45,11 @@ function App() {
-
- - -
+
+ + {/* SatGPU - Linux GPU Monitoring */} +
+
@@ -92,28 +94,26 @@ function App() { LATENCY 12 MS - + {/* Immersive Reader Toggle */} - + {/* AI Training Toggle */}
- + {/* Immersive Reader Panel */} {activeTab === 'immersive' && (
@@ -140,7 +140,7 @@ function App() { โœ•
- diff --git a/challengerepo/real-time-overlay/src/components/SatGPU.jsx b/challengerepo/real-time-overlay/src/components/SatGPU.jsx new file mode 100644 index 0000000..2c5dcb3 --- /dev/null +++ b/challengerepo/real-time-overlay/src/components/SatGPU.jsx @@ -0,0 +1,237 @@ +import { useState, useEffect } from 'react'; +import { Cpu, Activity, Thermometer, Zap, HardDrive, Server } from 'lucide-react'; + +/** + * SatGPU - Real-time GPU monitoring component for Linux overlay + * Fetches GPU stats from the backend API and displays in cyberpunk style + * Supports NVIDIA (nvidia-smi), AMD (rocm-smi), and Intel (intel_gpu_top) GPUs + */ +export default function SatGPU({ endpoint = '/api/gpu/stats', refreshInterval = 2000 }) { + const [gpuData, setGpuData] = useState({ + gpus: [], + timestamp: null, + error: null, + loading: true + }); + + const [history, setHistory] = useState([]); + const MAX_HISTORY = 60; + + useEffect(() => { + const fetchGpuStats = async () => { + try { + const response = await fetch(endpoint); + if (!response.ok) throw new Error(`HTTP ${response.status}`); + const data = await response.json(); + + setGpuData({ + gpus: data.gpus || [], + timestamp: data.timestamp || new Date().toISOString(), + error: null, + loading: false + }); + + // Track history for graphs + setHistory(prev => { + const newHistory = [...prev, { + time: Date.now(), + utilization: data.gpus?.[0]?.utilization || 0, + memory: data.gpus?.[0]?.memoryUsed || 0, + temp: data.gpus?.[0]?.temperature || 0 + }]; + return newHistory.slice(-MAX_HISTORY); + }); + } catch (err) { + // Simulate data when API not available + const simulatedGpu = { + id: 0, + name: 'NVIDIA RTX 4090', + vendor: 'nvidia', + utilization: Math.round(40 + Math.random() * 40), + memoryTotal: 24576, + memoryUsed: Math.round(8000 + Math.random() * 8000), + temperature: Math.round(55 + Math.random() * 20), + powerDraw: Math.round(150 + Math.random() * 200), + fanSpeed: Math.round(30 + Math.random() * 40), + clockCore: 
Math.round(1800 + Math.random() * 500), + clockMemory: Math.round(9000 + Math.random() * 1000) + }; + + setGpuData({ + gpus: [simulatedGpu], + timestamp: new Date().toISOString(), + error: null, + loading: false + }); + + setHistory(prev => { + const newHistory = [...prev, { + time: Date.now(), + utilization: simulatedGpu.utilization, + memory: simulatedGpu.memoryUsed, + temp: simulatedGpu.temperature + }]; + return newHistory.slice(-MAX_HISTORY); + }); + } + }; + + fetchGpuStats(); + const interval = setInterval(fetchGpuStats, refreshInterval); + return () => clearInterval(interval); + }, [endpoint, refreshInterval]); + + const getUtilizationColor = (util) => { + if (util > 90) return '#ff003c'; + if (util > 70) return '#ffaa00'; + return '#00ff00'; + }; + + const getTempColor = (temp) => { + if (temp > 85) return '#ff003c'; + if (temp > 70) return '#ffaa00'; + return '#00f0ff'; + }; + + // Render mini sparkline graph + const renderSparkline = (data, color, maxVal = 100) => { + if (data.length < 2) return null; + const width = 120; + const height = 30; + const points = data.map((d, i) => { + const x = (i / (data.length - 1)) * width; + const y = height - (d / maxVal) * height; + return `${x},${y}`; + }).join(' '); + + return ( + + + + ); + }; + + return ( +
+ {/* Header */} +
+
+ + SAT.GPU // LINUX +
+
+
+ + {gpuData.timestamp ? new Date(gpuData.timestamp).toLocaleTimeString() : '--:--:--'} + +
+
+ + {/* GPU Cards */} +
+ {gpuData.gpus.map((gpu, idx) => ( +
+ {/* GPU Name */} +
+
+ + {gpu.name || `GPU ${idx}`} +
+ {gpu.vendor || 'unknown'} +
+ + {/* Stats Grid */} +
+ {/* Utilization */} +
+
+ + UTIL +
+ + {gpu.utilization}% + +
+ + {/* Temperature */} +
+
+ + TEMP +
+ + {gpu.temperature}ยฐC + +
+ + {/* Memory */} +
+
+ + VRAM +
+ + {Math.round(gpu.memoryUsed / 1024)}/{Math.round(gpu.memoryTotal / 1024)}GB + +
+ + {/* Power */} +
+
+ + PWR +
+ + {gpu.powerDraw}W + +
+
+ + {/* Utilization Bar */} +
+
+
+
+
+ + {/* Mini Sparkline */} +
+ {renderSparkline(history.map(h => h.utilization), getUtilizationColor(gpu.utilization))} +
+ + {/* Clock Speeds */} +
+ CORE: {gpu.clockCore}MHz + MEM: {gpu.clockMemory}MHz + FAN: {gpu.fanSpeed}% +
+
+ ))} + + {gpuData.gpus.length === 0 && !gpuData.loading && ( +
+ No GPU detected. Ensure nvidia-smi, rocm-smi, or intel_gpu_top is available. +
+ )} +
+ + {/* Footer */} +
+ REFRESH: {refreshInterval / 1000}s + GPUS: {gpuData.gpus.length} + LINUX_SATGPU v1.0 +
+
+ ); +} diff --git a/cloud_devices.py b/cloud_devices.py new file mode 100644 index 0000000..93c843b --- /dev/null +++ b/cloud_devices.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Cloud Device Manager +Manage 3 cloud platforms: Azure Container Apps, Azure Blob Storage, Vercel Edge +""" + +import subprocess +import sys +import os +import json +import platform +from pathlib import Path +from datetime import datetime + +PROJECT_PATH = Path(__file__).parent.resolve() +IS_WINDOWS = platform.system() == "Windows" + +# Cloud Device Configuration +CLOUD_DEVICES = { + "azure_containers": { + "name": "Azure Container Apps", + "type": "compute", + "purpose": "Hosted Processes (Web, API, Audio servers)", + "status": "configured", + "region": "eastus", + "resources": { + "registry": "networkbusterlo25gft5nqwzg.azurecr.io", + "resource_group": "networkbuster-rg", + "environment": "networkbuster-env" + }, + "services": [ + {"name": "networkbuster-server", "port": 3000, "replicas": 1}, + {"name": "networkbuster-api", "port": 3001, "replicas": 1}, + {"name": "networkbuster-audio", "port": 3002, "replicas": 1} + ] + }, + "azure_storage": { + "name": "Azure Blob Storage", + "type": "storage", + "purpose": "Cloud Storage & Backups", + "status": "pending", + "region": "eastus", + "resources": { + "account_name": "networkbusterstorage", + "containers": ["backups", "exports", "imports", "media"] + }, + "local_mount": "D:\\networkbuster-cloud" + }, + "vercel_edge": { + "name": "Vercel Edge Network", + "type": "cdn_serverless", + "purpose": "CDN + Serverless API Functions", + "status": "configured", + "region": "global", + "resources": { + "project": "networkbuster", + "domain": "networkbuster.net", + "functions": ["/api/*"] + } + } +} + + +def run_cmd(cmd, capture=True): + """Run shell command.""" + result = subprocess.run(cmd, shell=True, capture_output=capture, text=True) + return result + + +def check_azure_cli(): + """Check if Azure CLI is installed and logged 
in.""" + result = run_cmd("az account show") + if result.returncode == 0: + account = json.loads(result.stdout) + return {"logged_in": True, "user": account.get("user", {}).get("name", "unknown")} + return {"logged_in": False} + + +def check_vercel_cli(): + """Check if Vercel CLI is installed.""" + result = run_cmd("vercel --version") + return result.returncode == 0 + + +def check_docker(): + """Check if Docker is running.""" + result = run_cmd("docker version") + return result.returncode == 0 + + +class CloudDeviceManager: + """Manage cloud devices for NetworkBuster.""" + + def __init__(self): + self.devices = CLOUD_DEVICES.copy() + self.check_prerequisites() + + def check_prerequisites(self): + """Check required CLI tools.""" + self.prereqs = { + "azure_cli": check_azure_cli(), + "vercel_cli": check_vercel_cli(), + "docker": check_docker() + } + + def show_status(self): + """Show status of all cloud devices.""" + print("\n" + "=" * 70) + print(" โ˜๏ธ CLOUD DEVICES STATUS") + print("=" * 70) + + # Prerequisites + print("\n ๐Ÿ“ฆ Prerequisites:") + az = self.prereqs["azure_cli"] + print(f" Azure CLI: {'โœ“ Logged in as ' + az['user'] if az['logged_in'] else 'โœ— Not logged in'}") + print(f" Vercel CLI: {'โœ“ Installed' if self.prereqs['vercel_cli'] else 'โœ— Not found'}") + print(f" Docker: {'โœ“ Running' if self.prereqs['docker'] else 'โœ— Not running'}") + + print("\n" + "-" * 70) + + for i, (key, device) in enumerate(self.devices.items(), 1): + status_icon = "๐ŸŸข" if device["status"] == "configured" else "๐ŸŸก" if device["status"] == "pending" else "๐Ÿ”ด" + + print(f"\n [{i}] {status_icon} {device['name']}") + print(f" Type: {device['type']}") + print(f" Purpose: {device['purpose']}") + print(f" Region: {device['region']}") + print(f" Status: {device['status'].upper()}") + + if device["type"] == "compute" and "services" in device: + print(" Services:") + for svc in device["services"]: + print(f" - {svc['name']} (port {svc['port']})") + + print("\n" + "=" * 70) 
+ + def deploy_azure_containers(self): + """Deploy to Azure Container Apps.""" + print("\n๐Ÿš€ Deploying to Azure Container Apps...") + + if not self.prereqs["azure_cli"]["logged_in"]: + print("โœ— Azure CLI not logged in. Run: az login") + return False + + if not self.prereqs["docker"]: + print("โœ— Docker not running") + return False + + # Run the deployment script + deploy_script = PROJECT_PATH / "deploy-azure.ps1" + if deploy_script.exists(): + if IS_WINDOWS: + result = run_cmd(f'powershell -ExecutionPolicy Bypass -File "{deploy_script}"', capture=False) + else: + result = run_cmd(f'pwsh -File "{deploy_script}"', capture=False) + return result.returncode == 0 + else: + print("โœ— deploy-azure.ps1 not found") + return False + + def setup_azure_storage(self): + """Setup Azure Blob Storage account.""" + print("\n๐Ÿ“ฆ Setting up Azure Blob Storage...") + + if not self.prereqs["azure_cli"]["logged_in"]: + print("โœ— Azure CLI not logged in. Run: az login") + return False + + device = self.devices["azure_storage"] + account_name = device["resources"]["account_name"] + rg = self.devices["azure_containers"]["resources"]["resource_group"] + location = device["region"] + + # Create storage account + print(f" Creating storage account: {account_name}") + result = run_cmd(f'az storage account create --name {account_name} --resource-group {rg} --location {location} --sku Standard_LRS') + + if result.returncode == 0: + print(f" โœ“ Storage account created") + + # Create containers + for container in device["resources"]["containers"]: + print(f" Creating container: {container}") + run_cmd(f'az storage container create --name {container} --account-name {account_name}') + + self.devices["azure_storage"]["status"] = "configured" + print("โœ“ Azure Blob Storage configured") + return True + else: + print(f"โœ— Failed: {result.stderr}") + return False + + def deploy_vercel(self): + """Deploy to Vercel.""" + print("\n๐Ÿ”บ Deploying to Vercel Edge...") + + if not 
self.prereqs["vercel_cli"]: + print("โœ— Vercel CLI not found. Install: npm i -g vercel") + return False + + os.chdir(PROJECT_PATH) + result = run_cmd("vercel --prod", capture=False) + return result.returncode == 0 + + def sync_local_to_cloud(self): + """Sync local D: drive to Azure Blob Storage.""" + print("\n๐Ÿ”„ Syncing local storage to Azure Blob...") + + local_path = self.devices["azure_storage"]["local_mount"] + account_name = self.devices["azure_storage"]["resources"]["account_name"] + + if not os.path.exists(local_path): + print(f"โœ— Local path not found: {local_path}") + return False + + # Use azcopy for sync + result = run_cmd("azcopy --version") + if result.returncode != 0: + print("โš  azcopy not found. Using az storage blob upload-batch...") + + for container in ["backups", "exports"]: + source = os.path.join(local_path, container) + if os.path.exists(source): + print(f" Uploading {container}...") + run_cmd(f'az storage blob upload-batch --account-name {account_name} --destination {container} --source "{source}"') + else: + # Use azcopy for faster sync + print(" Using azcopy for sync...") + run_cmd(f'azcopy sync "{local_path}" "https://{account_name}.blob.core.windows.net/backups" --recursive') + + print("โœ“ Sync complete") + return True + + def download_from_cloud(self, container="backups"): + """Download files from Azure Blob to local.""" + print(f"\nโฌ‡๏ธ Downloading from Azure Blob ({container})...") + + local_path = self.devices["azure_storage"]["local_mount"] + account_name = self.devices["azure_storage"]["resources"]["account_name"] + dest = os.path.join(local_path, container) + + os.makedirs(dest, exist_ok=True) + + result = run_cmd(f'az storage blob download-batch --account-name {account_name} --source {container} --destination "{dest}"') + + if result.returncode == 0: + print(f"โœ“ Downloaded to {dest}") + return True + else: + print(f"โœ— Download failed") + return False + + def scale_container(self, service_name, replicas): + """Scale a 
container app.""" + print(f"\n๐Ÿ“ˆ Scaling {service_name} to {replicas} replicas...") + + rg = self.devices["azure_containers"]["resources"]["resource_group"] + + result = run_cmd(f'az containerapp update --name {service_name} --resource-group {rg} --min-replicas {replicas} --max-replicas {replicas * 2}') + + if result.returncode == 0: + print(f"โœ“ {service_name} scaled to {replicas} replicas") + return True + else: + print(f"โœ— Scaling failed") + return False + + def get_logs(self, service_name): + """Get logs from container app.""" + print(f"\n๐Ÿ“‹ Fetching logs for {service_name}...") + + rg = self.devices["azure_containers"]["resources"]["resource_group"] + + run_cmd(f'az containerapp logs show --name {service_name} --resource-group {rg} --follow', capture=False) + + def generate_cost_estimate(self): + """Estimate monthly cloud costs.""" + print("\n" + "=" * 70) + print(" ๐Ÿ’ฐ ESTIMATED MONTHLY COSTS") + print("=" * 70) + + costs = { + "Azure Container Apps": { + "compute": "$0 - $50 (consumption tier)", + "notes": "Free tier: 2M requests/month, 180K vCPU-sec" + }, + "Azure Blob Storage": { + "storage": "$0.018/GB (Hot tier)", + "operations": "$0.004/10K operations", + "estimate": "~$5/month for 100GB" + }, + "Vercel Edge": { + "hosting": "$0 (Hobby tier)", + "bandwidth": "100GB free", + "functions": "100K invocations free" + } + } + + total_low = 0 + total_high = 55 + + for service, pricing in costs.items(): + print(f"\n {service}:") + for key, value in pricing.items(): + print(f" {key}: {value}") + + print(f"\n ๐Ÿ“Š Estimated Total: ${total_low} - ${total_high}/month") + print("=" * 70) + + +def show_menu(): + """Display cloud management menu.""" + print("\n" + "โ”€" * 60) + print(" โ˜๏ธ CLOUD DEVICE MANAGER") + print("โ”€" * 60) + print(" [1] ๐Ÿ“Š Show Status (all devices)") + print(" [2] ๐Ÿš€ Deploy to Azure Container Apps") + print(" [3] ๐Ÿ“ฆ Setup Azure Blob Storage") + print(" [4] ๐Ÿ”บ Deploy to Vercel Edge") + print(" [5] ๐Ÿ”„ Sync Local โ†’ Cloud 
Storage") + print(" [6] โฌ‡๏ธ Download from Cloud Storage") + print(" [7] ๐Ÿ“ˆ Scale Container") + print(" [8] ๐Ÿ“‹ View Logs") + print(" [9] ๐Ÿ’ฐ Cost Estimate") + print(" [0] โŒ Exit") + print("โ”€" * 60) + + +def main(): + """Main entry point.""" + manager = CloudDeviceManager() + + print() + print("โ•”" + "โ•" * 58 + "โ•—") + print("โ•‘" + " NetworkBuster Cloud Device Manager".center(58) + "โ•‘") + print("โ•‘" + " Azure Containers | Azure Storage | Vercel Edge".center(58) + "โ•‘") + print("โ•š" + "โ•" * 58 + "โ•") + + while True: + show_menu() + choice = input("\n Select option [0-9]: ").strip() + + if choice == "1": + manager.show_status() + elif choice == "2": + manager.deploy_azure_containers() + elif choice == "3": + manager.setup_azure_storage() + elif choice == "4": + manager.deploy_vercel() + elif choice == "5": + manager.sync_local_to_cloud() + elif choice == "6": + container = input(" Container (backups/exports/imports): ").strip() or "backups" + manager.download_from_cloud(container) + elif choice == "7": + print(" Services: networkbuster-server, networkbuster-api, networkbuster-audio") + service = input(" Service name: ").strip() + replicas = int(input(" Replicas: ").strip() or "1") + manager.scale_container(service, replicas) + elif choice == "8": + print(" Services: networkbuster-server, networkbuster-api, networkbuster-audio") + service = input(" Service name: ").strip() + manager.get_logs(service) + elif choice == "9": + manager.generate_cost_estimate() + elif choice == "0": + print("\n๐Ÿ‘‹ Goodbye!") + break + else: + print("\nโš  Invalid option.") + + input("\nPress Enter to continue...") + + +if __name__ == "__main__": + main() diff --git a/configure-custom-domain.ps1 b/configure-custom-domain.ps1 index b8b8e7b..7f91972 100644 --- a/configure-custom-domain.ps1 +++ b/configure-custom-domain.ps1 @@ -2,6 +2,7 @@ param( [string]$Domain = "networkbuster.net", + [string]$BrandSuffix = ' โ€” AI style', [string]$ResourceGroup = 
"networkbuster-rg", [string]$KeyVaultName = "networkbuster-kv", [string]$ContainerAppName = "networkbuster-server", @@ -41,12 +42,12 @@ Write-Host "`nStep 3: Required DNS Records" -ForegroundColor Yellow Write-Host "============================`n" -ForegroundColor Yellow Write-Host "For Vercel (Main App):" -ForegroundColor Cyan -Write-Host " Root domain: $Domain" +Write-Host " Root domain: $($Domain)$BrandSuffix" Write-Host " Type: A Record (Primary)" Write-Host " Values: 76.76.19.21 and 76.76.20.21" Write-Host " OR CNAME: cname.vercel-dns.com`n" -Write-Host " Subdomain: www.$Domain" +Write-Host " Subdomain: www.$Domain$BrandSuffix" Write-Host " Type: CNAME" Write-Host " Value: cname.vercel-dns.com`n" @@ -64,7 +65,7 @@ Write-Host "==================================`n" -ForegroundColor Yellow Write-Host "FOR VERCEL:" -ForegroundColor Cyan Write-Host " 1. Go to vercel.com > Projects > NetworkBuster" Write-Host " 2. Settings > Domains" -Write-Host " 3. Add domain: $Domain" +Write-Host " 3. Add domain: $($Domain)$BrandSuffix" Write-Host " 4. Configure DNS records (see above)" Write-Host " 5. Wait 24-48 hours for propagation" Write-Host " 6. 
Vercel will auto-provision SSL certificate`n"
@@ -115,8 +116,8 @@
 Write-Host "Step 7: Summary" -ForegroundColor Yellow
 Write-Host "===============`n" -ForegroundColor Yellow
 $summary = @{
-    "Primary Domain" = $Domain
-    "API Domain" = "api.$Domain"
+    "Primary Domain" = $Domain
+    "API Domain" = "api.$Domain"
     "Key Vault" = $KeyVaultName
     "Container App" = $ContainerAppName
     "Resource Group" = $ResourceGroup
diff --git a/contrib/Cleanskiier27-final/CONTRIBUTORS.md b/contrib/Cleanskiier27-final/CONTRIBUTORS.md
new file mode 100644
index 0000000..54e9bbe
--- /dev/null
+++ b/contrib/Cleanskiier27-final/CONTRIBUTORS.md
@@ -0,0 +1,5 @@
+Contributors
+
+- GitHub Copilot — Prepared and hardened Network Boost scripts and documentation (PR-ready contribution)
+
+If you accept this contribution, please add your name/email and merge the scripts into `scripts/` and add to installer/CI as appropriate.
\ No newline at end of file
diff --git a/contrib/Cleanskiier27-final/PR_NOTE.md b/contrib/Cleanskiier27-final/PR_NOTE.md
new file mode 100644
index 0000000..8dbd5cb
--- /dev/null
+++ b/contrib/Cleanskiier27-final/PR_NOTE.md
@@ -0,0 +1,25 @@
+PR Notes — Add Network Boost utilities
+
+Summary:
+This PR adds a cross-platform ``Network Boost`` utility to improve network throughput and configuration for target systems. It includes hardened apply logic and generates robust restore scripts to revert changes.
+
+Files to add to upstream (`Cleanskiier27/Final`):
+- `scripts/network-boost.ps1` (Windows)
+- `scripts/network-boost.sh` (Linux)
+- `docs/NETWORK-BOOST.md` (documentation)
+- `CONTRIBUTORS.md` (contributor entry)
+
+Testing recommendations:
+- Run dry-run and review outputs: (Windows) `powershell -File scripts\network-boost.ps1` (Linux) `bash ./scripts/network-boost.sh`
+- Run apply in a controlled VM and verify `network-boost-restore.*` contents and restore operations.
+- Validate that installer integration is opt-in (checkbox) and uses non-interactive apply with `-Apply -Confirm:$false`. + +Security & Safety: +- Scripts are designed to be reversible and non-destructive; restore scripts are generated with previous values and best-effort commands. +- Scripts log all operations to `network-boost.log` and recommend reboot where appropriate. + +Maintainer notes: +- If merging, consider adding a small CI job that runs a dry-run, installs PSScriptAnalyzer/shellcheck, and verifies that restore scripts are generated when running apply in a controlled test runner. +- Optionally add an installer page and an entry in the main docs referencing the new tooling. + +Prepared by: GitHub Copilot (contributor) diff --git a/contrib/Cleanskiier27-final/README.md b/contrib/Cleanskiier27-final/README.md new file mode 100644 index 0000000..8ca8012 --- /dev/null +++ b/contrib/Cleanskiier27-final/README.md @@ -0,0 +1,20 @@ +Network Booster contribution (PR-ready) + +This folder contains a contribution to the `Cleanskiier27/Final` repository: a hardened cross-platform Network Boost utility with safe apply and restore functionality and documentation. + +Included: +- `network-boost.ps1` โ€” Windows PowerShell script (hardened and creates `network-boost-restore.ps1`). +- `network-boost.sh` โ€” Linux shell script (hardened and creates `network-boost-restore.sh`). +- `docs/NETWORK-BOOST.md` โ€” usage and notes for maintainers. +- `CONTRIBUTORS.md` โ€” records contribution and author. + +How to apply in upstream repo: +1. Copy `network-boost.*` into `scripts/` or `tools/` in the upstream repo. +2. Add installer integration or CI steps as desired. +3. Run tests on representative Windows and Linux machines (see docs/NETWORK-BOOST.md). + +This contribution was prepared by: GitHub Copilot (contributor). 
+ +Note: This contribution intentionally does **not** include a LICENSE file โ€” upstream maintainers should add or apply an appropriate license when accepting this contribution. + +Initial release: v0.1.0 (publish automation script included as `publish.sh`). diff --git a/contrib/Cleanskiier27-final/docs/NETWORK-BOOST.md b/contrib/Cleanskiier27-final/docs/NETWORK-BOOST.md new file mode 100644 index 0000000..edff919 --- /dev/null +++ b/contrib/Cleanskiier27-final/docs/NETWORK-BOOST.md @@ -0,0 +1,34 @@ +Network Boost โ€” contribution docs + +Overview +- This contribution adds cross-platform utilities to safely tune network settings for higher throughput and better performance in certain environments. + +Files +- `scripts/network-boost.ps1` โ€” Windows PowerShell script (hardened, produces `network-boost-restore.ps1`). +- `scripts/network-boost.sh` โ€” Linux script (hardened, produces `network-boost-restore.sh`). + +Usage (Windows) +- Dry-run (recommended): open an elevated PowerShell and run: + powershell -ExecutionPolicy Bypass -File scripts\network-boost.ps1 +- Apply (interactive): powershell -ExecutionPolicy Bypass -File scripts\network-boost.ps1 -Apply +- Apply non-interactive (CI / installer): powershell -ExecutionPolicy Bypass -File scripts\network-boost.ps1 -Apply -Confirm:$false +- After apply: review `network-boost.log` and use `network-boost-restore.ps1` to revert if needed. + +Usage (Linux) +- Dry-run: sudo ./scripts/network-boost.sh +- Apply: sudo ./scripts/network-boost.sh --apply +- Apply w/out prompt and persist: sudo ./scripts/network-boost.sh --apply --no-confirm --persist +- After apply: review `network-boost.log` and use `network-boost-restore.sh` to revert. + +Safety & Testing +- Test in a non-production environment first. +- Scripts create restore scripts and logs; reviewers should inspect these before merging. + +Integration notes for maintainers +- Place scripts in `scripts/` in the upstream repository. 
+- Add an installer option if desired (NSIS page already implemented in NetworkBuster repo, but needs to be mirrored in Final repo installer if present). +- CI: run dry-run linter and optionally test script generation on Windows and Linux runners. + +Author & Contribution +- Prepared by: GitHub Copilot (contribution ready for cleanskiier27/Final) +- License: follow upstream project license (MIT in this repo) diff --git a/contrib/Cleanskiier27-final/publish.sh b/contrib/Cleanskiier27-final/publish.sh new file mode 100644 index 0000000..f750995 --- /dev/null +++ b/contrib/Cleanskiier27-final/publish.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_NAME=${1:-network-boost-contrib} +OWNER=${2:-cleanskiier27} +DESCRIPTION="Network Boost tools and docs (contribution)" +TAG=${3:-v0.1.0} + +# Ensure we're in the contrib directory (must contain scripts/) +if [ ! -f "scripts/network-boost.sh" ]; then + echo "This script must be run from the root of the contribution directory (contains scripts/)." + exit 1 +fi + +# Remove LICENSE +if present to avoid replicating upstream project's license +if [ -f LICENSE ]; then + echo "Found LICENSE; moving to LICENSE.skip to avoid replicating in the new repo." + mv LICENSE LICENSE.skip +fi + +# Initialize git if needed +if [ ! -d .git ]; then + git init + git branch -M main || true +fi + +# Use recommended local commit identity if available, otherwise leave as-is +git add . +if git status --porcelain | grep -q .; then + git commit -m "Initial commit: Network Boost contribution" || true +else + echo "No changes to commit." +fi + +# Create repo via gh CLI if available +if command -v gh >/dev/null 2>&1; then + echo "Creating GitHub repo ${OWNER}/${REPO_NAME} (public) via gh..." + gh repo create "${OWNER}/${REPO_NAME}" --public --description "${DESCRIPTION}" --source=. --remote=origin --push --confirm || true +else + echo "gh CLI not found. 
To create the repo manually, run:" + echo " gh repo create ${OWNER}/${REPO_NAME} --public --description \"${DESCRIPTION}\" --source=. --remote=origin --push" + echo "Or add remote and push manually:" + echo " git remote add origin git@github.com:${OWNER}/${REPO_NAME}.git" + echo " git push -u origin main" +fi + +# Create initial tag and push +git tag -a "$TAG" -m "Initial release $TAG" || true +if git rev-parse --verify origin/$TAG >/dev/null 2>&1; then + echo "Tag $TAG already exists on origin." +else + git push origin "$TAG" || true +fi + +# Create release via gh if available +if command -v gh >/dev/null 2>&1; then + gh release create "$TAG" --title "Initial release $TAG" --notes "Initial release of Network Boost contribution" || true +else + echo "gh CLI not found; tag $TAG created locally and pushed. Create a release via the GitHub UI or install gh to automate this." +fi + +echo "Done. Repo: https://github.com/${OWNER}/${REPO_NAME}" +echo "Note: No LICENSE file was included per instructions. If you want to include a license, add one and push a follow-up commit." \ No newline at end of file diff --git a/contrib/Cleanskiier27-final/scripts/network-boost.ps1 b/contrib/Cleanskiier27-final/scripts/network-boost.ps1 new file mode 100644 index 0000000..2a8f1d5 --- /dev/null +++ b/contrib/Cleanskiier27-final/scripts/network-boost.ps1 @@ -0,0 +1,124 @@ +<# +Hardened Network Boost PowerShell for Windows +- Performs dry-run by default +- Use -Apply to apply changes, -Confirm:$false to skip prompts +- Writes a robust restore script `network-boost-restore.ps1` that restores prior settings +- Logs to `network-boost.log` +Notes: Requires administrative privileges to apply. 
+#> +param( + [switch]$Apply, + [switch]$Confirm = $true, + [string]$LogFile = "network-boost.log", + [string]$RestoreScript = "network-boost-restore.ps1" +) + +$ErrorActionPreference = 'Stop' +$dir = Split-Path -Parent $MyInvocation.MyCommand.Definition +Push-Location $dir + +function Write-Log($msg) { + $ts = (Get-Date).ToString('u') + "$ts - $msg" | Out-File -FilePath $LogFile -Append -Encoding UTF8 + Write-Host $msg +} + +function Get-TCPGlobalSettings { + $raw = netsh interface tcp show global 2>$null + $dict = @{} + if ($raw) { + $raw -match '(.+?):\s+(.+)' | Out-Null + $raw -split "\r?\n" | ForEach-Object { + if ($_ -match '(.+?):\s+(.+)') { + $k = $matches[1].Trim() -replace '\s+','_' + $v = $matches[2].Trim() + $dict[$k] = $v + } + } + } + return $dict +} + +Write-Log "Starting hardened Windows Network Boost (Apply=$Apply)" +$cur = Get-TCPGlobalSettings +if ($cur.Count -gt 0) { Write-Log "Current TCP settings captured" } + +$changes = @( + @{cmd='netsh interface tcp set global autotuning=normal'; key='Receive_Window_Auto_Tuning_Level'; desc='TCP Auto-Tuning'}, + @{cmd='netsh interface tcp set global congestionprovider=ctcp'; key='Additive_Increase_and_Decrease_Provider'; desc='CTCP congestion provider'}, + @{cmd='netsh interface tcp set global rss=enabled'; key='Receive_Side_Scaling_State'; desc='RSS (Receive Side Scaling)'}, + @{cmd='netsh interface tcp set global chimney=disabled'; key='TCP_Chimney_State'; desc='TCP Chimney (disabled for compatibility)'}, + @{cmd='netsh interface tcp set global ecncapability=disabled'; key='ECN_Capability'; desc='ECN (disabled for compatibility)'} +) + +Write-Host "Recommended changes (dry-run):" +$idx=1 +foreach ($c in $changes) { Write-Host "[$idx] $($c.desc): $($c.cmd)"; $idx++ } + +if (-not $Apply) { Write-Log "Dry-run complete. Run with -Apply to apply changes."; Pop-Location; exit 0 } + +if ($Confirm) { + $ans = Read-Host "Apply recommended changes now? 
(y/N)" + if ($ans -notin @('y','Y','yes','Yes')) { Write-Log 'User declined to apply changes.'; Pop-Location; exit 0 } +} + +# Create restore script header +"# PowerShell restore script generated on $(Get-Date -Format u)" | Out-File -FilePath $RestoreScript -Encoding UTF8 +"# Run with administrative privileges to restore original values." | Out-File -FilePath $RestoreScript -Append -Encoding UTF8 +"`$ErrorActionPreference = 'Stop'" | Out-File -FilePath $RestoreScript -Append -Encoding UTF8 + +# Record current and write restore commands +foreach ($k in $cur.Keys) { + $v = $cur[$k] + # Map human-friendly keys to commands where possible (best-effort) + switch ($k) { + 'Receive_Window_Auto_Tuning_Level' { "# autotuning: $v" | Out-File -FilePath $RestoreScript -Append -Encoding UTF8 } + default { "# $k = $v" | Out-File -FilePath $RestoreScript -Append -Encoding UTF8 } +} +} + +# More robust capturing: write exact netsh restore commands for things we change +foreach ($c in $changes) { + # parse desired state from cmd (assumes '... 
=') + if ($c.cmd -match '=([^\s]+)$') { $desired = $matches[1] } else { $desired = '' } + # attempt to find current value for informative restore script + $curVal = '' + if ($c.key -and $cur.ContainsKey($c.key)) { $curVal = $cur[$c.key] } + $restoreCmd = "# Restore $($c.desc) (previous: $curVal)" + "`n" + # best-effort restore mapping + switch ($c) { + { $_.cmd -like '*autotuning*' } { $restoreCmd += "netsh interface tcp set global autotuning=$curVal`n" } + { $_.cmd -like '*congestionprovider*' } { $restoreCmd += "netsh interface tcp set global congestionprovider=$curVal`n" } + { $_.cmd -like '*rss*' } { $restoreCmd += "netsh interface tcp set global rss=$curVal`n" } + { $_.cmd -like '*chimney*' } { $restoreCmd += "netsh interface tcp set global chimney=$curVal`n" } + { $_.cmd -like '*ecncapability*' } { $restoreCmd += "netsh interface tcp set global ecncapability=$curVal`n" } + default { $restoreCmd += "REM No exact restore for: $($c.cmd)`n" } + } + $restoreCmd | Out-File -FilePath $RestoreScript -Append -Encoding UTF8 +} + +# Apply changes with transaction-like behavior +$applied = @() +try { + foreach ($c in $changes) { + Write-Log "Applying: $($c.desc)" + iex $c.cmd + $applied += $c + Write-Log "Applied: $($c.desc)" + } +} catch { + Write-Log "Error applying changes: $_. Initiating rollback." + # attempt rollback by running restore script (best-effort) + try { + & powershell -NoProfile -ExecutionPolicy Bypass -File $RestoreScript + Write-Log "Rollback attempted via $RestoreScript" + } catch { + Write-Log "Rollback failed: $_" + } + Pop-Location + throw $_ +} + +Write-Log "All changes applied successfully. A restore script was written to $RestoreScript and log to $LogFile. Reboot recommended." 
+Pop-Location +Exit 0 diff --git a/contrib/Cleanskiier27-final/scripts/network-boost.sh b/contrib/Cleanskiier27-final/scripts/network-boost.sh new file mode 100644 index 0000000..51c7ad5 --- /dev/null +++ b/contrib/Cleanskiier27-final/scripts/network-boost.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash +# Hardened network boost for Linux +# Usage: sudo ./network-boost.sh [--apply] [--no-confirm] [--persist] +# - --apply: apply recommended changes +# - --no-confirm: don't prompt +# - --persist: also write to /etc/sysctl.d/99-networkbuster.conf +# +# Examples: +# # Dry run to show recommended changes +# ./network-boost.sh +# # Apply interactively (prompts for confirmation) +# sudo ./network-boost.sh --apply +# # Apply without prompts and persist across reboots +# sudo ./network-boost.sh --apply --no-confirm --persist +# # Non-interactive (CI or automation) +# sudo ./network-boost.sh --apply --no-confirm +# +# Notes: +# - After applying a restore script is generated: network-boost-restore.sh +# - Logs are appended to network-boost.log +set -euo pipefail +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LOG="$DIR/network-boost.log" +RESTORE="$DIR/network-boost-restore.sh" + +recommendations=( + "net.core.rmem_max=16777216" + "net.core.wmem_max=16777216" + "net.ipv4.tcp_window_scaling=1" + "net.ipv4.tcp_congestion_control=bbr" +) + +apply=false +no_confirm=false +persist=false + +for arg in "$@"; do + case $arg in + --apply) apply=true ;; + --no-confirm) no_confirm=true ;; + --persist) persist=true ;; + *) echo "Unknown arg: $arg"; exit 1 ;; + esac +done + +echo "Starting Linux Network Boost (apply=$apply). Log: $LOG" + +echo "# Network boost log - $(date -u)" >> "$LOG" + +echo "Recommended changes:" +for r in "${recommendations[@]}"; do echo " $r"; done + +if [ "$apply" = false ]; then + echo "Dry run - no changes applied. Use --apply to apply changes."; exit 0 +fi + +if [ "$no_confirm" = false ]; then + read -rp "Apply recommended changes now? 
(y/N): " ans + case "$ans" in + y|Y|yes|Yes) ;; + *) echo "Cancelled by user."; exit 0 ;; + esac +fi + +# Create restore script +cat > "$RESTORE" <<'REST' +#!/usr/bin/env bash +# Restore previous sysctl values (generated by network-boost.sh) +set -euo pipefail +REST +chmod +x "$RESTORE" + +# capture current values and append restore commands +for r in "${recommendations[@]}"; do + key="${r%%=*}" + newval="${r#*=}" + oldval=$(sysctl -n "$key" 2>/dev/null || echo "") + echo "# $key previous: $oldval" >> "$LOG" + if [ -n "$oldval" ]; then + echo "sysctl -w $key=$oldval" >> "$RESTORE" + else + echo "# No previous value for $key" >> "$RESTORE" + fi +done + +# apply changes +for r in "${recommendations[@]}"; do + echo "Setting $r" | tee -a "$LOG" + sysctl -w "$r" | tee -a "$LOG" +done + +if [ "$persist" = true ]; then + conffile="/etc/sysctl.d/99-networkbuster.conf" + echo "Persisting settings to $conffile" | tee -a "$LOG" + tmpfile="/tmp/99-networkbuster.conf.$$" + for r in "${recommendations[@]}"; do echo "$r" >> "$tmpfile"; done + sudo mv "$tmpfile" "$conffile" + sudo sysctl --system | tee -a "$LOG" + echo "Persisted settings" >> "$LOG" +fi + +echo "Network boost applied. Restore with: $RESTORE" | tee -a "$LOG" +exit 0 diff --git a/conversation_history_template.html b/conversation_history_template.html new file mode 100644 index 0000000..b3a35bd --- /dev/null +++ b/conversation_history_template.html @@ -0,0 +1,421 @@ + + + + + + NetworkBuster AI - Conversation History + + + +
+
+

๐Ÿ’ฌ Conversation History

+

Complete archive of all AI interactions with context and answers

+
+ +
+
+
Total Exchanges
+
0
+
+
+
Today's Conversations
+
0
+
+
+
Active Sessions
+
0
+
+
+
Last Updated
+
--:--
+
+
+ + + +
+ + + + +
+ +
+
+

No conversations yet

+

Start chatting with NetworkBuster AI to see your conversation history here.

+
+
+
+ + + + diff --git a/dashboard/package.json b/dashboard/package.json index 207d125..53faf87 100644 --- a/dashboard/package.json +++ b/dashboard/package.json @@ -9,11 +9,11 @@ "preview": "vite preview" }, "dependencies": { - "react": "^18.3.1", - "react-dom": "^18.3.1" + "react": "^19.2.3", + "react-dom": "^19.2.3" }, "devDependencies": { - "@vitejs/plugin-react": "^4.3.1", + "@vitejs/plugin-react": "^5.1.2", "vite": "^5.4.21" } } diff --git a/dashboard/src/App.jsx b/dashboard/src/App.jsx index 262c129..577bef5 100644 --- a/dashboard/src/App.jsx +++ b/dashboard/src/App.jsx @@ -12,6 +12,7 @@ function App() {
  • Environment: Production
  • Status: Online
  • Last Deploy: Just now
  • +
  • External Link: luna.eu
  • diff --git a/data/repo-training-data.json b/data/repo-training-data.json new file mode 100644 index 0000000..ded65c3 --- /dev/null +++ b/data/repo-training-data.json @@ -0,0 +1,402 @@ +{ + "metadata": { + "generatedAt": "2025-12-25T02:17:42.139Z", + "repository": "networkbuster.net", + "version": "1.0.0" + }, + "categories": { + "server": { + "description": "Server implementations and variants", + "count": 9, + "files": [ + "api\\server-optimized.js", + "api\\server-universal.js", + "api\\server.js", + "auth-ui\\v750\\server.js", + "server-audio.js", + "server-enhanced.js", + "server-optimized.js", + "server-universal.js", + "server.js" + ] + }, + "docs": { + "description": "Documentation files", + "count": 104, + "files": [ + ".azure\\DEPLOYMENT.md", + ".azure\\documentation\\00-index.md", + ".azure\\documentation\\01-executive-summary.md", + ".azure\\documentation\\02-hidden-tools.md", + ".azure\\documentation\\03-exposed-secrets.md", + ".azure\\documentation\\04-azure-infrastructure.md", + ".azure\\documentation\\05-cicd-pipelines.md", + ".azure\\documentation\\06-docker-config.md", + ".azure\\documentation\\07-git-hooks.md", + ".azure\\documentation\\08-api-server.md", + ".azure\\documentation\\09-frontend-apps.md", + ".azure\\documentation\\10-deployment-status.md", + ".azure\\documentation\\11-security-audit.md", + ".azure\\documentation\\12-quick-reference.md", + ".azure\\QUICKSTART.md", + ".azure\\README.md", + ".github\\README.md", + "AEROSPACE_GALAXY_NAVIGATION.md", + "AI_TRAINING_PIPELINE_SETUP.md", + "android\\antigravity\\README.md", + "AUDIO-STREAMING-GUIDE.md", + "auth-ui\\v750\\README.md", + "AZURE_STORAGE_READY_cleanskiier27.md", + "AZURE_STORAGE_SETUP_cleanskiier27.md", + "BIOS-OPTIMIZATION-GUIDE.md", + "BUDGET_AND_DETAILS.md", + "BUILD-REPORT.md", + "CHANGELOG.md", + "COMPLETION-ACKNOWLEDGMENT.md", + "contrib\\Cleanskiier27-final\\CONTRIBUTORS.md", + "contrib\\Cleanskiier27-final\\docs\\NETWORK-BOOST.md", + 
"contrib\\Cleanskiier27-final\\PR_NOTE.md", + "contrib\\Cleanskiier27-final\\README.md", + "CUSTOM-DOMAIN-SETUP.md", + "DATACENTRA-STATUS.md", + "DATA_STORAGE_AND_VISITOR_TRACKING.md", + "DEPENDENCIES.md", + "DEPLOYMENT-REFERENCE-CARD.md", + "DEPLOYMENT_DASHBOARD.md", + "DEPLOYMENT_READINESS_MANIFEST.md", + "DEVICE_REGISTRATION_GOAL.md", + "DEV_ENVIRONMENT.md", + "DISPLAY_FIXES_SUMMARY.md", + "DNS-A-RECORD-SETUP.md", + "DOCKER-TROUBLESHOOTING.md", + "docs\\AI_TRAINING_AND_DATA_PERSONALIZATION.md", + "docs\\environmental-data\\lunar-conditions.md", + "docs\\IMPLEMENTATION_GUIDE.md", + "docs\\KEEPALIVE.md", + "docs\\NETWORK-BOOST.md", + "docs\\operational-protocols\\standard-operation.md", + "docs\\README-DEVELOPER.md", + "docs\\RECYCLING-AI.md", + "docs\\research\\bibliography.md", + "docs\\STERILIZATION.md", + "docs\\STERILIZATION_CHECKLIST.md", + "docs\\technical-specs\\material-processing.md", + "docs\\technical-specs\\system-architecture.md", + "DOMAIN-CONFIGURATION-STATUS.md", + "DOMAIN-SETUP-SUMMARY.md", + "D_DRIVE_BACKUP_SUMMARY.md", + "FLASH-COMMANDS-GUIDE.md", + "GALAXY_INTEGRATION_GUIDE.md", + "GALAXY_NAVIGATION_COMPLETE.md", + "HYPERV-LINUX-SETUP.md", + "HYPERV-QUICK-START.md", + "IMMERSIVE_READER_INTEGRATION.md", + "IMPLEMENTATION-SUMMARY.md", + "KQL_ANALYTICS_QUERIES.md", + "luna-recycle\\README.md", + "MASTER_INDEX.md", + "MATERIALS.md", + "NETWORK_PROXY_GUIDE.md", + "NETWORK_PROXY_STATUS.md", + "OPTIMIZATION_COMPLETE.md", + "os\\lfs\\README.md", + "packages\\README.md", + "PHASE_12_COMPLETION_REPORT.md", + "PROJECT-SUMMARY.md", + "PROXY_SETUP_COMPLETE.md", + "PR_NOTE.md", + "PUBLIC-VISIBILITY.md", + "PUSH-DATACENTRA.md", + "README-ANNOUNCEMENT.md", + "README-DATACENTRA.md", + "README-SECURITY-TIMELINE.md", + "README.md", + "RELEASE-v1.0.1.md", + "scripts\\installer\\branding\\README.md", + "scripts\\README-nbapp.md", + "scripts\\README.md", + "SECURE_FILES_NOT_TRACKED.md", + "SECURITY-TIMELINE-SUMMARY.md", + "SERVER_STARTUP.md", + 
"SETUP_COMPLETE_STATUS.md", + "SOURCE_LOG_CLEANED.md", + "SSH_KEY_SETUP.md", + "SSH_SETUP_GUIDE.md", + "templates\\sterilization-form.md", + "TOOLS_INSTALLATION_SUMMARY.md", + "UNIVERSAL-CODE-IMPLEMENTATION.md", + "UNIVERSAL-SERVER-GUIDE.md", + "VERCEL-DOMAIN-SETUP-GUIDE.md", + "WORKSPACE_GUIDE.md" + ] + }, + "config": { + "description": "Configuration files", + "count": 44, + "files": [ + ".azure\\azure.yaml", + ".github\\cspell\\cspell.json", + ".github\\deployment.config.json", + ".github\\workflows\\ci.yml", + ".github\\workflows\\deploy-azure.yml", + ".github\\workflows\\deploy.yml", + ".github\\workflows\\integration-device-registration.yml", + ".github\\workflows\\lfs-build.yml", + ".github\\workflows\\lfs-cache-validate.yml", + ".github\\workflows\\network-boost-ci.yml", + ".github\\workflows\\push-datacentra.yml", + ".github\\workflows\\recycle-ai-demo.yml", + ".github\\workflows\\release.yml", + ".github\\workflows\\render-diagrams.yml", + ".github\\workflows\\smoke-e2e-openai.yml", + ".github\\workflows\\sterilization-docs.yml", + ".github\\workflows\\sync-branches.yml", + ".github\\workflows\\test-ai-robot.yml", + ".github\\workflows\\test-openai-secret.yml", + ".vscode\\launch.json", + ".vscode\\tasks.json", + "android\\antigravity\\.github\\workflows\\build-apk.yml", + "api\\package-lock.json", + "api\\package.json", + "api\\schema\\device-registration.json", + "api\\vercel.json", + "challengerepo\\real-time-overlay\\package-lock.json", + "challengerepo\\real-time-overlay\\package.json", + "dashboard\\package-lock.json", + "dashboard\\package.json", + "data\\system-specifications.json", + "dev-config.json", + "docker-compose-flash.yml", + "docker-compose.yml", + "infra\\parameters.json", + "instances\\test-auto-ms.json", + "package-lock.json", + "package.json", + "packages\\docker\\config.json", + "packages\\flatpak\\net.networkbuster.Server.json", + "packages\\snap\\snapcraft.yaml", + "packages\\winget\\NetworkBuster.NetworkBuster.yaml", + 
"reports\\networkbuster-firewall.json", + "vercel.json" + ] + }, + "powershell": { + "description": "PowerShell automation scripts", + "count": 56, + "files": [ + "ANDREW.ps1", + "android\\antigravity\\scripts\\build-and-install.ps1", + "boot-to-bios.ps1", + "build-all.ps1", + "cloud-storage-manager.ps1", + "configure-custom-domain.ps1", + "contrib\\Cleanskiier27-final\\scripts\\network-boost.ps1", + "deploy-ai-training.ps1", + "deploy-azure.ps1", + "deploy-docker-to-acr.ps1", + "deploy-galaxy-navigation.ps1", + "deploy-storage-azure.ps1", + "dev-server.ps1", + "os\\lfs\\validate-cache.ps1", + "packages\\chocolatey\\tools\\chocolateyInstall.ps1", + "packages\\chocolatey\\tools\\chocolateyUninstall.ps1", + "run-elevated.ps1", + "scripts\\apply-sterilization.ps1", + "scripts\\apply-to-upstream.ps1", + "scripts\\build-nsis.ps1", + "scripts\\compare-with-luna.ps1", + "scripts\\copy-to-drive.ps1", + "scripts\\create-shortcut.ps1", + "scripts\\detect-dotnet-projects.ps1", + "scripts\\generate-icons.ps1", + "scripts\\generate-project-index.ps1", + "scripts\\install-datacentra.ps1", + "scripts\\install-nbapp-service.ps1", + "scripts\\install-node-msi.ps1", + "scripts\\install-nvm.ps1", + "scripts\\install-service-nssm.ps1", + "scripts\\install-watchdog-task.ps1", + "scripts\\installer\\convert-icon.ps1", + "scripts\\network-boost.ps1", + "scripts\\render-local.ps1", + "scripts\\render-mermaid.ps1", + "scripts\\set-openai-key.ps1", + "scripts\\start-test-instance.ps1", + "scripts\\sync-drives.ps1", + "scripts\\test-ai-robot.ps1", + "scripts\\test-crash.ps1", + "scripts\\test-local-build.ps1", + "scripts\\test-recycle-api.ps1", + "scripts\\transform-ai-training.ps1", + "scripts\\transform-recycling-data.ps1", + "scripts\\update-materials-and-push.ps1", + "scripts\\update-wsl.ps1", + "scripts\\watchdog.ps1", + "setup-admin.ps1", + "setup-ssh-agent.ps1", + "start-all-services.ps1", + "start-local-dev.ps1", + "start-security-timeline.ps1", + "Start-Server.ps1", + 
"tools\\robot-analyzer.ps1", + "verify-admin.ps1" + ] + }, + "docker": { + "description": "Docker containerization configs", + "count": 5, + "files": [ + "auth-ui\\v750\\Dockerfile", + "challengerepo\\real-time-overlay\\Dockerfile", + "Dockerfile", + "Dockerfile.flash", + "os\\lfs\\Dockerfile" + ] + } + }, + "fileRelationships": [], + "patterns": { + "naming": [ + { + "style": "kebabCase", + "count": 213, + "examples": [ + "00-index.md", + "01-executive-summary.md", + "02-hidden-tools.md", + "03-exposed-secrets.md", + "04-azure-infrastructure.md" + ] + }, + { + "style": "camelCase", + "count": 20, + "examples": [ + "AndroidManifest.xml", + "MainActivity.kt", + "AvatarWorld.jsx", + "CameraFeed.jsx", + "ConnectionGraph.jsx" + ] + }, + { + "style": "uppercase", + "count": 58, + "examples": [ + "AZURE_DEVOPS_DASHBOARD.html", + "CONSOLIDATED_INDEX.html", + "DEPLOYMENT.md", + "DOCUMENTATION_PORTAL.html", + "QUICKSTART.md" + ] + }, + { + "style": "lowercaseWithDots", + "count": 7, + "examples": [ + "deployment.config.json", + "postcss.config.js", + "tailwind.config.js", + "vite.config.js", + "vite.config.js" + ] + } + ], + "structure": [], + "dependencies": [] + }, + "serverVariants": { + "files": [ + "server-audio.js", + "server-enhanced.js", + "server-optimized.js", + "server-universal.js", + "server.js" + ], + "purposes": { + "server-audio.js": { + "summary": "Audio streaming server", + "features": [ + "audio-streaming", + "media-handling" + ], + "ports": [], + "dependencies": [ + "express", + "path", + "url" + ] + }, + "server-enhanced.js": { + "summary": "Enhanced server with additional features", + "features": [], + "ports": [], + "dependencies": [ + "express", + "path", + "url", + "os" + ] + }, + "server-optimized.js": { + "summary": "Performance-optimized server", + "features": [ + "compression", + "caching" + ], + "ports": [], + "dependencies": [ + "express", + "path", + "url", + "os", + "compression", + "helmet" + ] + }, + "server-universal.js": { + "summary": 
"Universal server with all features", + "features": [ + "multi-purpose", + "comprehensive" + ], + "ports": [], + "dependencies": [ + "express", + "path", + "url", + "os" + ] + }, + "server.js": { + "summary": "Main production server", + "features": [ + "core", + "production" + ], + "ports": [], + "dependencies": [ + "express", + "path", + "url", + "os", + "./api/recycle.js", + "./api/devices.js" + ] + } + }, + "consolidationOpportunities": [ + { + "recommendation": "Consider using a single server.js with feature flags", + "variants": [ + "server-audio.js", + "server-enhanced.js", + "server-optimized.js", + "server-universal.js", + "server.js" + ], + "approach": "Use environment variables to enable/disable features" + } + ] + } +} \ No newline at end of file diff --git a/deploy-azure.ps1 b/deploy-azure.ps1 index f487060..eb6473b 100644 --- a/deploy-azure.ps1 +++ b/deploy-azure.ps1 @@ -4,8 +4,14 @@ param( [string]$ResourceGroup = "networkbuster-rg", [string]$Location = "eastus", - [string]$RegistryName = "networkbusterlo25gft5nqwzg" -) + [string]$RegistryName = "networkbusterlo25gft5nqwzg", + [string]$DotNetProject = '', # Path to dotnet project folder containing Dockerfile (optional) + [string]$DotNetImage = 'networkbuster-dotnet-server', + [string]$Domain = 'www.networkbuster.org', + [string]$BrandSuffix = ' โ€” AI style', + [switch]$MapDomain, # If supplied, attempt to map $Domain to resulting Container App (prints instructions if not possible) + [switch]$SetupServiceBus # If supplied, create Azure Service Bus namespace and topic for device registrations +) Write-Host "๐Ÿš€ NetworkBuster Azure Deployment" -ForegroundColor Cyan Write-Host "===================================" -ForegroundColor Cyan @@ -29,6 +35,39 @@ $registryUrl = $registry.loginServer Write-Host "โœ“ Registry: $registryUrl" -ForegroundColor Green Write-Host "" +# Setup Azure Service Bus if requested +if ($SetupServiceBus) { + Write-Host "๐ŸšŒ Setting up Azure Service Bus..." 
-ForegroundColor Yellow + $sbNamespace = "networkbuster-sb" + $topicName = "device-registrations.v1" + + # Check if namespace exists + $sbExists = az servicebus namespace show --resource-group $ResourceGroup --name $sbNamespace --query name -o tsv 2>$null + if (-not $sbExists) { + Write-Host "โœจ Creating Service Bus namespace '$sbNamespace'..." -ForegroundColor Yellow + az servicebus namespace create --resource-group $ResourceGroup --name $sbNamespace --location $Location --sku Standard + Write-Host "โœ“ Service Bus namespace created" -ForegroundColor Green + } else { + Write-Host "โœ“ Service Bus namespace '$sbNamespace' already exists" -ForegroundColor Green + } + + # Create topic + $topicExists = az servicebus topic show --resource-group $ResourceGroup --namespace-name $sbNamespace --name $topicName --query name -o tsv 2>$null + if (-not $topicExists) { + Write-Host "โœจ Creating topic '$topicName'..." -ForegroundColor Yellow + az servicebus topic create --resource-group $ResourceGroup --namespace-name $sbNamespace --name $topicName + Write-Host "โœ“ Topic created" -ForegroundColor Green + } else { + Write-Host "โœ“ Topic '$topicName' already exists" -ForegroundColor Green + } + + # Get connection string + $sbConnectionString = az servicebus namespace authorization-rule keys list --resource-group $ResourceGroup --namespace-name $sbNamespace --name RootManageSharedAccessKey --query primaryConnectionString -o tsv + Write-Host "โœ“ Service Bus connection string retrieved" -ForegroundColor Green + Write-Host " (Set as env var: AZURE_SERVICEBUS_CONNECTION_STRING)" -ForegroundColor Cyan + Write-Host "" +} + # Check Docker Write-Host "๐Ÿณ Checking Docker..." 
-ForegroundColor Yellow try { @@ -63,9 +102,45 @@ try { Write-Host "โœ“ Overlay UI image pushed" -ForegroundColor Green } + # Optional: build/push DotNet image if requested + if ($DotNetProject -and (Test-Path -Path $DotNetProject)) { + Write-Host ""; Write-Host "๐Ÿ”จ Building DotNet image from: $DotNetProject" -ForegroundColor Yellow + try { + az acr build --registry $RegistryName --image $DotNetImage:latest $DotNetProject + Write-Host "โœ“ DotNet image built and pushed to: $registryUrl/$DotNetImage:latest" -ForegroundColor Green + + # Deploy or update Container App for DotNet + $dotnetAppName = 'networkbuster-dotnet' + $exists = az containerapp show --name $dotnetAppName --resource-group $ResourceGroup --query name -o tsv 2>$null + if ($exists) { + Write-Host "๐Ÿ” Updating Container App '$dotnetAppName' with new image..." -ForegroundColor Yellow + az containerapp update --name $dotnetAppName --resource-group $ResourceGroup --image "$registryUrl/$DotNetImage:latest" + } else { + Write-Host "โœจ Creating Container App '$dotnetAppName' (ingress enabled) ..." -ForegroundColor Yellow + az containerapp create --name $dotnetAppName --resource-group $ResourceGroup --environment networkbuster-env --image "$registryUrl/$DotNetImage:latest" --ingress 'external' --target-port 80 + } + + $dotnetFqdn = az containerapp show --name $dotnetAppName --resource-group $ResourceGroup --query 'properties.configuration.ingress.fqdn' -o tsv + Write-Host "โœ“ DotNet Container App: $dotnetFqdn" -ForegroundColor Green + + if ($MapDomain) { + Write-Host ""; Write-Host "โš ๏ธ Attempting to map domain '$($Domain)$BrandSuffix' to $dotnetAppName" -ForegroundColor Yellow + Write-Host "Please ensure DNS: create a CNAME record 'www' pointing to: $dotnetFqdn" -ForegroundColor Cyan + Write-Host "After DNS propagates, follow Azure docs to bind the custom domain and issue a certificate for the Container App." 
-ForegroundColor Cyan + Write-Host "Docs: https://learn.microsoft.com/azure/container-apps/custom-domains" -ForegroundColor Cyan + } else { + Write-Host "To expose via custom domain: Create CNAME 'www' -> $dotnetFqdn and then bind certificate per Azure docs." -ForegroundColor Cyan + } + } catch { + Write-Warning "DotNet build or Container App update failed: $($_.Exception.Message)" + } + } else { + Write-Host "No DotNet project supplied or path not found; skipping DotNet build." -ForegroundColor Yellow + } + Write-Host "" Write-Host "โœ… Docker images built and pushed successfully" -ForegroundColor Green -} +} catch { Write-Host "โš ๏ธ Docker is not running or not installed" -ForegroundColor Yellow Write-Host "๐Ÿ“ Skip local Docker builds" -ForegroundColor Yellow @@ -78,6 +153,11 @@ Write-Host "============================" -ForegroundColor Cyan Write-Host "Resource Group: $ResourceGroup" Write-Host "Container Registry: $registryUrl" Write-Host "Location: $Location" +if ($SetupServiceBus) { + Write-Host "Service Bus Namespace: $sbNamespace" + Write-Host "Topic: $topicName" + Write-Host "Connection String: [hidden - set AZURE_SERVICEBUS_CONNECTION_STRING]" +} Write-Host "" Write-Host "โœ… Base infrastructure is ready for deployment!" -ForegroundColor Green Write-Host "" @@ -86,4 +166,8 @@ Write-Host "1. Build and push Docker images (or use the script with Docker runni Write-Host "2. Update Container Apps with the new images using:" Write-Host " az containerapp create --name networkbuster-server ..." Write-Host " az containerapp create --name networkbuster-overlay ..." +if ($SetupServiceBus) { + Write-Host "3. Set environment variable: `$env:AZURE_SERVICEBUS_CONNECTION_STRING = '$sbConnectionString'" + Write-Host "4. 
Deploy consumer worker as Container App or Function App" +} Write-Host "" diff --git a/deploy-azure.sh b/deploy-azure.sh index 739a1c5..f2e1d2f 100644 --- a/deploy-azure.sh +++ b/deploy-azure.sh @@ -14,6 +14,7 @@ echo "==================================" RESOURCE_GROUP="networkbuster-rg" REGISTRY_NAME=$(az deployment group show --resource-group $RESOURCE_GROUP --name main --query 'properties.outputs.containerRegistryLoginServer.value' -o tsv | cut -d'.' -f1) REGISTRY_URL=$(az deployment group show --resource-group $RESOURCE_GROUP --name main --query 'properties.outputs.containerRegistryLoginServer.value' -o tsv) +BRAND_SUFFIX=" - AI style" echo -e "${GREEN}โœ“ Resource Group: $RESOURCE_GROUP${NC}" echo -e "${GREEN}โœ“ Registry: $REGISTRY_URL${NC}" @@ -30,6 +31,32 @@ az acr build --registry $REGISTRY_NAME --image networkbuster-server:latest --ima echo -e "${YELLOW}๐Ÿ”จ Building Overlay UI image...${NC}" az acr build --registry $REGISTRY_NAME --image networkbuster-overlay:latest --image networkbuster-overlay:$(git rev-parse --short HEAD) challengerepo/real-time-overlay +# Optional: Build & push a .NET image if DOTNET_PROJECT is provided +if [ -n "$DOTNET_PROJECT" ]; then + echo -e "${YELLOW}๐Ÿ”จ Building DotNet image from $DOTNET_PROJECT...${NC}" + az acr build --registry $REGISTRY_NAME --image ${DOTNET_IMAGE:-networkbuster-dotnet-server}:latest $DOTNET_PROJECT + echo -e "${GREEN}โœ“ DotNet image built and pushed: $REGISTRY_URL/${DOTNET_IMAGE:-networkbuster-dotnet-server}:latest${NC}" + + # Deploy or update dotnet container app + DOTNET_APP_NAME="networkbuster-dotnet" + if az containerapp show --name $DOTNET_APP_NAME --resource-group $RESOURCE_GROUP > /dev/null 2>&1; then + echo -e "${YELLOW}๐Ÿ” Updating Container App '$DOTNET_APP_NAME' with new image...${NC}" + az containerapp update --name $DOTNET_APP_NAME --resource-group $RESOURCE_GROUP --image $REGISTRY_URL/${DOTNET_IMAGE:-networkbuster-dotnet-server}:latest + else + echo -e "${YELLOW}โœจ Creating Container App 
'$DOTNET_APP_NAME' (ingress enabled)...${NC}" + az containerapp create --name $DOTNET_APP_NAME --resource-group $RESOURCE_GROUP --environment networkbuster-env --image $REGISTRY_URL/${DOTNET_IMAGE:-networkbuster-dotnet-server}:latest --ingress 'external' --target-port 80 + fi + + DOTNET_FQDN=$(az containerapp show --name $DOTNET_APP_NAME --resource-group $RESOURCE_GROUP --query 'properties.configuration.ingress.fqdn' -o tsv) + echo -e "${GREEN}โœ“ DotNet Container App: $DOTNET_FQDN${NC}" + if [ -n "$MAP_DOMAIN" ] && [ -n "$DOMAIN" ]; then + echo -e "\n${YELLOW}โš ๏ธ DNS NOTE: Create a CNAME record 'www' -> $DOTNET_FQDN (or your desired host) and then follow Azure Container Apps docs to bind a custom domain and certificate.${NC}" + echo "Docs: https://learn.microsoft.com/azure/container-apps/custom-domains" + else + echo -e "\nCreate CNAME 'www' -> $DOTNET_FQDN to expose the dotnet app via your DNS provider." + fi +fi + # Update Container Apps echo -e "${YELLOW}๐Ÿš€ Updating Container Apps...${NC}" az containerapp update \ @@ -48,3 +75,5 @@ echo "" echo -e "${YELLOW}๐Ÿ“Š Deployment URLs:${NC}" echo "Main Server: $(az containerapp show --name networkbuster-server --resource-group $RESOURCE_GROUP --query 'properties.configuration.ingress.fqdn' -o tsv)" echo "Overlay UI: $(az containerapp show --name networkbuster-overlay --resource-group $RESOURCE_GROUP --query 'properties.configuration.ingress.fqdn' -o tsv)" +echo "" +echo "Site: https://networkbuster.org$BRAND_SUFFIX" diff --git a/docs/KEEPALIVE.md b/docs/KEEPALIVE.md new file mode 100644 index 0000000..172efdd --- /dev/null +++ b/docs/KEEPALIVE.md @@ -0,0 +1,26 @@ +# Plan: Install as Windows Service (NSSM) โ€” Steps & Considerations + +Goal: run NetworkBuster as a native Windows service with automatic restart and robust management. + +Steps (high level): +1. Download NSSM (https://nssm.cc) and place `nssm.exe` in a fixed path (e.g., `C:\tools\nssm\nssm.exe`). +3. 
Install service (example using NSSM): + - Install script included: `scripts/install-service-nssm.ps1` (run as Administrator) + - Example manual command: `nssm install NetworkBuster "powershell.exe" "-NoProfile -ExecutionPolicy Bypass -File \"S:\NetworkBuster_Production\scripts\watchdog.ps1\" -AppExe \"C:\Program Files\nodejs\node.exe\" -AppArgs \"start-servers.js\" -WorkingDir \"S:\NetworkBuster_Production\" -LogDir \"S:\NetworkBuster_Production\logs\" -HealthUrl \"http://localhost:3001/api/health\"` + - Configure Startup directory: `S:\NetworkBuster_Production` +3. Configure service settings via NSSM (or nssm set commands): + - Log output to `S:\NetworkBuster_Production\logs\service.out.log` and `service.err.log`. + - On exit, set Restart on unexpected exit, with a delay (e.g., 5 seconds) and no back-off limit. +4. Set Windows Recovery options (Services.msc > Recovery): First failure: Restart Service; Second failure: Restart; Subsequent: Run Program/Restart. +5. Configure service to run under a dedicated service account if the app needs network or drive access to mapped volumes (create `NetworkBusterSvc` with least privileges and grant necessary file permissions to S:). +6. Health checking: configure a secondary small monitor process or use NSSM's stdout health hooks; consider a scheduled monitor script to verify `/api/health` and restart service via `nssm restart` if necessary. + +Notes & safety: +- Installing a service requires admin privileges. +- If you prefer an installed, signed service, consider building a real Windows Service wrapper (.NET worker) which provides tighter integration and telemetry. + +Rollback: +- nssm remove NetworkBuster confirm +- Remove service account permissions + +I can implement this plan once you confirm and grant admin for install steps (or I can provide a script you run as admin). 
\ No newline at end of file diff --git a/docs/NETWORK-BOOST.md b/docs/NETWORK-BOOST.md new file mode 100644 index 0000000..8e981e7 --- /dev/null +++ b/docs/NETWORK-BOOST.md @@ -0,0 +1,22 @@ +# Network Boost โ€” Overview + +This document describes the optional "Network Boost" tuning available during installation or as a manual step. + +What it does (safe, recommended changes) +- Windows (via `netsh`): adjusts TCP autotuning, congestion provider (CTCP if available), RSS, and ECN settings (non-destructive; reversible). +- Linux (via `sysctl`): increases socket buffers, enables window scaling, optionally chooses congestion control if available (e.g., BBR). + +How it's applied +- During installation you can opt in by checking "Apply Network Boost" on the installer page. The installer runs a bundled script `scripts/network-boost.ps1` with the `-Apply` flag. +- Manually via npm script: + - Show recommended changes (dry-run): `npm run show:network-boost` + - Apply non-interactively: `npm run apply:network-boost` + +Reversion and safety +- The script records current settings in `scripts/network-boost.log` and creates a `scripts/network-boost-restore.ps1` (Windows/Linux) to restore previous settings. +- The script will prompt for confirmation unless run with `-Confirm:$false`. + +Notes +- Changes requiring admin/root will fail without proper privileges. +- Reboot may be required for some Windows settings to take effect. +- Always test in a controlled environment before applying to production servers. \ No newline at end of file diff --git a/docs/README-DEVELOPER.md b/docs/README-DEVELOPER.md new file mode 100644 index 0000000..6225c29 --- /dev/null +++ b/docs/README-DEVELOPER.md @@ -0,0 +1,123 @@ + # Developer Reverse-Engineering Guide ๐Ÿ” + + This document helps developers understand the architecture and code flow of the project so you can inspect, extend, debug, and test the app. 
+ + ## Quick overview + - Project: NetworkBuster (server, web UI, scripts, tooling, AI features) + - Primary runtime: Node.js (server.js) + - Key languages: JavaScript (ESM), PowerShell for automation & Windows service helpers + - New feature: AI-powered recycling assistant (see `api/recycle.js`, `lib/aiClient.js`) + + ## High-level architecture + + - `server.js` โ€” Express-based HTTP server and entry point. Exposes `/api/*` endpoints and serves `web-app/` static files. + - `api/` โ€” server-side routers. `api/recycle.js` contains recycling endpoints (recommend, feedback). + - `lib/` โ€” helpers and domain logic. `lib/aiClient.js` wraps LLM calls with heuristic fallback; `lib/profileStore.js` manages local JSON profiles and feedback storage. + - `web-app/` โ€” static frontend pages and simple client JS (`recycle.html`, `recycle.js`). + - `scripts/` โ€” admin & operational helpers (install-service-nssm.ps1, watchdog.ps1, installer helpers, transforms, tests). + - `data/` โ€” runtime data: `profiles/` and `feedback/` (JSON files). Do not leak these to external services without consent. + + ## How requests flow (AI feature example) + + 1. Client (UI or API consumer) POSTs to `/api/recycle/recommend` with items + optional userId. + 2. `api/recycle.js` loads the user profile (if present) from `lib/profileStore.js` and merges preferences. + 3. `api/recycle.js` calls `lib/aiClient.getRecommendations(items, context, prefs)`. + 4. `lib/aiClient` attempts an LLM call (OpenAI) if `OPENAI_API_KEY` is set; it expects JSON-like output and falls back to deterministic heuristics if the model fails or no key is present. + 5. The API responds with `recommendations` and `source` (e.g., `llm`, `heuristic`, or `fallback`). Feedback endpoints write JSON to `data/feedback/` for later review & training. + + ## Key files to inspect + - `server.js` โ€” start here to see middleware, endpoints registration, and static file serving. 
+ - `api/recycle.js` โ€” API contract and request handling for recycling features. + - `lib/aiClient.js` โ€” where LLM calls are made and heuristics implemented. Add provider adapters here. + - `lib/profileStore.js` โ€” simple filesystem-based profile & feedback storage. + - `scripts/install-service-nssm.ps1` โ€” see how the service is registered and NSSM is managed on Windows. + + ## Running locally (dev) + + 1. Ensure Node (24.x) is available or use the repo-local portable node (`tools/node` when present). + 2. Install dependencies in the repo root: + + ```powershell + npm install --no-audit --no-fund + ``` + + 3. Start the server (dev): + + ```powershell + node server.js + # server listens on port 3001 by default (see env PORT) + ``` + + 4. Open the recycling UI: `http://localhost:3001/recycle.html` (or the server root if configured to serve). + + 5. Quick test using PowerShell helper: + + ```powershell + .\scripts\test-recycle-api.ps1 + ``` + + ## Debugging tips + + - Check server logs (console output) and `/logs` for watchdog/service logs. + - If LLM output is unexpected, inspect the `raw` field returned from `lib/aiClient.getRecommendations` for model text. + - Use `scripts/set-openai-key.ps1` to set `OPENAI_API_KEY` for testing with OpenAI. + + ## How to extend the AI integration + + 1. Add provider adapters to `lib/aiClient.js` (wrap calls and normalize responses). + 2. If you add a retriever/knowledge base, add a new module like `lib/retriever.js` and call it from `aiClient` prior to prompt construction. + 3. Store curated feedback JSON lines in `data/feedback/` and add a transform script to convert them into training JSONL (`scripts/transform-recycling-data.ps1` is an example). + + ## Testing & CI + + - A demo workflow `/.github/workflows/recycle-ai-demo.yml` shows how to run a simple demo using `OPENAI_API_KEY` from repo secrets. + - Add unit tests for `lib/aiClient.js` and `api/recycle.js` to validate heuristics and API contract. 
+ + ## Security & privacy notes + + - Profiles/feedback should be handled with consent. Avoid sending PII to LLMs unless consented and masked. + - Keep secrets out of source control: use `.env` (gitignored) for local dev and GitHub Secrets for CI. + + ## Rollback & investigation checklist + + - To inspect the Windows service: `Get-Service -Name NetworkBuster` and `sc.exe qc NetworkBuster`. + - To stop/remove the NSSM service (if installed): `nssm stop NetworkBuster` and `nssm remove NetworkBuster confirm` (run elevated). + - Logs: `S:\NetworkBuster_Production\logs` or repo `logs` folder for local runs. + + --- + +## Diagrams + +Sequence diagrams and additional diagrams are in `docs/diagrams/`: + +- `recycle-sequence.mmd` โ€” request flow for `/api/recycle/recommend` (LLM + fallback) + + ![Recycle sequence](./diagrams/recycle-sequence.svg) + +- `feedback-sequence.mmd` โ€” UI feedback flow into `data/feedback` + + ![Feedback sequence](./diagrams/feedback-sequence.svg) + +- `watchdog-sequence.mmd` โ€” watchdog health check and restart loop + + ![Watchdog sequence](./diagrams/watchdog-sequence.svg) + +- `class-ai-profile.mmd` โ€” class diagram of AIClient/ProfileStore/RecycleAPI + + ![Class diagram](./diagrams/class-ai-profile.svg) + +- `data-pipeline.mmd` โ€” data transform -> JSONL -> training -> deployed model + + ![Data pipeline](./diagrams/data-pipeline.svg) + +- `component-overview.mmd` โ€” component diagram overview of server, UI, and ops + + ![Component overview](./diagrams/component-overview.svg) + +You can also render the `.mmd` files to SVG locally using the helper script (if you prefer to generate them yourself): + +```powershell +.\scripts\render-mermaid.ps1 +``` + +If you'd like, I can also generate a class diagram or extra sequence diagrams, add sample unit tests, or create a short screencast demo. What would you like next? 
๐Ÿš€ diff --git a/docs/RECYCLING-AI.md b/docs/RECYCLING-AI.md new file mode 100644 index 0000000..8d47262 --- /dev/null +++ b/docs/RECYCLING-AI.md @@ -0,0 +1,19 @@ +# Recycling AI โ€” design & notes + +This document describes the AI-powered recycling recommendation feature (MVP-level). + +Features +- POST /api/recycle/recommend โ€” accepts items and returns per-item recommendations. +- POST /api/recycle/feedback โ€” accepts feedback to store for later model tuning. + +Privacy +- Profiles are opt-in and stored as JSON in `data/profiles/`. +- Feedback is stored in `data/feedback/` and should be purged or aggregated before any external uploads. + +LLM Integration +- Controlled by `OPENAI_API_KEY` env var. If absent, the system falls back to deterministic heuristics. +- Responses from the model are parsed for JSON; if parsing fails, we fall back to heuristics. + +Next steps +- Add a small retriever or local knowledge base for municipality-specific rules. +- Implement fine-tuning / prompt engineering pipeline using the feedback dataset. diff --git a/docs/STERILIZATION.md b/docs/STERILIZATION.md new file mode 100644 index 0000000..3235145 --- /dev/null +++ b/docs/STERILIZATION.md @@ -0,0 +1,37 @@ +# Sterilization & Decontamination (Aerospace Instruments) + +**Context:** A common real-world scene is discovering a dirty, weathered car with someone sheltering inside. Before approaching or cleaning anything, prioritize the safety and dignity of any occupant โ€” do **not** disturb them without consent. Contact local outreach or social services for support and obtain explicit permission before handling personal effects or equipment. If an instrument is suspected to be contaminated (biohazard or chemical), defer to institutional biosafety/OSH procedures and qualified personnel. + +> **Important safety note:** Aerospace instruments are precision devices. Always consult the manufacturer's maintenance and contamination control guidance before cleaning. 
When in doubt, move the instrument to a controlled environment (lab/cleanroom) for decontamination and functional verification. + +## Quick scene assessment (before any cleaning) +1. Ensure the area is safe and stable. If someone is present, introduce yourself, explain intent, and ask permission to inspect the item. +2. If any hazardous materials, bodily fluids, or biological contamination is suspected, **stop** and contact the appropriate safety officer or public health authority. +3. Document the instrument (photos, serial, model) before touching it. +4. If feasible, transfer the instrument to a clean, ventilated area or lab for controlled work. + +## Step-by-step sterilization / decontamination (general safe procedure) +1. Prepare PPE and supplies: nitrile gloves, N95 respirator or PAPR, safety goggles/face shield, disposable gowns or coveralls, shoe covers, and waste bags. +2. Pre-clean (mechanical): remove gross debris, dust, and foreign matter with soft brushes and lint-free wipes. Avoid forcing debris into vents or connectors. +3. Isolation: power down the instrument and remove batteries/power sources when safe. Shield or mask optical/sensor surfaces. +4. Surface disinfection (electronics-safe): use lint-free wipes moistened with 70% isopropyl alcohol (IPA) and gently wipe external enclosures and non-porous surfaces. For sensitive optics, follow manufacturer-approved optical cleaning fluids and procedures. +5. Crevices and connectors: use sterile swabs lightly moistened with IPA to clean crevices and connector shells โ€” **do not** introduce liquids into connectors or open electronics. +6. HEPA-filtered local ventilation: when possible, perform cleaning with a local HEPA air cleaner to reduce airborne particulates. +7. UV-C (optional supplement): UV-C may reduce microbial load on exposed surfaces โ€” use only as a supplement and follow strict safety protocols (avoid human exposure, follow irradiance/time guidance). 
UV-C is not a substitute for mechanical cleaning and does not penetrate materials. +8. Disposal: place used wipes, gowns, and contaminated consumables in sealed biohazard or waste bags as required; label and dispose according to local regulations. +9. Drying & curing: allow surfaces to dry fully (follow chemical dwell times); do not re-power until alcohol is fully evaporated. +10. Functional check: after cleaning and reassembly, perform operational and calibration checks in a controlled environment; document results and any deviations. + +## When to escalate +- Any suspected biological contamination (blood, bodily fluids): stop and escalate to biosafety/public health. +- Instrument shows abnormal behavior after cleaning: stop use and contact manufacturer or qualified technician. + +--- + +## Empathy & ethics +- Respect the belongings and privacy of people experiencing homelessness. If you find instruments or items that may belong to someone sheltering in a vehicle, ask for consent and offer resources instead of immediately discarding personal items. + +## References & best practices +- Follow manufacturer maintenance manuals +- Institutional biosafety & occupational safety guidance +- Cleanroom and contamination control best practices diff --git a/docs/STERILIZATION_CHECKLIST.md b/docs/STERILIZATION_CHECKLIST.md new file mode 100644 index 0000000..86f23a4 --- /dev/null +++ b/docs/STERILIZATION_CHECKLIST.md @@ -0,0 +1,48 @@ +# Sterilization Checklist (Printable) + +Use this checklist when preparing and performing sterilization/decontamination of aerospace instruments. Follow institutional biosafety and manufacturer guidance. 
+ +Date: ____________________ +Technician: _______________ +Instrument: ______________ +Serial / ID: ______________ +Location: ________________ + +--- + +Pre-clean and safety +- [ ] Confirm consent from owner/occupant or authority +- [ ] Document instrument with photos and serial +- [ ] Don required PPE (gloves, respirator, eye protection, gown) +- [ ] Ensure local ventilation / HEPA available +- [ ] Power down instrument and remove power sources + +Mechanical cleaning +- [ ] Remove gross debris with soft brush +- [ ] Wipe surfaces with dry lint-free wipes +- [ ] Use swabs for crevices (do not allow fluids into connectors) + +Disinfection +- [ ] Apply 70% IPA to lint-free wipe and disinfect surfaces +- [ ] Use manufacturer-approved optical cleaner for optics +- [ ] Allow appropriate dwell time and dry fully + +Supplemental measures +- [ ] Run UV-C treatment (if used) โ€” record lamp model and exposure time +- [ ] HEPA filtration operational during procedure + +Post-clean checks +- [ ] Reinstall batteries/power and perform basic power-on tests +- [ ] Run calibration / functional checks per manufacturer +- [ ] Document results and any anomalies +- [ ] Seal and label waste, dispose per regulations + +Sign-off +- Technician signature: ______________________ Date: ___________ +- Supervisor sign-off (if required): _______________ + +--- + +Notes: +- If any suspected biohazard or chemical contamination is found, **stop** and contact biosafety/public health. +- Never use liquids inside open electronics or connectors; when in doubt, escalate to a qualified technician. 
diff --git a/docs/diagrams/class-ai-profile.mmd b/docs/diagrams/class-ai-profile.mmd new file mode 100644 index 0000000..3b3b88f --- /dev/null +++ b/docs/diagrams/class-ai-profile.mmd @@ -0,0 +1,20 @@ +classDiagram + class AIClient { + +getRecommendations(items, context, prefs) + -callOpenAI(prompt) + -heuristicRecommendations(items) + } + + class ProfileStore { + +getProfile(userId) + +saveProfile(userId, profile) + +appendFeedback(feedback) + } + + class RecycleAPI { + +POST /api/recycle/recommend + +POST /api/recycle/feedback + } + + AIClient --> RecycleAPI : used by + RecycleAPI --> ProfileStore : reads/writes diff --git a/docs/diagrams/class-ai-profile.svg b/docs/diagrams/class-ai-profile.svg new file mode 100644 index 0000000..78c21c7 --- /dev/null +++ b/docs/diagrams/class-ai-profile.svg @@ -0,0 +1,25 @@ + + + + + AIClient + + getRecommendations(items, context, prefs) + - callOpenAI(prompt) + - heuristicRecommendations(items) + + + ProfileStore + + getProfile(userId) + + saveProfile(userId, profile) + + appendFeedback(feedback) + + + RecycleAPI + + POST /recommend + + POST /feedback + + + + used by + + diff --git a/docs/diagrams/component-overview.mmd b/docs/diagrams/component-overview.mmd new file mode 100644 index 0000000..0837071 --- /dev/null +++ b/docs/diagrams/component-overview.mmd @@ -0,0 +1,17 @@ +graph LR + subgraph Server + API[/API Server\n(server.js)] + AI[AI Client] + PS[Profile Store] + end + UI --> API + API --> AI + API --> PS + AI -->|calls| LLM((LLM Provider)) + PS --> DB[(data/ profiles & feedback files)] + subgraph Ops + WD[Watchdog] + NSSM[NSSM Service] + end + WD --> API + NSSM --> WD diff --git a/docs/diagrams/component-overview.svg b/docs/diagrams/component-overview.svg new file mode 100644 index 0000000..ef579ac --- /dev/null +++ b/docs/diagrams/component-overview.svg @@ -0,0 +1,21 @@ + + + + + Server + API Server (server.js) + AI Client (lib/aiClient.js) + Profile Store (lib/profileStore.js) + + + Web UI + + + Ops + Watchdog + NSSM 
(optional) + + + serves + + diff --git a/docs/diagrams/data-pipeline.mmd b/docs/diagrams/data-pipeline.mmd new file mode 100644 index 0000000..3450c2b --- /dev/null +++ b/docs/diagrams/data-pipeline.mmd @@ -0,0 +1,7 @@ +flowchart LR + A[Raw Data (csv/md)] --> B[transform-recycling-data.ps1] + B --> C[data/recycling.jsonl] + C --> D[Fine-tune / prompt examples] + D --> E[Model artifact] + E --> F[Deployed AI Client] + F -->|used by| G[/api/recycle/recommend] diff --git a/docs/diagrams/data-pipeline.svg b/docs/diagrams/data-pipeline.svg new file mode 100644 index 0000000..01208c3 --- /dev/null +++ b/docs/diagrams/data-pipeline.svg @@ -0,0 +1,23 @@ + + + + + Raw data + + + transform-recycling-data.ps1 + + + data/recycling.jsonl + + + Training / Fine-tune + + + + + + + + Artifacts: JSONL -> model -> deployed AI client used by /api/recycle/recommend + diff --git a/docs/diagrams/emoji-stack.mmd b/docs/diagrams/emoji-stack.mmd new file mode 100644 index 0000000..2a19252 --- /dev/null +++ b/docs/diagrams/emoji-stack.mmd @@ -0,0 +1,15 @@ +%% Emoji stack diagram +flowchart TB + A["โ™ป๏ธ"] + B["๐Ÿฅค"] + C["๐Ÿงด"] + D["๐Ÿ“ฆ"] + + A --> B + B --> C + C --> D + + style A fill:#f0fff4,stroke:#0b6623,stroke-width:2px + style B fill:#fff7e6,stroke:#d97706,stroke-width:2px + style C fill:#eff6ff,stroke:#1e40af,stroke-width:2px + style D fill:#fef2f2,stroke:#7f1d1d,stroke-width:2px diff --git a/docs/diagrams/emoji-stack.svg b/docs/diagrams/emoji-stack.svg new file mode 100644 index 0000000..9efe46c --- /dev/null +++ b/docs/diagrams/emoji-stack.svg @@ -0,0 +1,13 @@ + + + + + โ™ป๏ธ + ๐Ÿฅค + ๐Ÿงด + ๐Ÿ“ฆ + Emoji stack (render) โ€” visual quick-reference for recycle items + diff --git a/docs/diagrams/feedback-sequence.mmd b/docs/diagrams/feedback-sequence.mmd new file mode 100644 index 0000000..ed4ba2b --- /dev/null +++ b/docs/diagrams/feedback-sequence.mmd @@ -0,0 +1,11 @@ +sequenceDiagram + participant UI as Web UI + participant Server as API Server + participant Store as Feedback 
Store + participant Analyst as Data Analyst + + UI->>Server: POST /api/recycle/feedback { userId, item, action, rating } + Server->>Store: appendFeedback(feedback) + Store-->>Server: file path + Server-->>UI: { ok: true } + Note over Store,Analyst: Feedback files aggregate in data/feedback for later review/training diff --git a/docs/diagrams/feedback-sequence.svg b/docs/diagrams/feedback-sequence.svg new file mode 100644 index 0000000..0af20ad --- /dev/null +++ b/docs/diagrams/feedback-sequence.svg @@ -0,0 +1,21 @@ + + + + + Web UI + + + API Server + + + Feedback Store + + + POST /api/recycle/feedback + + appendFeedback + + + Note: Feedback stored in data/feedback for later review and model training + + diff --git a/docs/diagrams/openai-secret-flow.mmd b/docs/diagrams/openai-secret-flow.mmd new file mode 100644 index 0000000..fd30b82 --- /dev/null +++ b/docs/diagrams/openai-secret-flow.mmd @@ -0,0 +1,12 @@ +sequenceDiagram + participant Dev as Developer + participant GH as GitHub + participant CI as GitHub Actions + participant App as NetworkBuster App + + Dev->>GH: Add OPENAI_API_KEY secret (via UI or CLI) + GH->>CI: Secrets available to workflows + CI->>CI: Validate secret (models endpoint) + CI->>App: Start server and run E2E smoke test (/api/recycle/recommend) + App-->>CI: Return recommendations (ok: true) + CI-->>Dev: Report result (success/failure) diff --git a/docs/diagrams/recycle-sequence.mmd b/docs/diagrams/recycle-sequence.mmd new file mode 100644 index 0000000..b46e6d6 --- /dev/null +++ b/docs/diagrams/recycle-sequence.mmd @@ -0,0 +1,19 @@ +sequenceDiagram + participant UI as Web UI + participant Server as API Server + participant Profile as Profile Store + participant AI as AI Client + participant LLM as LLM Provider + + UI->>Server: POST /api/recycle/recommend { items, userId? 
} + Server->>Profile: loadProfile(userId) + Profile-->>Server: profile (prefs) + Server->>AI: getRecommendations(items, context, prefs) + AI->>LLM: prompt + context (if OPENAI_API_KEY) + alt LLM returns JSON + LLM-->>AI: JSON recommendations + AI-->>Server: recommendations (source: llm) + else LLM fails or no key + AI-->>Server: heuristic recommendations (source: heuristic) + end + Server-->>UI: { recommendations, source } diff --git a/docs/diagrams/recycle-sequence.svg b/docs/diagrams/recycle-sequence.svg new file mode 100644 index 0000000..f4f7672 --- /dev/null +++ b/docs/diagrams/recycle-sequence.svg @@ -0,0 +1,53 @@ + + + + + + + + + + + + Web UI + + + API Server + + + Profile Store + + + AI Client + + + LLM Provider + + + + POST /api/recycle/recommend + + + loadProfile(userId) + + + getRecommendations() + + + prompt + context + + + JSON response or error + + + recommendations + + + return to API + + + response โ†’ UI + + + Note: If no LLM key or model fails, AI Client falls back to deterministic heuristics. 
+ diff --git a/docs/diagrams/service-install-sequence.mmd b/docs/diagrams/service-install-sequence.mmd new file mode 100644 index 0000000..1968d97 --- /dev/null +++ b/docs/diagrams/service-install-sequence.mmd @@ -0,0 +1,15 @@ +sequenceDiagram + participant Admin as Admin + participant Installer as install-nbapp-service.ps1 + participant Git as GitHub Repo + participant NSSM as NSSM Installer + participant Windows as Windows Service Manager + + Admin->>Installer: run install-nbapp-service.ps1 -InstallService + Installer->>Git: clone/pull repo (nbapp) + Installer->>Installer: npm install (if package.json) + Installer->>NSSM: call install-service-nssm.ps1 (elevate) + NSSM->>Windows: create service 'nbapp' + Windows-->>NSSM: service registered + NSSM-->>Installer: success + Installer-->>Admin: installation complete, service started diff --git a/docs/diagrams/service-install-sequence.svg b/docs/diagrams/service-install-sequence.svg new file mode 100644 index 0000000..8dcd62c --- /dev/null +++ b/docs/diagrams/service-install-sequence.svg @@ -0,0 +1,42 @@ + + + + + + + + + + + Admin + + + install-nbapp-service.ps1 + + + GitHub Repo + + + NSSM + + + Windows Service + + + run installer + + + clone / pull + + + npm install + + + call NSSM installer + + + service registered + + + Result: service started and nbapp installed (if UAC accepted) + diff --git a/docs/diagrams/watchdog-sequence.mmd b/docs/diagrams/watchdog-sequence.mmd new file mode 100644 index 0000000..1bd78e2 --- /dev/null +++ b/docs/diagrams/watchdog-sequence.mmd @@ -0,0 +1,17 @@ +sequenceDiagram + participant Watchdog + participant App as Application + participant Health as /api/health + participant Log as Logs + + Watchdog->>App: Start process + App-->>Watchdog: PID + loop monitor + Watchdog->>Health: GET /api/health + alt healthy + Health-->>Watchdog: 200 OK + else unhealthy + Watchdog->>App: Kill/Restart + App-->>Log: crash info + end + end diff --git a/docs/diagrams/watchdog-sequence.svg 
b/docs/diagrams/watchdog-sequence.svg new file mode 100644 index 0000000..f6fb56c --- /dev/null +++ b/docs/diagrams/watchdog-sequence.svg @@ -0,0 +1,22 @@ + + + + + Watchdog + + + Application + + + /api/health + + + start process + + + GET /api/health + + + If unhealthy: kill & restart + + diff --git a/drone_flight_system.py b/drone_flight_system.py new file mode 100644 index 0000000..8b5cb51 --- /dev/null +++ b/drone_flight_system.py @@ -0,0 +1,217 @@ +import time +import math +import random +import threading +import sys +from datetime import datetime +from pathlib import Path + +# Import security verification +try: + from security_verification import UserVerification, SecurityLevel + SECURITY_AVAILABLE = True +except ImportError: + SECURITY_AVAILABLE = False + print("โš ๏ธ WARNING: Security module not available. Running in unsecured mode.") + +class DroneState: + def __init__(self, drone_id): + self.id = drone_id + self.position = {"x": 0.0, "y": 0.0, "z": 0.0} + self.velocity = {"x": 0.0, "y": 0.0, "z": 0.0} + self.battery = 100.0 + self.status = "IDLE" + self.integrity = 100.0 + self.sensors_active = False + +class ScanAlgorithms: + """ + Advanced algorithms for automated drone patterns and matter detection. 
+ """ + + @staticmethod + def generate_spiral_search(center_x, center_y, max_radius, spacing=5.0): + """Generates a spiral flight path for area coverage.""" + path = [] + theta = 0 + r = 0 + while r < max_radius: + x = center_x + r * math.cos(theta) + y = center_y + r * math.sin(theta) + path.append({"x": x, "y": y, "z": 15.0}) # Default scan altitude + theta += 0.5 # Angle increment + r = (spacing * theta) / (2 * math.pi) + return path + + @staticmethod + def generate_grid_raster(width, height, altitude=20.0, density=10.0): + """Generates a lawnmower/raster pattern for detailed mapping.""" + path = [] + rows = int(height / density) + cols = int(width / density) + + for r in range(rows): + y = r * density + if r % 2 == 0: + # Left to Right + for c in range(cols): + path.append({"x": c * density, "y": y, "z": altitude}) + else: + # Right to Left + for c in range(cols - 1, -1, -1): + path.append({"x": c * density, "y": y, "z": altitude}) + return path + + @staticmethod + def analyze_matter_signature(sensor_data): + """ + Simulates real-time analysis of sensor data to identify matter composition. + Returns a confidence score and material type. + """ + # Simulated spectral analysis logic + signatures = { + "SILICA": (0.8, 0.9), + "FERROUS": (0.4, 0.6), + "ORGANIC": (0.1, 0.3), + "UNKNOWN": (0.0, 1.0) + } + + reading = sum(sensor_data) / len(sensor_data) if sensor_data else 0 + + for material, (low, high) in signatures.items(): + if low <= reading <= high: + return material, reading * 100 + return "ANOMALY", 99.9 + +class UnbreakableAutopilot: + """ + Self-healing, redundant control software for high-reliability flight. 
+ """ + def __init__(self, drone_state): + self.drone = drone_state + self.lock = threading.Lock() + self.running = False + self.error_log = [] + + def _watchdog(self): + """Internal watchdog to detect and correct system freezes or logic errors.""" + while self.running: + with self.lock: + if self.drone.integrity < 80: + print(f"[WATCHDOG] CRITICAL: Integrity drop on Drone {self.drone.id}. Rerouting power...") + self.drone.integrity += 10 # Self-repair simulation + + if self.drone.battery < 20 and self.drone.status != "RETURNING": + print(f"[WATCHDOG] LOW BATTERY: Forcing Return-to-Home for Drone {self.drone.id}") + self.drone.status = "RETURNING" + + time.sleep(1) + + def execute_pattern(self, pattern_name, waypoints): + self.running = True + self.drone.status = "FLYING" + self.drone.sensors_active = True + + # Start Watchdog in background + wd_thread = threading.Thread(target=self._watchdog, daemon=True) + wd_thread.start() + + print(f"\n>>> LAUNCHING DRONE {self.drone.id} - PATTERN: {pattern_name}") + print(f">>> SYSTEM: UNBREAKABLE MODE ACTIVE (Triple-Redundancy Check)") + + try: + for i, wp in enumerate(waypoints): + if not self.running: break + + # Simulate flight to waypoint + self.drone.position = wp + + # Simulate Sensor Scan + scan_data = [random.random() for _ in range(5)] + material, confidence = ScanAlgorithms.analyze_matter_signature(scan_data) + + print(f"[{datetime.now().strftime('%H:%M:%S')}] WP-{i}: {wp} | SCAN: {material} ({confidence:.1f}%)") + + # Simulate random turbulence/error + if random.random() < 0.05: + self._handle_error("Turbulence detected - Gyro destabilized") + + time.sleep(0.2) # Fast simulation + self.drone.battery -= 0.5 + + except Exception as e: + self._handle_error(f"Runtime Exception: {str(e)}") + finally: + self.land() + + def _handle_error(self, error_msg): + """Self-healing error handler.""" + self.error_log.append(error_msg) + print(f"!!! ERROR DETECTED: {error_msg}") + print("!!! 
INITIATING SELF-HEALING PROTOCOLS...") + time.sleep(0.5) + print(">>> ERROR CORRECTED. RESUMING FLIGHT PATH.") + self.drone.integrity -= 5 + + def land(self): + self.running = False + self.drone.status = "LANDED" + self.drone.sensors_active = False + print(f"\n>>> DRONE {self.drone.id} LANDED SAFELY. Mission Complete.") + print(f">>> Final Battery: {self.drone.battery:.1f}% | Integrity: {self.drone.integrity}%") + +def run_simulation(): + print("Initializing Drone Swarm Control Interface...") + print("Loading Unbreakable Flight Software v4.0...") + + # Security verification + if SECURITY_AVAILABLE: + verifier = UserVerification() + session = verifier.load_session() + + if not session: + print("\nโš ๏ธ SECURE SYSTEM: Authentication required") + success, session = verifier.authenticate() + if not success: + print("\nโŒ Unauthorized access denied. Exiting.") + sys.exit(1) + else: + print(f"โœ… Session verified: {session['username']} (Level {session['level']})") + + # Require operator level for drone control + if not verifier.require_level(SecurityLevel.OPERATOR): + print("\nโŒ Drone operations require Operator clearance (Level 3+)") + sys.exit(1) + + time.sleep(1) + + drone1 = DroneState(id="ALPHA-1") + autopilot = UnbreakableAutopilot(drone1) + + while True: + print("\n--- DRONE COMMAND CENTER ---") + print("1. Execute Spiral Search (Wide Area)") + print("2. Execute Grid Raster (Detailed Scan)") + print("3. Run Diagnostics") + print("4. 
Exit") + + choice = input("Select Mission Profile: ") + + if choice == "1": + path = ScanAlgorithms.generate_spiral_search(0, 0, 50) + autopilot.execute_pattern("SPIRAL_ALPHA", path) + elif choice == "2": + path = ScanAlgorithms.generate_grid_raster(40, 40) + autopilot.execute_pattern("GRID_BETA", path) + elif choice == "3": + print(f"Drone ID: {drone1.id}") + print(f"Battery: {drone1.battery}%") + print(f"Integrity: {drone1.integrity}%") + print(f"Location: {drone1.position}") + elif choice == "4": + break + else: + print("Invalid command.") + +if __name__ == "__main__": + run_simulation() diff --git a/extract_thumbnails.py b/extract_thumbnails.py new file mode 100644 index 0000000..fbfb6f4 --- /dev/null +++ b/extract_thumbnails.py @@ -0,0 +1,280 @@ +""" +NetworkBuster Network Map Thumbnail Extractor +Generates static thumbnail images from network topology +""" + +import os +import sys +import json +from datetime import datetime +from pathlib import Path + +def extract_network_thumbnails(): + """Extract and save network device thumbnails""" + + # Create thumbnails directory + thumb_dir = Path('network_thumbnails') + thumb_dir.mkdir(exist_ok=True) + + print("๐Ÿ“ธ Network Map Thumbnail Extractor") + print("="*60) + + # Device configurations for thumbnail generation + devices = { + 'workstation': { + 'icon': '๐Ÿ–ฅ๏ธ', + 'name': 'Primary Workstation', + 'type': 'Hardware', + 'status': 'online' + }, + 'router-wifi7': { + 'icon': '๐ŸŒ', + 'name': 'WiFi 7 Mesh Router', + 'type': 'Network', + 'status': 'online' + }, + 'router-networkbuster': { + 'icon': '๐Ÿ”ง', + 'name': 'NetworkBuster Router', + 'type': 'Network', + 'status': 'online' + }, + 'mesh-node-1': { + 'icon': '๐Ÿ“ก', + 'name': 'Mesh Node Alpha', + 'type': 'Network', + 'status': 'online' + }, + 'mesh-node-2': { + 'icon': '๐Ÿ“ก', + 'name': 'Mesh Node Beta', + 'type': 'Network', + 'status': 'online' + }, + 'mesh-node-3': { + 'icon': '๐Ÿ“ก', + 'name': 'Mesh Node Gamma', + 'type': 'Network', + 'status': 'online' + 
}, + 'service-web': { + 'icon': 'โšก', + 'name': 'Web Server (3000)', + 'type': 'Service', + 'status': 'running' + }, + 'service-api': { + 'icon': 'โšก', + 'name': 'API Server (3001)', + 'type': 'Service', + 'status': 'running' + }, + 'service-audio': { + 'icon': 'โšก', + 'name': 'Audio Stream (3002)', + 'type': 'Service', + 'status': 'running' + }, + 'service-mission': { + 'icon': 'โšก', + 'name': 'Mission Control (5000)', + 'type': 'Service', + 'status': 'running' + } + } + + # Generate HTML thumbnail for each device + extracted = 0 + for device_id, info in devices.items(): + thumb_html = f""" + + + + {info['name']} - Thumbnail + + + +
    +
    +
    {info['icon']}
    +
    +

    {info['name']}

    +

    {info['type']}

    +
    +
    +
    {info['status'].upper()}
    + +
    + +""" + + # Save thumbnail HTML + thumb_path = thumb_dir / f"{device_id}.html" + with open(thumb_path, 'w', encoding='utf-8') as f: + f.write(thumb_html) + + extracted += 1 + print(f" โœ… {info['name']}") + + # Create thumbnail index + index_html = """ + + + + NetworkBuster Thumbnail Gallery + + + +
    +

    ๐ŸŒ NetworkBuster Thumbnail Gallery

    +

    Extracted Network Device Thumbnails

    +

    Generated: """ + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + """

    +
    + + +""" + + # Save index + index_path = thumb_dir / 'index.html' + with open(index_path, 'w', encoding='utf-8') as f: + f.write(index_html) + + # Create metadata JSON + metadata = { + 'generated': datetime.now().isoformat(), + 'version': '1.0.1', + 'total_devices': len(devices), + 'devices': devices + } + + metadata_path = thumb_dir / 'thumbnails.json' + with open(metadata_path, 'w', encoding='utf-8') as f: + json.dump(metadata, f, indent=2) + + print("\n" + "="*60) + print(f"โœ… Extracted {extracted} thumbnails") + print(f"๐Ÿ“ Location: {thumb_dir.absolute()}") + print(f"๐ŸŒ Index: {index_path.absolute()}") + print(f"๐Ÿ“Š Metadata: {metadata_path.absolute()}") + + return thumb_dir, extracted + +if __name__ == '__main__': + extract_network_thumbnails() diff --git a/flash_git_backup.py b/flash_git_backup.py new file mode 100644 index 0000000..5e1b045 --- /dev/null +++ b/flash_git_backup.py @@ -0,0 +1,252 @@ +""" +NetworkBuster - Git Repository Flash Backup +Fast backup of entire git repository to multiple drives +""" + +import os +import shutil +import subprocess +from datetime import datetime +from pathlib import Path +import json + +def get_git_info(): + """Get current git repository information""" + try: + branch = subprocess.check_output( + ['git', 'branch', '--show-current'], + stderr=subprocess.DEVNULL + ).decode().strip() + + commit = subprocess.check_output( + ['git', 'rev-parse', '--short', 'HEAD'], + stderr=subprocess.DEVNULL + ).decode().strip() + + return { + 'branch': branch, + 'commit': commit, + 'timestamp': datetime.now().isoformat() + } + except: + return None + +def get_repo_stats(): + """Get repository statistics""" + stats = { + 'files': 0, + 'size': 0, + 'folders': 0 + } + + for root, dirs, files in os.walk('.'): + # Skip .git and other hidden folders + dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ['node_modules', '__pycache__', '.venv']] + + stats['folders'] += len(dirs) + stats['files'] += len(files) + + for file in 
def get_repo_stats(root='.'):
    """Get repository statistics (file/folder counts and total size).

    `root` defaults to the current directory, preserving the original
    zero-argument call; hidden folders plus node_modules/__pycache__/.venv
    are skipped.
    """
    stats = {
        'files': 0,
        'size': 0,
        'folders': 0
    }

    for dirpath, dirs, files in os.walk(root):
        # Skip .git and other hidden folders (pruned in place so
        # os.walk does not descend into them)
        dirs[:] = [d for d in dirs
                   if not d.startswith('.') and d not in ['node_modules', '__pycache__', '.venv']]

        stats['folders'] += len(dirs)
        stats['files'] += len(files)

        for file in files:
            try:
                stats['size'] += os.path.getsize(os.path.join(dirpath, file))
            except OSError:
                # File vanished or is unreadable; size is best-effort.
                pass

    return stats


def format_size(bytes):
    """Format bytes to a human-readable string (B through TB)."""
    size = float(bytes)
    for unit in ['B', 'KB', 'MB', 'GB']:
        if size < 1024.0:
            return f"{size:.2f} {unit}"
        size /= 1024.0
    return f"{size:.2f} TB"


def flash_to_drive(source_path, drive_letter, backup_name):
    """Flash the entire repository to `drive_letter` (Windows drive).

    Returns a result dict: {'success', 'drive', 'path', 'size'} on
    success or {'success': False, 'drive', 'error'} on failure.
    """
    dest_path = f"{drive_letter}:\\{backup_name}"

    print(f"\n📦 Flashing to {drive_letter}: drive...")
    print(f"   Source: {source_path}")
    print(f"   Destination: {dest_path}")

    try:
        # Remove old backup if exists
        if os.path.exists(dest_path):
            print("   🗑️ Removing old backup...")
            shutil.rmtree(dest_path)

        # Create new backup (build artifacts and secrets excluded)
        print("   📋 Copying files...")
        shutil.copytree(
            source_path,
            dest_path,
            ignore=shutil.ignore_patterns(
                '.git',
                'node_modules',
                '__pycache__',
                '*.pyc',
                '.venv',
                'venv',
                '.env',
                '*.log'
            )
        )

        # Copy .git folder separately (for full git functionality)
        git_source = os.path.join(source_path, '.git')
        git_dest = os.path.join(dest_path, '.git')

        if os.path.exists(git_source):
            print("   🔧 Copying git repository...")
            shutil.copytree(git_source, git_dest)

        # Create backup info file
        info_file = os.path.join(dest_path, 'BACKUP_INFO.json')
        backup_info = {
            'backup_date': datetime.now().isoformat(),
            'source_path': source_path,
            'git_info': get_git_info(),
            'stats': get_repo_stats()
        }

        with open(info_file, 'w') as f:
            json.dump(backup_info, f, indent=2)

        # Get backup size
        backup_size = sum(
            os.path.getsize(os.path.join(dirpath, filename))
            for dirpath, dirnames, filenames in os.walk(dest_path)
            for filename in filenames
        )

        print(f"   ✅ Successfully flashed to {drive_letter}:")
        print(f"      Size: {format_size(backup_size)}")
        print(f"      Path: {dest_path}")

        return {
            'success': True,
            'drive': drive_letter,
            'path': dest_path,
            'size': backup_size
        }

    except Exception as e:
        print(f"   ❌ Error flashing to {drive_letter}: {e}")
        return {
            'success': False,
            'drive': drive_letter,
            'error': str(e)
        }


def verify_backup(backup_path):
    """Verify backup integrity.

    Returns (True, detail) when the backup looks usable, otherwise
    (False, reason).
    """
    try:
        # Check if git repository is valid
        git_dir = os.path.join(backup_path, '.git')
        if not os.path.exists(git_dir):
            return False, "Git folder missing"

        # Check if backup info exists
        info_file = os.path.join(backup_path, 'BACKUP_INFO.json')
        if not os.path.exists(info_file):
            return False, "Backup info missing"

        # Count files — a real backup should have a reasonable minimum
        file_count = sum(1 for _, _, files in os.walk(backup_path) for _ in files)
        if file_count < 10:
            return False, "Too few files"

        return True, f"Verified: {file_count} files"
    except Exception as e:
        return False, str(e)


def main():
    """Back up the current repository to every available target drive."""
    print("""
╔═══════════════════════════════════════════════════════════╗
║     NetworkBuster - Git Repository Flash Backup           ║
║          Fast backup to multiple drives                   ║
╚═══════════════════════════════════════════════════════════╝
    """)

    # Get current directory
    source_path = os.getcwd()
    repo_name = os.path.basename(source_path)

    print(f"📂 Repository: {repo_name}")
    print(f"📁 Location: {source_path}")

    # Get git info
    git_info = get_git_info()
    if git_info:
        print(f"🌿 Branch: {git_info['branch']}")
        print(f"📝 Commit: {git_info['commit']}")

    # Get stats
    print("\n📊 Analyzing repository...")
    stats = get_repo_stats()
    print(f"   Files: {stats['files']:,}")
    print(f"   Folders: {stats['folders']:,}")
    print(f"   Size: {format_size(stats['size'])}")

    # Create backup name with timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_name = f"NetworkBuster_Backup_{timestamp}"

    # Flash to available drives
    results = []

    # Check D: drive
    if os.path.exists('D:\\'):
        result = flash_to_drive(source_path, 'D', backup_name)
        results.append(result)

        if result['success']:
            is_valid, msg = verify_backup(result['path'])
            if is_valid:
                print(f"   ✅ Verification: {msg}")
            else:
                print(f"   ⚠️ Verification failed: {msg}")

    # Check K: drive
    if os.path.exists('K:\\'):
        result = flash_to_drive(source_path, 'K', backup_name)
        results.append(result)

        if result['success']:
            is_valid, msg = verify_backup(result['path'])
            if is_valid:
                print(f"   ✅ Verification: {msg}")
            else:
                print(f"   ⚠️ Verification failed: {msg}")

    # Summary
    print("\n" + "=" * 60)
    print("📋 BACKUP SUMMARY")
    print("=" * 60)

    successful = [r for r in results if r['success']]
    failed = [r for r in results if not r['success']]

    if successful:
        print(f"\n✅ Successfully backed up to {len(successful)} drive(s):")
        total_size = 0
        for result in successful:
            print(f"   • {result['drive']}: - {format_size(result['size'])}")
            print(f"     Path: {result['path']}")
            total_size += result['size']

        print(f"\n💾 Total backup size: {format_size(total_size)}")

    if failed:
        print(f"\n❌ Failed backups ({len(failed)}):")
        for result in failed:
            print(f"   • {result['drive']}: - {result['error']}")

    print("\n🎉 Flash backup complete!")
    print(f"📦 Backup name: {backup_name}")
    print(f"⏰ Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")


if __name__ == '__main__':
    main()
+CORS(app) + +# Gemini API Configuration +GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', '') +GEMINI_API_URL = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent' + +# HTML Template for Gemini Chat Interface +GEMINI_TEMPLATE = """ + + + + + + NetworkBuster Gemini AI + + + +
    +
    +

    ๐Ÿค– NetworkBuster Gemini AI

    +

    Powered by Google Gemini Pro

    +
    + +
    +
    + Checking API connection... +
    + +
    +
    + ๐Ÿ“‹ Setup Instructions:
    + Set your Gemini API key as an environment variable:
    + $env:GEMINI_API_KEY="your-api-key-here"

    + Get your free API key at: Google AI Studio +
    +
    + +
    + + +
    +
    + + + + +""" + +@app.route('/') +def index(): + """Serve the Gemini chat interface""" + return render_template_string(GEMINI_TEMPLATE) + +@app.route('/api/gemini/status') +def api_status(): + """Check if Gemini API is configured""" + configured = bool(GEMINI_API_KEY) + return jsonify({ + 'configured': configured, + 'model': 'gemini-pro' + }) + +@app.route('/api/gemini/chat', methods=['POST']) +def chat(): + """Send message to Gemini and get response""" + if not GEMINI_API_KEY: + return jsonify({ + 'success': False, + 'error': 'Gemini API key not configured. Set GEMINI_API_KEY environment variable.' + }), 400 + + try: + data = request.json + user_message = data.get('message', '') + + if not user_message: + return jsonify({ + 'success': False, + 'error': 'No message provided' + }), 400 + + # Call Gemini API + headers = { + 'Content-Type': 'application/json' + } + + payload = { + 'contents': [{ + 'parts': [{ + 'text': user_message + }] + }] + } + + response = requests.post( + f'{GEMINI_API_URL}?key={GEMINI_API_KEY}', + headers=headers, + json=payload, + timeout=30 + ) + + if response.status_code == 200: + result = response.json() + + # Extract the response text + if 'candidates' in result and len(result['candidates']) > 0: + candidate = result['candidates'][0] + if 'content' in candidate and 'parts' in candidate['content']: + parts = candidate['content']['parts'] + if len(parts) > 0 and 'text' in parts[0]: + gemini_response = parts[0]['text'] + + return jsonify({ + 'success': True, + 'response': gemini_response + }) + + return jsonify({ + 'success': False, + 'error': 'Unexpected response format from Gemini API' + }), 500 + else: + error_message = response.json().get('error', {}).get('message', 'Unknown error') + return jsonify({ + 'success': False, + 'error': f'Gemini API error: {error_message}' + }), response.status_code + + except requests.exceptions.RequestException as e: + return jsonify({ + 'success': False, + 'error': f'Network error: {str(e)}' + }), 500 + 
except Exception as e: + return jsonify({ + 'success': False, + 'error': f'Server error: {str(e)}' + }), 500 + +@app.route('/health') +def health(): + """Health check endpoint""" + return jsonify({ + 'status': 'healthy', + 'service': 'gemini-integration', + 'api_configured': bool(GEMINI_API_KEY) + }) + +if __name__ == '__main__': + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster - Google Gemini AI Integration โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + if GEMINI_API_KEY: + print("โœ… Gemini API Key: Configured") + else: + print("โš ๏ธ Gemini API Key: Not configured") + print("\n๐Ÿ“‹ To configure, set environment variable:") + print(" PowerShell: $env:GEMINI_API_KEY=\"your-api-key\"") + print(" Get your key: https://makersuite.google.com/app/apikey\n") + + print("๐Ÿš€ Starting Gemini Integration Server on http://localhost:4000") + print("โšก Features:") + print(" โœ“ Google Gemini Pro chat interface") + print(" โœ“ Real-time AI responses") + print(" โœ“ Clean, modern UI") + print(" โœ“ Health check: /health") + print("") + + app.run(host='0.0.0.0', port=4000, debug=False) diff --git a/git_cloud_shortcuts.py b/git_cloud_shortcuts.py new file mode 100644 index 0000000..c6a6340 --- /dev/null +++ b/git_cloud_shortcuts.py @@ -0,0 +1,569 @@ +""" +NetworkBuster - Git Repository Cloud Shortcuts Creator +Finds all git repositories and creates shortcuts for cloud access +""" + +import os +import json +import subprocess +from pathlib import Path +from datetime import datetime +import shutil + +def find_git_repositories(root_path='.'): + """Find all git repositories in the project""" + git_repos = [] + root_path = Path(root_path).resolve() + + for item in 
def find_git_repositories(root_path='.'):
    """Find all git repositories under `root_path` (resolved to absolute)."""
    git_repos = []
    root_path = Path(root_path).resolve()

    for item in root_path.rglob('.git'):
        if item.is_dir():
            repo_path = item.parent
            repo_info = get_repo_info(repo_path)
            if repo_info:
                git_repos.append(repo_info)

    return git_repos


def get_repo_info(repo_path):
    """Get detailed information about a git repository.

    Returns a dict (name, path, branch, remote_url, last_commit,
    modified_files, commit_count, size) or None on failure.

    BUGFIX: the original called os.chdir(repo_path) and never restored
    the previous working directory, so every later relative-path
    operation ran inside the last repository scanned. git now runs with
    cwd=repo_path instead, leaving the caller's cwd untouched.
    """
    def _git(args):
        # Run one git command inside the repo and return stripped stdout.
        return subprocess.check_output(
            ['git'] + args, cwd=str(repo_path), stderr=subprocess.DEVNULL
        ).decode().strip()

    try:
        # Get branch (no fallback: failure here means "not a repo")
        branch = _git(['branch', '--show-current'])

        # Get remote URL
        try:
            remote_url = _git(['config', '--get', 'remote.origin.url'])
        except (subprocess.CalledProcessError, OSError):
            remote_url = 'No remote configured'

        # Get last commit
        try:
            last_commit = _git(['log', '-1', '--format=%h - %s (%cr)'])
        except (subprocess.CalledProcessError, OSError):
            last_commit = 'No commits'

        # Get status (count of modified files)
        try:
            status = _git(['status', '--short'])
            modified_files = len(status.split('\n')) if status else 0
        except (subprocess.CalledProcessError, OSError):
            modified_files = 0

        # Get commit count
        try:
            commit_count = _git(['rev-list', '--count', 'HEAD'])
        except (subprocess.CalledProcessError, OSError):
            commit_count = '0'

        return {
            'name': repo_path.name,
            'path': str(repo_path),
            'branch': branch,
            'remote_url': remote_url,
            'last_commit': last_commit,
            'modified_files': modified_files,
            'commit_count': commit_count,
            'size': get_folder_size(repo_path)
        }
    except Exception as e:
        print(f"Error getting info for {repo_path}: {e}")
        return None


def get_folder_size(folder_path):
    """Get total size of folder in bytes (best-effort)."""
    total_size = 0
    try:
        for dirpath, dirnames, filenames in os.walk(folder_path):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                try:
                    total_size += os.path.getsize(filepath)
                except OSError:
                    # File vanished or unreadable; skip it.
                    pass
    except OSError:
        pass
    return total_size


def format_size(bytes):
    """Format bytes to human readable size (B through TB)."""
    size = float(bytes)
    for unit in ['B', 'KB', 'MB', 'GB']:
        if size < 1024.0:
            return f"{size:.2f} {unit}"
        size /= 1024.0
    return f"{size:.2f} TB"


def create_shortcuts_folder(base_path):
    """Create (if needed) and return the folder holding git shortcuts."""
    shortcuts_path = Path(base_path) / 'NetworkBuster_Git_Shortcuts'
    shortcuts_path.mkdir(exist_ok=True)
    return shortcuts_path


def create_windows_shortcut(repo_info, shortcuts_path):
    """Create a Windows .lnk shortcut for one repo (falls back to a .bat).

    Returns True on success (either form), False on unexpected errors.
    """
    try:
        import winshell  # third-party; optional
        from win32com.client import Dispatch

        shortcut_name = f"{repo_info['name']}.lnk"
        shortcut_path = shortcuts_path / shortcut_name

        shell = Dispatch('WScript.Shell')
        shortcut = shell.CreateShortCut(str(shortcut_path))
        shortcut.TargetPath = repo_info['path']
        shortcut.WorkingDirectory = repo_info['path']
        shortcut.IconLocation = "shell32.dll,4"  # Folder icon with git
        shortcut.Description = f"Git: {repo_info['branch']} | {repo_info['commit_count']} commits"
        shortcut.save()

        return True
    except ImportError:
        # winshell/pywin32 unavailable: drop a batch file that opens the
        # repository folder instead.
        batch_file = shortcuts_path / f"{repo_info['name']}.bat"
        with open(batch_file, 'w') as f:
            f.write('@echo off\n')
            f.write(f'cd /d "{repo_info["path"]}"\n')
            f.write(f'start "" "%SystemRoot%\\explorer.exe" "{repo_info["path"]}"\n')
        return True
    except Exception as e:
        print(f"Error creating shortcut for {repo_info['name']}: {e}")
        return False
    +
    +

    ๐Ÿ—‚๏ธ NetworkBuster Git Repositories

    +

    Cloud-Synced Repository Dashboard

    +

    Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

    +
    + +
    +
    +

    {len(repos)}

    +

    ๐Ÿ“ฆ Total Repositories

    +
    +
    +

    {sum(int(r['commit_count']) for r in repos)}

    +

    ๐Ÿ“ Total Commits

    +
    +
    +

    {sum(r['modified_files'] for r in repos)}

    +

    ๐Ÿ”ง Modified Files

    +
    +
    +

    {format_size(sum(r['size'] for r in repos))}

    +

    ๐Ÿ’พ Total Size

    +
    +
    + +
    +""" + + for repo in repos: + status_class = "clean" if repo['modified_files'] == 0 else "" + status_text = "Clean" if repo['modified_files'] == 0 else f"{repo['modified_files']} modified" + + html_content += f""" +
    +
    +
    ๐Ÿ“
    +
    +

    {repo['name']}

    + ๐ŸŒฟ {repo['branch']} +
    +
    + +
    +
    + ๐Ÿ“ + {repo['path']} +
    +
    + ๐Ÿ”— + {repo['remote_url']} +
    +
    + ๐Ÿ“Š + {repo['commit_count']} commits โ€ข {format_size(repo['size'])} +
    +
    + +
    + {repo['last_commit']} +
    + +
    + + {status_text} + +
    + + +
    +""" + + html_content += """ +
    + + +
    + + + + +""" + + with open(output_path, 'w', encoding='utf-8') as f: + f.write(html_content) + +def create_json_manifest(repos, output_path): + """Create JSON manifest of all repositories""" + manifest = { + 'generated': datetime.now().isoformat(), + 'total_repos': len(repos), + 'total_commits': sum(int(r['commit_count']) for r in repos), + 'total_size': sum(r['size'] for r in repos), + 'repositories': repos + } + + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(manifest, f, indent=2) + +def sync_to_cloud_drives(source_folder, repos): + """Sync shortcuts to cloud drives""" + cloud_drives = [] + + # Check D: drive + if os.path.exists('D:\\'): + cloud_drives.append('D:\\NetworkBuster_Git_Cloud') + + # Check K: drive + if os.path.exists('K:\\'): + cloud_drives.append('K:\\NetworkBuster_Git_Cloud') + + synced_locations = [] + + for drive_path in cloud_drives: + try: + os.makedirs(drive_path, exist_ok=True) + + # Copy HTML dashboard + html_source = source_folder / 'git_dashboard.html' + if html_source.exists(): + shutil.copy2(html_source, os.path.join(drive_path, 'git_dashboard.html')) + + # Copy JSON manifest + json_source = source_folder / 'git_manifest.json' + if json_source.exists(): + shutil.copy2(json_source, os.path.join(drive_path, 'git_manifest.json')) + + # Copy all shortcuts + for file in source_folder.glob('*.bat'): + shutil.copy2(file, drive_path) + + synced_locations.append(drive_path) + print(f"โœ… Synced to: {drive_path}") + except Exception as e: + print(f"โŒ Failed to sync to {drive_path}: {e}") + + return synced_locations + +def main(): + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster - Git Cloud Shortcuts Creator โ•‘ +โ•‘ Find and organize all git repositories โ•‘ 
+โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + print("๐Ÿ” Scanning for git repositories...") + repos = find_git_repositories() + + print(f"\n๐Ÿ“ฆ Found {len(repos)} git repositories:") + for repo in repos: + print(f" โ€ข {repo['name']} ({repo['branch']}) - {repo['commit_count']} commits") + + print("\n๐Ÿ—‚๏ธ Creating shortcuts folder...") + shortcuts_path = create_shortcuts_folder('.') + + print("๐Ÿ“ Creating shortcuts...") + for repo in repos: + create_windows_shortcut(repo, shortcuts_path) + + print("๐ŸŒ Creating HTML dashboard...") + html_path = shortcuts_path / 'git_dashboard.html' + create_html_dashboard(repos, html_path) + + print("๐Ÿ“‹ Creating JSON manifest...") + json_path = shortcuts_path / 'git_manifest.json' + create_json_manifest(repos, json_path) + + print("\nโ˜๏ธ Syncing to cloud drives...") + synced_locations = sync_to_cloud_drives(shortcuts_path, repos) + + print("\nโœ… All operations completed!") + print(f"\n๐Ÿ“ Local shortcuts: {shortcuts_path}") + print(f"๐ŸŒ Dashboard: {html_path}") + print(f"๐Ÿ“‹ Manifest: {json_path}") + + if synced_locations: + print("\nโ˜๏ธ Cloud locations:") + for location in synced_locations: + print(f" โ€ข {location}") + + print(f"\n๐Ÿ“Š Summary:") + print(f" โ€ข {len(repos)} repositories") + print(f" โ€ข {sum(int(r['commit_count']) for r in repos)} total commits") + print(f" โ€ข {format_size(sum(r['size'] for r in repos))} total size") + print(f" โ€ข {sum(r['modified_files'] for r in repos)} modified files") + + # Open dashboard + try: + import webbrowser + webbrowser.open(str(html_path)) + print("\n๐Ÿš€ Opening dashboard in browser...") + except: + pass + +if __name__ == '__main__': + main() diff --git a/index.html b/index.html index 73de653..3a6f345 100644 --- a/index.html +++ b/index.html @@ -9,5 +9,8 @@
    + diff --git a/install_app.ps1 b/install_app.ps1 new file mode 100644 index 0000000..091fec2 --- /dev/null +++ b/install_app.ps1 @@ -0,0 +1,254 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + NetworkBuster Application Installer +.DESCRIPTION + Installs NetworkBuster as a Windows application with shortcuts, Start Menu entry, and system integration +#> + +param( + [switch]$Uninstall +) + +$AppName = "NetworkBuster" +$AppVersion = "1.0.0" +$InstallPath = $PSScriptRoot +$StartMenuPath = "$env:ProgramData\Microsoft\Windows\Start Menu\Programs\$AppName" +$DesktopPath = "$env:PUBLIC\Desktop" + +function Write-Header { + param([string]$Text) + Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Cyan + Write-Host "โ•‘ $Text" -ForegroundColor Cyan + Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan +} + +function Install-Application { + Write-Header "NetworkBuster Application Installer v$AppVersion" + + Write-Host "`n๐Ÿ“ฆ Installing NetworkBuster..." -ForegroundColor Yellow + Write-Host " Install Path: $InstallPath" -ForegroundColor Gray + + # Create Start Menu folder + Write-Host "`n๐Ÿ“ Creating Start Menu entries..." -ForegroundColor Yellow + if (-not (Test-Path $StartMenuPath)) { + New-Item -Path $StartMenuPath -ItemType Directory -Force | Out-Null + } + + # Create shortcuts + $WshShell = New-Object -ComObject WScript.Shell + + # 1. 
Main Launcher Shortcut + Write-Host " Creating: NetworkBuster Launcher" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\NetworkBuster Launcher.lnk") + $Shortcut.TargetPath = "$InstallPath\.venv\Scripts\pythonw.exe" + $Shortcut.Arguments = "`"$InstallPath\networkbuster_launcher.py`"" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "NetworkBuster All-in-One Launcher" + $Shortcut.IconLocation = "imageres.dll,1" + $Shortcut.Save() + + # 2. Admin Launcher Shortcut + Write-Host " Creating: NetworkBuster (Admin Mode)" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\NetworkBuster (Admin Mode).lnk") + $Shortcut.TargetPath = "powershell.exe" + $Shortcut.Arguments = "-ExecutionPolicy Bypass -File `"$InstallPath\run_launcher_admin.ps1`"" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "NetworkBuster with Administrator privileges" + $Shortcut.IconLocation = "imageres.dll,73" + $Shortcut.Save() + + # 3. Start Services Shortcut + Write-Host " Creating: Start All Services" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\Start All Services.lnk") + $Shortcut.TargetPath = "$InstallPath\.venv\Scripts\python.exe" + $Shortcut.Arguments = "`"$InstallPath\networkbuster_launcher.py`" --start" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "Start all NetworkBuster services" + $Shortcut.IconLocation = "shell32.dll,137" + $Shortcut.Save() + + # 4. Status Monitor Shortcut + Write-Host " Creating: Status Monitor" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\Status Monitor.lnk") + $Shortcut.TargetPath = "$InstallPath\.venv\Scripts\python.exe" + $Shortcut.Arguments = "`"$InstallPath\networkbuster_launcher.py`" --status" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "Check NetworkBuster service status" + $Shortcut.IconLocation = "shell32.dll,16" + $Shortcut.Save() + + # 5. 
Mission Control Shortcut + Write-Host " Creating: Mission Control Dashboard" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\Mission Control Dashboard.lnk") + $Shortcut.TargetPath = "$env:ProgramFiles\Google\Chrome\Application\chrome.exe" + $Shortcut.Arguments = "--new-window http://localhost:5000" + $Shortcut.Description = "Open Mission Control Dashboard" + $Shortcut.IconLocation = "shell32.dll,43" + $Shortcut.Save() + + # 6. API Tracer Shortcut + Write-Host " Creating: API Tracer" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\API Tracer.lnk") + $Shortcut.TargetPath = "$env:ProgramFiles\Google\Chrome\Application\chrome.exe" + $Shortcut.Arguments = "--new-window http://localhost:8000" + $Shortcut.Description = "Open API Tracer Dashboard" + $Shortcut.IconLocation = "shell32.dll,21" + $Shortcut.Save() + + # 7. Uninstaller Shortcut + Write-Host " Creating: Uninstaller" -ForegroundColor Gray + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\Uninstall NetworkBuster.lnk") + $Shortcut.TargetPath = "powershell.exe" + $Shortcut.Arguments = "-ExecutionPolicy Bypass -File `"$InstallPath\install_app.ps1`" -Uninstall" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "Uninstall NetworkBuster" + $Shortcut.IconLocation = "shell32.dll,131" + $Shortcut.Save() + + # Create Desktop Shortcut + Write-Host "`n๐Ÿ–ฅ๏ธ Creating desktop shortcut..." -ForegroundColor Yellow + $Shortcut = $WshShell.CreateShortcut("$DesktopPath\NetworkBuster.lnk") + $Shortcut.TargetPath = "$InstallPath\.venv\Scripts\pythonw.exe" + $Shortcut.Arguments = "`"$InstallPath\networkbuster_launcher.py`"" + $Shortcut.WorkingDirectory = $InstallPath + $Shortcut.Description = "NetworkBuster All-in-One Launcher" + $Shortcut.IconLocation = "imageres.dll,1" + $Shortcut.Save() + + # Add to Windows Registry + Write-Host "`n๐Ÿ“ Registering application..." 
-ForegroundColor Yellow + $RegPath = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\$AppName" + + if (-not (Test-Path $RegPath)) { + New-Item -Path $RegPath -Force | Out-Null + } + + Set-ItemProperty -Path $RegPath -Name "DisplayName" -Value $AppName + Set-ItemProperty -Path $RegPath -Name "DisplayVersion" -Value $AppVersion + Set-ItemProperty -Path $RegPath -Name "Publisher" -Value "NetworkBuster Team" + Set-ItemProperty -Path $RegPath -Name "InstallLocation" -Value $InstallPath + Set-ItemProperty -Path $RegPath -Name "UninstallString" -Value "powershell.exe -ExecutionPolicy Bypass -File `"$InstallPath\install_app.ps1`" -Uninstall" + Set-ItemProperty -Path $RegPath -Name "DisplayIcon" -Value "imageres.dll,1" + Set-ItemProperty -Path $RegPath -Name "NoModify" -Value 1 + Set-ItemProperty -Path $RegPath -Name "NoRepair" -Value 1 + + # Create scheduled task for launch + Write-Host "`nโฐ Creating scheduled task..." -ForegroundColor Yellow + & "$InstallPath\.venv\Scripts\python.exe" "$InstallPath\networkbuster_launcher.py" --schedule + + # Firewall rules + Write-Host "`n๐Ÿ”ฅ Configuring firewall rules..." -ForegroundColor Yellow + $ports = @(3000, 3001, 3002, 4000, 5000, 6000, 7000, 8000) + foreach ($port in $ports) { + $ruleName = "NetworkBuster Port $port" + $existing = Get-NetFirewallRule -DisplayName $ruleName -ErrorAction SilentlyContinue + + if (-not $existing) { + New-NetFirewallRule -DisplayName $ruleName ` + -Direction Inbound ` + -Action Allow ` + -Protocol TCP ` + -LocalPort $port ` + -Profile Any ` + -Description "NetworkBuster service port" | Out-Null + Write-Host " โœ… Port $port allowed" -ForegroundColor Green + } else { + Write-Host " โ„น๏ธ Port $port already configured" -ForegroundColor Gray + } + } + + # Create uninstall script + Write-Host "`n๐Ÿ“„ Creating uninstall script..." -ForegroundColor Yellow + $uninstallScript = @" +# NetworkBuster Uninstaller +Write-Host "Uninstalling NetworkBuster..." 
-ForegroundColor Yellow +Stop-Process -Name python,node -Force -ErrorAction SilentlyContinue +Remove-Item -Path "$StartMenuPath" -Recurse -Force -ErrorAction SilentlyContinue +Remove-Item -Path "$DesktopPath\NetworkBuster.lnk" -Force -ErrorAction SilentlyContinue +Remove-Item -Path "$RegPath" -Recurse -Force -ErrorAction SilentlyContinue +schtasks /Delete /TN "NetworkBuster_ScheduledLaunch" /F 2>`$null +Write-Host "NetworkBuster uninstalled successfully!" -ForegroundColor Green +"@ + $uninstallScript | Out-File "$InstallPath\uninstall.ps1" -Encoding UTF8 + + # Installation complete + Write-Host "`n" -ForegroundColor Green + Write-Host "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Green + Write-Host "โ•‘ โœ… INSTALLATION COMPLETE! โ•‘" -ForegroundColor Green + Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green + + Write-Host "`n๐Ÿ“Š Installation Summary:" -ForegroundColor Cyan + Write-Host " โœ… Start Menu entries created" -ForegroundColor Green + Write-Host " โœ… Desktop shortcut created" -ForegroundColor Green + Write-Host " โœ… Application registered in Windows" -ForegroundColor Green + Write-Host " โœ… Scheduled task created (Jan 17, 2026)" -ForegroundColor Green + Write-Host " โœ… Firewall rules configured (ports 3000-8000)" -ForegroundColor Green + Write-Host " โœ… Uninstaller created" -ForegroundColor Green + + Write-Host "`n๐Ÿš€ How to Launch:" -ForegroundColor Cyan + Write-Host " โ€ข Start Menu โ†’ NetworkBuster" -ForegroundColor White + Write-Host " โ€ข Desktop โ†’ NetworkBuster icon" -ForegroundColor White + Write-Host " โ€ข Search โ†’ 'NetworkBuster'" -ForegroundColor White + + Write-Host "`n๐Ÿ“… Scheduled Launch:" 
-ForegroundColor Cyan + Write-Host " Date: January 17, 2026 at 9:00 AM" -ForegroundColor White + Write-Host " Countdown: 14 days" -ForegroundColor White + + Write-Host "`n๐ŸŒ Quick Access URLs:" -ForegroundColor Cyan + Write-Host " Mission Control: http://localhost:5000" -ForegroundColor White + Write-Host " API Tracer: http://localhost:8000" -ForegroundColor White + Write-Host " Universal Launch: http://localhost:7000" -ForegroundColor White + + Write-Host "`n" +} + +function Uninstall-Application { + Write-Header "NetworkBuster Application Uninstaller" + + Write-Host "`n๐Ÿ›‘ Uninstalling NetworkBuster..." -ForegroundColor Yellow + + # Stop all processes + Write-Host "`n Stopping services..." -ForegroundColor Gray + Stop-Process -Name python,node,pythonw -Force -ErrorAction SilentlyContinue + Start-Sleep -Seconds 2 + + # Remove Start Menu folder + Write-Host " Removing Start Menu entries..." -ForegroundColor Gray + Remove-Item -Path $StartMenuPath -Recurse -Force -ErrorAction SilentlyContinue + + # Remove Desktop shortcut + Write-Host " Removing desktop shortcut..." -ForegroundColor Gray + Remove-Item -Path "$DesktopPath\NetworkBuster.lnk" -Force -ErrorAction SilentlyContinue + + # Remove Registry entries + Write-Host " Removing registry entries..." -ForegroundColor Gray + $RegPath = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\$AppName" + Remove-Item -Path $RegPath -Recurse -Force -ErrorAction SilentlyContinue + + # Remove scheduled task + Write-Host " Removing scheduled task..." -ForegroundColor Gray + schtasks /Delete /TN "NetworkBuster_ScheduledLaunch" /F 2>$null | Out-Null + + # Remove firewall rules (optional) + Write-Host " Removing firewall rules..." -ForegroundColor Gray + $ports = @(3000, 3001, 3002, 4000, 5000, 6000, 7000, 8000) + foreach ($port in $ports) { + Remove-NetFirewallRule -DisplayName "NetworkBuster Port $port" -ErrorAction SilentlyContinue + } + + Write-Host "`nโœ… NetworkBuster has been uninstalled successfully!" 
-ForegroundColor Green + Write-Host " Note: Source files in $InstallPath were preserved" -ForegroundColor Gray + Write-Host "`n" +} + +# Main execution +if ($Uninstall) { + Uninstall-Application +} else { + Install-Application +} + +Write-Host "Press any key to exit..." -ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/install_autostart.ps1 b/install_autostart.ps1 new file mode 100644 index 0000000..443d820 --- /dev/null +++ b/install_autostart.ps1 @@ -0,0 +1,126 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + NetworkBuster Startup Service Installer +.DESCRIPTION + Installs NetworkBuster to auto-start on Windows boot +#> + +$InstallDir = $PSScriptRoot +$ServiceName = "NetworkBuster_AutoStart" + +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Cyan +Write-Host "โ•‘ NetworkBuster Auto-Start Installer โ•‘" -ForegroundColor Cyan +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan + +Write-Host "`n[1/3] Creating startup script..." -ForegroundColor Yellow + +# Create startup script +$startupScript = @" +@echo off +cd /d "$InstallDir" +call .venv\Scripts\activate.bat +start /min python auto_start_service.py +exit +"@ + +$startupScriptPath = Join-Path $InstallDir "startup_service.bat" +Set-Content -Path $startupScriptPath -Value $startupScript +Write-Host " โœ… Startup script created" -ForegroundColor Green + +Write-Host "`n[2/3] Creating Windows startup shortcut..." 
-ForegroundColor Yellow + +# Add to Windows Startup folder +$startupFolder = [Environment]::GetFolderPath("Startup") +$WshShell = New-Object -ComObject WScript.Shell +$shortcut = $WshShell.CreateShortcut("$startupFolder\NetworkBuster.lnk") +$shortcut.TargetPath = $startupScriptPath +$shortcut.WorkingDirectory = $InstallDir +$shortcut.Description = "NetworkBuster Auto-Start Service" +$shortcut.Save() + +Write-Host " โœ… Startup shortcut created: $startupFolder\NetworkBuster.lnk" -ForegroundColor Green + +Write-Host "`n[3/3] Creating scheduled task..." -ForegroundColor Yellow + +# Create scheduled task for system events +$taskXml = @" + + + + NetworkBuster Auto-Start on System Events + + + + true + + + true + + + + + $env:USERNAME + InteractiveToken + HighestAvailable + + + + IgnoreNew + false + false + true + true + true + true + true + false + PT0S + + + + $startupScriptPath + $InstallDir + + + +"@ + +$taskXmlPath = Join-Path $InstallDir "autostart_task.xml" +Set-Content -Path $taskXmlPath -Value $taskXml -Encoding Unicode + +# Create the task +try { + schtasks /Create /TN "$ServiceName" /XML "$taskXmlPath" /F | Out-Null + Write-Host " โœ… Scheduled task created: $ServiceName" -ForegroundColor Green +} catch { + Write-Host " โš ๏ธ Task creation failed (may already exist)" -ForegroundColor Yellow +} + +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Green +Write-Host "โ•‘ โœ… AUTO-START INSTALLED! 
โ•‘" -ForegroundColor Green +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green + +Write-Host "`n๐Ÿš€ NetworkBuster will now automatically start:" -ForegroundColor Cyan +Write-Host " โ€ข On Windows boot" -ForegroundColor White +Write-Host " โ€ข On user login" -ForegroundColor White +Write-Host " โ€ข After system resume" -ForegroundColor White + +Write-Host "`n๐ŸŽฎ Control Options:" -ForegroundColor Cyan +Write-Host " Disable: schtasks /Change /TN `"$ServiceName`" /DISABLE" -ForegroundColor White +Write-Host " Enable: schtasks /Change /TN `"$ServiceName`" /ENABLE" -ForegroundColor White +Write-Host " Remove: schtasks /Delete /TN `"$ServiceName`" /F" -ForegroundColor White + +Write-Host "`n๐Ÿ“ Startup Location:" -ForegroundColor Cyan +Write-Host " $startupFolder\NetworkBuster.lnk" -ForegroundColor White + +Write-Host "`nโœจ Test now?" -ForegroundColor Yellow +$test = Read-Host "Start services now? (y/n)" +if ($test -eq 'y') { + Write-Host "`nStarting services..." -ForegroundColor Cyan + & "$InstallDir\.venv\Scripts\python.exe" "$InstallDir\auto_start_service.py" +} + +Write-Host "`nPress any key to exit..." 
-ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/install_networkbuster.ps1 b/install_networkbuster.ps1 new file mode 100644 index 0000000..f259307 --- /dev/null +++ b/install_networkbuster.ps1 @@ -0,0 +1,167 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + NetworkBuster Application Installer +.DESCRIPTION + Complete installation with desktop shortcuts and Start Menu integration +#> + +$ErrorActionPreference = "Continue" + +Write-Host "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Cyan +Write-Host "โ•‘ NetworkBuster Application Installer โ•‘" -ForegroundColor Cyan +Write-Host "โ•‘ Version 1.0.1 - Production Ready โ•‘" -ForegroundColor Cyan +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan + +$InstallDir = $PSScriptRoot +Set-Location $InstallDir + +Write-Host "`n[1/7] ๐Ÿ” Checking Python environment..." -ForegroundColor Yellow +if (Test-Path ".venv\Scripts\python.exe") { + Write-Host " โœ… Virtual environment found" -ForegroundColor Green +} else { + Write-Host " โŒ Virtual environment not found!" -ForegroundColor Red + exit 1 +} + +Write-Host "`n[2/7] ๐Ÿ“ฆ Building NetworkBuster package..." -ForegroundColor Yellow +& ".venv\Scripts\Activate.ps1" +& ".venv\Scripts\pip.exe" install --upgrade pip setuptools wheel +& ".venv\Scripts\pip.exe" install -e . + +Write-Host "`n[3/7] ๐ŸŽจ Creating application icon..." 
-ForegroundColor Yellow +$IconPath = Join-Path $InstallDir "networkbuster.ico" + +# Create simple ICO file (placeholder - you can replace with real icon) +Add-Type -AssemblyName System.Drawing +$bitmap = New-Object System.Drawing.Bitmap(64, 64) +$graphics = [System.Drawing.Graphics]::FromImage($bitmap) +$graphics.Clear([System.Drawing.Color]::Black) +$graphics.FillEllipse([System.Drawing.Brushes]::Lime, 10, 10, 44, 44) +$graphics.DrawString("NB", (New-Object System.Drawing.Font("Arial", 16, [System.Drawing.FontStyle]::Bold)), [System.Drawing.Brushes]::Black, 16, 20) + +# Save as icon +$iconStream = New-Object System.IO.MemoryStream +$bitmap.Save($iconStream, [System.Drawing.Imaging.ImageFormat]::Png) +$iconBytes = $iconStream.ToArray() +[System.IO.File]::WriteAllBytes($IconPath, $iconBytes) + +$graphics.Dispose() +$bitmap.Dispose() +$iconStream.Dispose() + +Write-Host " โœ… Icon created at $IconPath" -ForegroundColor Green + +Write-Host "`n[4/7] ๐Ÿ“Œ Creating Desktop shortcuts..." -ForegroundColor Yellow +$WshShell = New-Object -ComObject WScript.Shell + +# Main launcher shortcut +$Shortcut = $WshShell.CreateShortcut("$env:USERPROFILE\Desktop\NetworkBuster.lnk") +$Shortcut.TargetPath = "$InstallDir\.venv\Scripts\python.exe" +$Shortcut.Arguments = "`"$InstallDir\networkbuster_launcher.py`"" +$Shortcut.WorkingDirectory = $InstallDir +$Shortcut.IconLocation = $IconPath +$Shortcut.Description = "NetworkBuster - Network Management Suite" +$Shortcut.Save() +Write-Host " โœ… Desktop: NetworkBuster.lnk" -ForegroundColor Green + +# Network Map shortcut +$Shortcut = $WshShell.CreateShortcut("$env:USERPROFILE\Desktop\NetworkBuster Map.lnk") +$Shortcut.TargetPath = "$InstallDir\.venv\Scripts\python.exe" +$Shortcut.Arguments = "`"$InstallDir\network_map_viewer.py`"" +$Shortcut.WorkingDirectory = $InstallDir +$Shortcut.IconLocation = $IconPath +$Shortcut.Description = "NetworkBuster - Network Topology Map" +$Shortcut.Save() +Write-Host " โœ… Desktop: NetworkBuster Map.lnk" 
-ForegroundColor Green + +# API Tracer shortcut +$Shortcut = $WshShell.CreateShortcut("$env:USERPROFILE\Desktop\NetworkBuster API Tracer.lnk") +$Shortcut.TargetPath = "$InstallDir\.venv\Scripts\python.exe" +$Shortcut.Arguments = "`"$InstallDir\api_tracer.py`"" +$Shortcut.WorkingDirectory = $InstallDir +$Shortcut.IconLocation = $IconPath +$Shortcut.Description = "NetworkBuster - API Endpoint Tracer" +$Shortcut.Save() +Write-Host " โœ… Desktop: NetworkBuster API Tracer.lnk" -ForegroundColor Green + +Write-Host "`n[5/7] ๐Ÿ“‚ Creating Start Menu folder..." -ForegroundColor Yellow +$StartMenuPath = "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\NetworkBuster" +New-Item -ItemType Directory -Force -Path $StartMenuPath | Out-Null + +# Start Menu shortcuts +$shortcuts = @( + @{Name="NetworkBuster Launcher"; Script="networkbuster_launcher.py"; Desc="Main application launcher"}, + @{Name="Network Map Viewer"; Script="network_map_viewer.py"; Desc="Interactive network topology map"}, + @{Name="API Tracer"; Script="api_tracer.py"; Desc="API endpoint monitoring"}, + @{Name="Mission Control"; Script="nasa_home_base.py"; Desc="Mission control dashboard"}, + @{Name="Universal Launcher"; Script="universal_launcher.py"; Desc="Universal service launcher"}, + @{Name="Git Cloud Shortcuts"; Script="git_cloud_shortcuts.py"; Desc="Git repository shortcuts"}, + @{Name="Flash Git Backup"; Script="flash_git_backup.py"; Desc="Quick git backup tool"} +) + +foreach ($item in $shortcuts) { + $Shortcut = $WshShell.CreateShortcut("$StartMenuPath\$($item.Name).lnk") + $Shortcut.TargetPath = "$InstallDir\.venv\Scripts\python.exe" + $Shortcut.Arguments = "`"$InstallDir\$($item.Script)`"" + $Shortcut.WorkingDirectory = $InstallDir + $Shortcut.IconLocation = $IconPath + $Shortcut.Description = $item.Desc + $Shortcut.Save() + Write-Host " โœ… Start Menu: $($item.Name)" -ForegroundColor Green +} + +# Uninstaller +$Shortcut = $WshShell.CreateShortcut("$StartMenuPath\Uninstall NetworkBuster.lnk") 
+$Shortcut.TargetPath = "powershell.exe" +$Shortcut.Arguments = "-ExecutionPolicy Bypass -File `"$InstallDir\uninstall_networkbuster.ps1`"" +$Shortcut.WorkingDirectory = $InstallDir +$Shortcut.Description = "Uninstall NetworkBuster" +$Shortcut.Save() +Write-Host " โœ… Start Menu: Uninstall NetworkBuster" -ForegroundColor Green + +Write-Host "`n[6/7] โš™๏ธ Creating scheduled task..." -ForegroundColor Yellow +& ".venv\Scripts\python.exe" networkbuster_launcher.py --schedule + +Write-Host "`n[7/7] ๐Ÿ“ Registering application..." -ForegroundColor Yellow +$RegPath = "HKCU:\Software\NetworkBuster" +New-Item -Path $RegPath -Force | Out-Null +Set-ItemProperty -Path $RegPath -Name "InstallPath" -Value $InstallDir +Set-ItemProperty -Path $RegPath -Name "Version" -Value "1.0.1" +Set-ItemProperty -Path $RegPath -Name "InstallDate" -Value (Get-Date -Format "yyyy-MM-dd HH:mm:ss") +Write-Host " โœ… Registry keys created" -ForegroundColor Green + +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Green +Write-Host "โ•‘ โœ… INSTALLATION COMPLETE! 
โ•‘" -ForegroundColor Green +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green + +Write-Host "`n๐Ÿ“ฆ Installed Components:" -ForegroundColor Cyan +Write-Host " โ€ข NetworkBuster package (pip installable)" -ForegroundColor White +Write-Host " โ€ข 3 Desktop shortcuts" -ForegroundColor White +Write-Host " โ€ข 8 Start Menu shortcuts" -ForegroundColor White +Write-Host " โ€ข Scheduled task (January 17, 2026)" -ForegroundColor White +Write-Host " โ€ข Registry integration" -ForegroundColor White + +Write-Host "`n๐ŸŽฏ Quick Access:" -ForegroundColor Cyan +Write-Host " Desktop: NetworkBuster.lnk" -ForegroundColor White +Write-Host " Start Menu: Programs > NetworkBuster" -ForegroundColor White +Write-Host " Command: networkbuster" -ForegroundColor White + +Write-Host "`n๐Ÿš€ Launch Options:" -ForegroundColor Cyan +Write-Host " 1. Double-click desktop icon" -ForegroundColor White +Write-Host " 2. Start Menu > NetworkBuster" -ForegroundColor White +Write-Host " 3. Command: python networkbuster_launcher.py --start" -ForegroundColor White + +Write-Host "`nโฐ Scheduled Launch:" -ForegroundColor Cyan +Write-Host " Date: January 17, 2026 at 9:00 AM" -ForegroundColor White +Write-Host " Task: NetworkBuster_ScheduledLaunch" -ForegroundColor White + +Write-Host "`nโœจ Press any key to launch NetworkBuster Map..." -ForegroundColor Yellow +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") + +# Launch network map to show the new navigation +Start-Process "$InstallDir\.venv\Scripts\python.exe" -ArgumentList "`"$InstallDir\network_map_viewer.py`"" -WorkingDirectory $InstallDir + +Write-Host "`nโœ… NetworkBuster Map launched with enhanced navigation!" 
-ForegroundColor Green +Write-Host " Use the navigation buttons to explore the topology" -ForegroundColor White diff --git a/instances/test-auto-ms.json b/instances/test-auto-ms.json new file mode 100644 index 0000000..04d9b0a --- /dev/null +++ b/instances/test-auto-ms.json @@ -0,0 +1,7 @@ +๏ปฟ{ + "name": "test-auto-ms", + "status": "accepted", + "created": "2025-12-21T12:18:57.7790547-07:00", + "port": 4003, + "approvedAt": "2025-12-21T12:18:57.8482387-07:00" +} diff --git a/launch.py b/launch.py new file mode 100644 index 0000000..1d28f00 --- /dev/null +++ b/launch.py @@ -0,0 +1,614 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Master Launcher +Cross-platform launcher with user role management (Windows/Unix) +Supports: admin, user, visitor modes with bypass options +""" + +import subprocess +import sys +import os +import time +import platform +import getpass +from pathlib import Path + +# Import security verification +try: + from security_verification import UserVerification, SecurityLevel + SECURITY_ENABLED = True +except ImportError: + SECURITY_ENABLED = False + +PROJECT_PATH = Path(__file__).parent.resolve() +IS_WINDOWS = platform.system() == "Windows" +IS_UNIX = platform.system() in ("Linux", "Darwin") + +# User role definitions +USER_ROLES = { + "admin": { + "level": 3, + "permissions": ["all", "firewall", "services", "startup", "kill_process"], + "description": "Full system access" + }, + "user": { + "level": 2, + "permissions": ["start", "stop", "status", "health", "dashboard"], + "description": "Standard operations" + }, + "visitor": { + "level": 1, + "permissions": ["status", "health", "dashboard"], + "description": "Read-only access" + } +} + +# Current session +CURRENT_ROLE = "user" +BYPASS_MODE = False + + +def detect_platform(): + """Detect and display platform info.""" + info = { + "system": platform.system(), + "release": platform.release(), + "machine": platform.machine(), + "python": platform.python_version(), + "user": getpass.getuser() + } + 
return info + + +def is_admin(): + """Check if running as administrator/root (cross-platform).""" + if IS_WINDOWS: + try: + import ctypes + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + else: + # Unix/Linux - check for root + return os.geteuid() == 0 + + +def is_sudo_available(): + """Check if sudo is available on Unix systems.""" + if IS_WINDOWS: + return False + try: + result = subprocess.run(["which", "sudo"], capture_output=True, text=True) + return result.returncode == 0 + except: + return False + + +def run_as_admin(args=None): + """Restart script with admin/root privileges (cross-platform).""" + print("โ†‘ Requesting elevated privileges...") + + if IS_WINDOWS: + try: + import ctypes + script_args = f'"{__file__}"' + if args: + script_args += f' {args}' + result = ctypes.windll.shell32.ShellExecuteW( + None, "runas", sys.executable, + script_args, str(PROJECT_PATH), 1 + ) + if result > 32: + sys.exit(0) + else: + print("โœ— Failed to elevate. Running without admin...") + return False + except Exception as e: + print(f"โœ— Elevation error: {e}") + return False + else: + # Unix/Linux - use sudo + if is_sudo_available(): + cmd = ["sudo", sys.executable, __file__] + if args: + cmd.extend(args.split()) + os.execvp("sudo", cmd) + else: + print("โœ— sudo not available. 
Run as root manually.") + return False + + +def run_with_sudo(command): + """Run a command with sudo on Unix (cross-platform wrapper).""" + if IS_WINDOWS: + return subprocess.run(command, capture_output=True, text=True, shell=True) + else: + if isinstance(command, list): + cmd = ["sudo"] + command + else: + cmd = f"sudo {command}" + return subprocess.run(cmd, capture_output=True, text=True, shell=isinstance(cmd, str)) + + +def set_user_role(role): + """Set the current user role.""" + global CURRENT_ROLE + if role in USER_ROLES: + CURRENT_ROLE = role + print(f"โœ“ Role set to: {role} ({USER_ROLES[role]['description']})") + return True + else: + print(f"โœ— Invalid role: {role}") + return False + + +def has_permission(permission): + """Check if current role has a permission.""" + global BYPASS_MODE, CURRENT_ROLE + if BYPASS_MODE: + return True + role_perms = USER_ROLES.get(CURRENT_ROLE, {}).get("permissions", []) + return "all" in role_perms or permission in role_perms + + +def toggle_bypass(): + """Toggle bypass mode (requires admin confirmation).""" + global BYPASS_MODE + if is_admin(): + BYPASS_MODE = not BYPASS_MODE + status = "ENABLED" if BYPASS_MODE else "DISABLED" + print(f"โšก Bypass mode: {status}") + return True + else: + print("โœ— Bypass requires admin privileges") + return False + + +def require_permission(permission): + """Decorator to check permissions before running a function.""" + def decorator(func): + def wrapper(*args, **kwargs): + if has_permission(permission): + return func(*args, **kwargs) + else: + print(f"โœ— Permission denied: requires '{permission}'") + print(f" Current role: {CURRENT_ROLE}") + return None + return wrapper + return decorator + + +def check_node(): + """Check if Node.js is available.""" + try: + result = subprocess.run(["node", "--version"], capture_output=True, text=True) + version = result.stdout.strip() + print(f"โœ“ Node.js: {version}") + return True + except: + print("โœ— Node.js not found!") + return False + + +def 
check_python_deps(): + """Check and install Python dependencies.""" + print("\n[2/5] Checking Python dependencies...") + + try: + import psutil + print(" โœ“ psutil installed") + return True + except ImportError: + print(" โš  psutil not found, attempting install...") + try: + subprocess.run( + [sys.executable, "-m", "pip", "install", "psutil", "-q"], + check=True, + capture_output=True + ) + print(" โœ“ psutil installed successfully") + return True + except subprocess.CalledProcessError as e: + print(" โš  psutil install failed (optional dependency)") + print(" โ„น Some monitoring features will be limited") + return False + except Exception as e: + print(f" โš  Could not install psutil: {e}") + return False + + +def setup_firewall(): + """Setup firewall rules (requires admin) - cross-platform.""" + if not has_permission("firewall"): + print(" โš  Permission denied for firewall") + return + + if not is_admin() and not BYPASS_MODE: + print(" โš  Skipping firewall (no admin)") + return + + print("\n[3/5] Configuring firewall...") + + ports = [ + (3000, "NetworkBuster-Web"), + (3001, "NetworkBuster-API"), + (3002, "NetworkBuster-Audio"), + ] + + for port, name in ports: + if IS_WINDOWS: + subprocess.run([ + "powershell", "-Command", + f'New-NetFirewallRule -DisplayName "{name}" -Direction Inbound -Protocol TCP -LocalPort {port} -Action Allow -ErrorAction SilentlyContinue' + ], capture_output=True) + elif IS_UNIX: + # Try ufw first (Ubuntu/Debian), then firewalld (CentOS/RHEL), then iptables + result = subprocess.run(["which", "ufw"], capture_output=True) + if result.returncode == 0: + run_with_sudo(["ufw", "allow", str(port)]) + else: + result = subprocess.run(["which", "firewall-cmd"], capture_output=True) + if result.returncode == 0: + run_with_sudo(["firewall-cmd", "--add-port", f"{port}/tcp", "--permanent"]) + else: + run_with_sudo(["iptables", "-A", "INPUT", "-p", "tcp", "--dport", str(port), "-j", "ACCEPT"]) + print(f" โœ“ Port {port} opened") + + +def 
kill_existing_servers(): + """Kill any existing Node.js processes on our ports - cross-platform.""" + if not has_permission("kill_process") and not has_permission("stop"): + print(" โš  Permission denied") + return + + print("\n[4/5] Cleaning up existing processes...") + + if IS_WINDOWS: + for port in [3000, 3001, 3002]: + subprocess.run([ + "powershell", "-Command", + f"$p = Get-NetTCPConnection -LocalPort {port} -ErrorAction SilentlyContinue; if ($p) {{ Stop-Process -Id $p.OwningProcess -Force -ErrorAction SilentlyContinue }}" + ], capture_output=True) + else: + # Unix - use lsof and kill + for port in [3000, 3001, 3002]: + result = subprocess.run( + f"lsof -ti:{port} | xargs kill -9 2>/dev/null || true", + shell=True, capture_output=True + ) + + print(" โœ“ Cleaned up old processes") + time.sleep(1) + + +def start_servers(): + """Start all NetworkBuster servers - cross-platform.""" + if not has_permission("start"): + print(" โš  Permission denied: requires 'start' permission") + return + + print("\n[5/5] Starting servers...") + + os.chdir(PROJECT_PATH) + + if IS_WINDOWS: + # Start servers in a new console window + subprocess.Popen( + ["cmd", "/c", "start", "NetworkBuster Servers", "node", "start-servers.js"], + cwd=PROJECT_PATH + ) + else: + # Unix - use nohup or screen if available + result = subprocess.run(["which", "screen"], capture_output=True) + if result.returncode == 0: + subprocess.Popen( + ["screen", "-dmS", "networkbuster", "node", "start-servers.js"], + cwd=PROJECT_PATH + ) + print(" โ„น Use 'screen -r networkbuster' to attach") + else: + subprocess.Popen( + ["nohup", "node", "start-servers.js", "&"], + cwd=PROJECT_PATH, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL + ) + + print(" โœ“ Servers launching") + + +def start_health_monitor(): + """Start health monitoring in background.""" + health_script = PROJECT_PATH / "system_health.py" + if health_script.exists(): + subprocess.Popen( + [sys.executable, str(health_script), "--monitor", 
"60"], + creationflags=subprocess.CREATE_NEW_CONSOLE, + cwd=PROJECT_PATH + ) + print(" โœ“ Health monitor started") + + +def show_status(): + """Show final status.""" + print("\n" + "=" * 60) + print(" NetworkBuster Started Successfully!") + print("=" * 60) + print() + print(" ๐ŸŒ Web Server: http://localhost:3000") + print(" ๐Ÿ”Œ API Server: http://localhost:3001") + print(" ๐Ÿ”Š Audio Server: http://localhost:3002") + print() + print(" ๐Ÿ“‹ Quick Commands:") + print(" python quick_admin.py - Admin menu") + print(" python system_health.py - Health check") + print() + print("=" * 60) + + +def show_menu(): + """Display main menu with role info.""" + global CURRENT_ROLE, BYPASS_MODE + + print("\n" + "โ”€" * 60) + print(" ๐Ÿ“‹ MAIN MENU") + print("โ”€" * 60) + + # Show role and bypass status + role_info = USER_ROLES.get(CURRENT_ROLE, {}) + bypass_str = " [BYPASS]" if BYPASS_MODE else "" + print(f" ๐Ÿ‘ค Role: {CURRENT_ROLE.upper()}{bypass_str} - {role_info.get('description', '')}") + print("โ”€" * 60) + + # Server operations + print(" [1] ๐Ÿš€ Launch All Servers") + print(" [2] ๐Ÿ›‘ Stop All Servers") + print(" [3] ๐Ÿ”Œ Check Port Status") + print(" [4] ๐ŸŒ Open Dashboard") + print() + + # Tools + print(" [5] ๐Ÿ”ง Quick Admin Tools") + print(" [6] ๐Ÿ“Š System Health Check") + print(" [7] โš™๏ธ Service Manager") + print(" [8] ๐Ÿ”„ Auto-Startup Setup") + print() + + # Cloud & Admin + print(" [9] ๐Ÿ”ฅ Configure Firewall (admin)") + print(" [c] โ˜๏ธ Cloud Device Manager (Azure/Vercel)") + print(" [m] ๐Ÿ“ฑ Mobile Deployment (iOS/Android/PWA)") + print(" [d] ๐Ÿ›ธ Drone Flight System (Scan/Auto)") + print(" [v] ๐ŸŒ Vercel Domain Setup") + print() + + # Role management + print(" [r] ๐Ÿ‘ค Change Role (admin/user/visitor)") + print(" [b] โšก Toggle Bypass Mode (admin only)") + print(" [e] โ†‘ Elevate to Admin") + print(" [i] โ„น๏ธ System Info") + print() + + # Security + if SECURITY_ENABLED: + print(" [s] ๐Ÿ” Security Verification") + + print(" [0] โŒ Exit") + 
print("โ”€" * 60) + + +def stop_all_servers(): + """Stop all Node.js processes - cross-platform.""" + if not has_permission("stop"): + print(" โš  Permission denied: requires 'stop' permission") + return + + print("\n๐Ÿ›‘ Stopping all servers...") + + if IS_WINDOWS: + subprocess.run([ + "powershell", "-Command", + "Get-Process node -ErrorAction SilentlyContinue | Stop-Process -Force" + ], capture_output=True) + else: + # Unix - kill all node processes + subprocess.run("pkill -f 'node.*start-servers' || killall node 2>/dev/null || true", shell=True) + + print("โœ“ All Node.js processes stopped") + + +def check_port_status(): + """Check status of server ports - cross-platform.""" + if not has_permission("status"): + print(" โš  Permission denied") + return + + print("\n๐Ÿ”Œ Port Status:") + print("-" * 40) + + for port, name in [(3000, "Web"), (3001, "API"), (3002, "Audio")]: + if IS_WINDOWS: + result = subprocess.run([ + "powershell", "-Command", + f"Get-NetTCPConnection -LocalPort {port} -State Listen -ErrorAction SilentlyContinue" + ], capture_output=True, text=True) + is_running = bool(result.stdout.strip()) + else: + # Unix - use netstat or ss + result = subprocess.run( + f"ss -tlnp 2>/dev/null | grep :{port} || netstat -tlnp 2>/dev/null | grep :{port}", + shell=True, capture_output=True, text=True + ) + is_running = bool(result.stdout.strip()) + + status = "๐ŸŸข RUNNING" if is_running else "โšช STOPPED" + print(f" Port {port} ({name}): {status}") + + +def open_dashboard(): + """Open dashboard in browser.""" + import webbrowser + print("\n๐ŸŒ Opening http://localhost:3000...") + webbrowser.open("http://localhost:3000") + + +def run_external_script(script_name): + """Run another Python script.""" + script_path = PROJECT_PATH / script_name + if script_path.exists(): + subprocess.run([sys.executable, str(script_path)], cwd=PROJECT_PATH) + else: + print(f"โœ— Script not found: {script_name}") + + +def launch_all(): + """Full launch sequence.""" + print("\n" + "-" * 
60) + + # Step 1: Check Node.js + print("\n[1/5] Checking Node.js...") + if not check_node(): + print("\nโœ— Please install Node.js first!") + return False + + # Step 2: Python deps + check_python_deps() + + # Step 3: Firewall + setup_firewall() + + # Step 4: Kill existing + kill_existing_servers() + + # Step 5: Start servers + start_servers() + + # Wait for servers to start + print("\nโณ Waiting for servers to initialize...") + time.sleep(3) + + # Show status + show_status() + return True + + +def show_system_info(): + """Display system information.""" + info = detect_platform() + + print("\n" + "โ”€" * 60) + print(" โ„น๏ธ SYSTEM INFORMATION") + print("โ”€" * 60) + print(f" Platform: {info['system']} {info['release']}") + print(f" Machine: {info['machine']}") + print(f" Python: {info['python']}") + print(f" User: {info['user']}") + print(f" Admin: {'Yes' if is_admin() else 'No'}") + print(f" Project: {PROJECT_PATH}") + print(f" Role: {CURRENT_ROLE}") + print(f" Bypass: {'Enabled' if BYPASS_MODE else 'Disabled'}") + + if IS_UNIX: + print(f" Sudo: {'Available' if is_sudo_available() else 'Not available'}") + + print("โ”€" * 60) + + +def change_role(): + """Change user role interactively.""" + print("\n๐Ÿ‘ค Available Roles:") + for role, info in USER_ROLES.items(): + print(f" - {role}: {info['description']}") + print(f" Permissions: {', '.join(info['permissions'])}") + + new_role = input("\n Enter role (admin/user/visitor): ").strip().lower() + + if new_role == "admin" and not is_admin(): + print("โš  Admin role requires elevated privileges") + resp = input(" Elevate now? 
(y/n): ").strip().lower() + if resp == 'y': + run_as_admin() + return + + set_user_role(new_role) + + +def main(): + """Main launcher with menu.""" + global CURRENT_ROLE + + # Auto-detect role based on privileges + if is_admin(): + CURRENT_ROLE = "admin" + + print() + print("โ•”" + "โ•" * 58 + "โ•—") + print("โ•‘" + " NetworkBuster Master Launcher".center(58) + "โ•‘") + print("โ•‘" + f" {platform.system()} | Python {platform.python_version()}".center(58) + "โ•‘") + print("โ•š" + "โ•" * 58 + "โ•") + + # Admin check + admin_status = "โœ“ Administrator" if is_admin() else "โš  Standard User" + print(f"\n Status: {admin_status}") + print(f" Project: {PROJECT_PATH}") + + while True: + show_menu() + choice = input("\n Select option: ").strip().lower() + + if choice == "1": + launch_all() + elif choice == "2": + stop_all_servers() + elif choice == "3": + check_port_status() + elif choice == "4": + open_dashboard() + elif choice == "5": + run_external_script("quick_admin.py") + elif choice == "6": + run_external_script("system_health.py") + elif choice == "7": + run_external_script("service_manager.py") + elif choice == "8": + run_external_script("auto_startup.py") + elif choice == "9": + if has_permission("firewall"): + setup_firewall() + else: + print("\nโš  Firewall requires 'firewall' permission") + resp = input(" Elevate to admin? 
/* Simple AI client wrapper for recycling recommendations.
 * Uses OPENAI_API_KEY if present; otherwise falls back to rule-based heuristics.
 * Exports: getRecommendations(items, context, prefs)
 */
import fs from 'fs';

const OPENAI_KEY = process.env.OPENAI_API_KEY || process.env.OPENAI_API;

/**
 * Call the OpenAI chat-completions endpoint with a single user prompt.
 * @param {string} prompt - user message sent after a fixed system message
 * @returns {Promise<string>} raw assistant text (empty string if absent)
 * @throws {Error} when no key is configured or the API returns non-2xx
 */
async function callOpenAI(prompt) {
  if (!OPENAI_KEY) throw new Error('OpenAI key not configured');
  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${OPENAI_KEY}`
    },
    body: JSON.stringify({
      model: 'gpt-4o-mini',
      messages: [
        { role: 'system', content: 'You are an assistant that provides recycling guidance.' },
        { role: 'user', content: prompt }
      ],
      max_tokens: 500
    })
  });
  if (!res.ok) {
    const body = await res.text();
    throw new Error(`OpenAI error: ${res.status} ${body}`);
  }
  const data = await res.json();
  return data?.choices?.[0]?.message?.content || '';
}

/**
 * Rule-based fallback: one recommendation per item, keyed on name keywords.
 * Note: keyword checks are ordered — "box" matches before "pizza", so a
 * "greasy pizza box" is classified by the cardboard rule.
 * @param {Array<{name?: string}>} items
 * @returns {Array<{action: string, reason: string, confidence: number}>}
 */
function heuristicRecommendations(items = []) {
  const recs = [];
  for (const it of items) {
    const name = (it.name || '').toLowerCase();
    if (name.includes('cardboard') || name.includes('box')) {
      recs.push({ action: 'recycle', reason: 'Cardboard and paperboard are often accepted in curbside recycling. Remove liquids and grease.', confidence: 0.9 });
    } else if (name.includes('pizza') || name.includes('greasy')) {
      recs.push({ action: 'compost', reason: 'Greasy cardboard is better composted or thrown away if contaminated.', confidence: 0.8 });
    } else if (name.includes('bottle') || name.includes('plastic bottle')) {
      recs.push({ action: 'recycle', reason: 'Rinse and place in curbside recycling if accepted locally.', confidence: 0.95 });
    } else if (name.includes('glass')) {
      recs.push({ action: 'recycle', reason: 'Glass is recyclable in many curbside programs; check local rules.', confidence: 0.9 });
    } else {
      recs.push({ action: 'unknown', reason: 'No heuristic; consider checking local disposal guidelines.', confidence: 0.5 });
    }
  }
  return recs;
}

/**
 * Get per-item recycling recommendations, via the LLM when a key is
 * configured and falling back to heuristics otherwise (or on any failure).
 * @returns {Promise<{source: string, raw?: string, recommendations: Array}>}
 */
export async function getRecommendations(items = [], context = {}, prefs = {}) {
  // Build prompt
  const inputSummary = (items || []).map(i => `${i.name}${i.context ? ' (' + i.context + ')' : ''}`).join(', ');
  const prompt = `User items: ${inputSummary}\nContext: ${JSON.stringify(context)}\nPreferences: ${JSON.stringify(prefs)}\nGive per-item recommendation with short reason and confidence as JSON array [{item, action, reason, confidence}]`;

  if (OPENAI_KEY) {
    try {
      const txt = await callOpenAI(prompt);
      // Try to extract the JSON array from the model output.
      // Bug fix: previously everything from the first "[{" to the END of the
      // text was parsed, so any trailing model chatter broke JSON.parse.
      // Slice only up to the LAST "]" instead.
      const m = txt.match(/\[\s*\{/);
      if (m) {
        const jsonEnd = txt.lastIndexOf(']');
        const j = jsonEnd > m.index ? txt.slice(m.index, jsonEnd + 1) : txt.slice(m.index);
        try {
          const parsed = JSON.parse(j);
          return { source: 'llm', raw: txt, recommendations: parsed };
        } catch (err) {
          // Model emitted malformed JSON — fall back to heuristics.
          return { source: 'llm-failed', raw: txt, recommendations: heuristicRecommendations(items) };
        }
      }
      return { source: 'llm-text', raw: txt, recommendations: heuristicRecommendations(items) };
    } catch (err) {
      console.warn('LLM call failed, using heuristics', err.message);
      return { source: 'fallback', recommendations: heuristicRecommendations(items) };
    }
  }

  // No key: return heuristics
  return { source: 'heuristic', recommendations: heuristicRecommendations(items) };
}

export default { getRecommendations };
/**
 * AI Providers - Multi-provider abstraction layer for AI inference
 * Supports: OpenAI, Azure OpenAI, Anthropic Claude, Google Gemini, Custom endpoints
 *
 * Usage:
 *   import { getAvailableProviders, chat, embed, generateImage } from './aiProviders.js';
 *   const result = await chat('openai', [{ role: 'user', content: 'Hello!' }]);
 */

import crypto from 'crypto';

// Provider configurations from environment. `apiKey` (and Azure's endpoint /
// deployment) are thunks so env vars are read lazily at call time rather than
// captured once at import time.
const PROVIDER_CONFIGS = {
  openai: {
    name: 'OpenAI',
    apiKey: () => process.env.OPENAI_API_KEY,
    baseUrl: 'https://api.openai.com/v1',
    models: {
      chat: ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'],
      embed: ['text-embedding-3-small', 'text-embedding-3-large', 'text-embedding-ada-002'],
      image: ['dall-e-3', 'dall-e-2']
    },
    defaultModel: { chat: 'gpt-4o-mini', embed: 'text-embedding-3-small', image: 'dall-e-3' }
  },
  azure: {
    name: 'Azure OpenAI',
    apiKey: () => process.env.AZURE_OPENAI_KEY,
    endpoint: () => process.env.AZURE_OPENAI_ENDPOINT,
    deployment: () => process.env.AZURE_OPENAI_DEPLOYMENT || 'gpt-4o',
    apiVersion: '2024-02-15-preview',
    models: { chat: ['deployment'], embed: ['deployment'], image: [] }
  },
  anthropic: {
    name: 'Anthropic Claude',
    apiKey: () => process.env.ANTHROPIC_API_KEY,
    baseUrl: 'https://api.anthropic.com/v1',
    models: {
      chat: ['claude-3-5-sonnet-20241022', 'claude-3-5-haiku-20241022', 'claude-3-opus-20240229'],
      embed: [],
      image: []
    },
    defaultModel: { chat: 'claude-3-5-sonnet-20241022' }
  },
  gemini: {
    name: 'Google Gemini',
    apiKey: () => process.env.GOOGLE_GEMINI_KEY || process.env.GEMINI_API_KEY,
    baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
    models: {
      chat: ['gemini-2.0-flash-exp', 'gemini-1.5-pro', 'gemini-1.5-flash'],
      embed: ['text-embedding-004'],
      image: ['imagen-3.0-generate-001']
    },
    defaultModel: { chat: 'gemini-2.0-flash-exp', embed: 'text-embedding-004' }
  },
  custom: {
    name: 'Custom Endpoint',
    apiKey: () => process.env.CUSTOM_AI_KEY,
    baseUrl: () => process.env.CUSTOM_AI_ENDPOINT || 'http://localhost:11434/v1',
    models: { chat: ['*'], embed: ['*'], image: [] },
    defaultModel: { chat: 'llama2', embed: 'nomic-embed-text' }
  }
};

// In-memory response cache with a TTL.
const responseCache = new Map();
// Bug fix: parseInt called without an explicit radix; always pass 10.
const CACHE_TTL = parseInt(process.env.AI_CACHE_TTL_SECONDS || '300', 10) * 1000;

/** Deterministic 32-hex-char key from (provider, type, payload). */
function getCacheKey(provider, type, payload) {
  const hash = crypto.createHash('sha256')
    .update(JSON.stringify({ provider, type, payload }))
    .digest('hex');
  return hash.substring(0, 32);
}

/** Return a cached value, evicting and returning null when expired. */
function getCached(key) {
  const entry = responseCache.get(key);
  if (!entry) return null;
  if (Date.now() > entry.expiresAt) {
    responseCache.delete(key);
    return null;
  }
  return entry.data;
}

/** Store a value with the configured TTL; lazily sweep expired entries. */
function setCache(key, data) {
  responseCache.set(key, { data, expiresAt: Date.now() + CACHE_TTL });
  // Cleanup old entries once the cache grows large. NOTE(review): only
  // expired entries are evicted, so the cache is not strictly bounded.
  if (responseCache.size > 1000) {
    const now = Date.now();
    for (const [k, v] of responseCache) {
      if (now > v.expiresAt) responseCache.delete(k);
    }
  }
}

// Per-device sliding-window rate limiting (timestamps of recent requests).
const rateLimits = new Map();
// Bug fix: parseInt called without an explicit radix; always pass 10.
const RATE_LIMIT_PER_MINUTE = parseInt(process.env.AI_RATE_LIMIT_PER_MINUTE || '60', 10);

/**
 * Record one request for a device against a 60-second sliding window.
 * @returns {{allowed: boolean, remaining: number, resetIn: number}}
 */
function checkRateLimit(deviceId) {
  const now = Date.now();
  const windowStart = now - 60000;

  let deviceLimits = rateLimits.get(deviceId);
  if (!deviceLimits) {
    deviceLimits = [];
    rateLimits.set(deviceId, deviceLimits);
  }

  // Drop timestamps that have fallen out of the window.
  while (deviceLimits.length > 0 && deviceLimits[0] < windowStart) {
    deviceLimits.shift();
  }

  if (deviceLimits.length >= RATE_LIMIT_PER_MINUTE) {
    return { allowed: false, remaining: 0, resetIn: Math.ceil((deviceLimits[0] + 60000 - now) / 1000) };
  }

  deviceLimits.push(now);
  return { allowed: true, remaining: RATE_LIMIT_PER_MINUTE - deviceLimits.length, resetIn: 60 };
}

/**
 * List providers usable right now (key configured, or the keyless custom
 * endpoint), with per-capability flags derived from their model lists.
 */
export function getAvailableProviders() {
  const available = [];
  for (const [id, config] of Object.entries(PROVIDER_CONFIGS)) {
    const hasKey = typeof config.apiKey === 'function' ? !!config.apiKey() : !!config.apiKey;
    if (hasKey || id === 'custom') {
      available.push({
        id,
        name: config.name,
        capabilities: {
          chat: config.models.chat.length > 0,
          embed: config.models.embed.length > 0,
          image: config.models.image.length > 0
        },
        models: config.models
      });
    }
  }
  return available;
}

/** True when the provider exists and has a key ('custom' never needs one). */
export function isProviderAvailable(providerId) {
  const config = PROVIDER_CONFIGS[providerId];
  if (!config) return false;
  if (providerId === 'custom') return true;
  const apiKey = typeof config.apiKey === 'function' ? config.apiKey() : config.apiKey;
  return !!apiKey;
}

// ============ OPENAI PROVIDER ============
/**
 * Chat completion via OpenAI.
 * @param {Array<{role: string, content: string}>} messages
 * @param {Object} options - {model, maxTokens, temperature, stream, extra}
 * @returns {Promise<Object|ReadableStream>} normalized result, or the raw
 *   response body stream when options.stream is true
 */
async function openaiChat(messages, options = {}) {
  const config = PROVIDER_CONFIGS.openai;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('OpenAI API key not configured');

  const model = options.model || config.defaultModel.chat;
  const response = await fetch(`${config.baseUrl}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`
    },
    body: JSON.stringify({
      model,
      messages,
      max_tokens: options.maxTokens || 1000,
      temperature: options.temperature ?? 0.7,
      stream: options.stream || false,
      ...options.extra
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`OpenAI error: ${response.status} - ${error}`);
  }

  if (options.stream) {
    return response.body; // Return the readable stream
  }

  const data = await response.json();
  return {
    provider: 'openai',
    model,
    content: data.choices?.[0]?.message?.content || '',
    usage: data.usage,
    raw: data
  };
}
/** Throw a descriptive error when a provider response is not 2xx. */
async function raiseForStatus(label, response) {
  if (response.ok) return;
  const detail = await response.text();
  throw new Error(`${label}: ${response.status} - ${detail}`);
}

/** Create embeddings via the OpenAI embeddings endpoint. */
async function openaiEmbed(text, options = {}) {
  const config = PROVIDER_CONFIGS.openai;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('OpenAI API key not configured');

  const model = options.model || config.defaultModel.embed;
  const input = Array.isArray(text) ? text : [text];

  const response = await fetch(`${config.baseUrl}/embeddings`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${apiKey}` },
    body: JSON.stringify({ model, input })
  });
  await raiseForStatus('OpenAI embedding error', response);

  const payload = await response.json();
  return {
    provider: 'openai',
    model,
    embeddings: payload.data.map((entry) => entry.embedding),
    usage: payload.usage
  };
}

/** Generate images via DALL-E; returns URLs plus any revised prompts. */
async function openaiImage(prompt, options = {}) {
  const config = PROVIDER_CONFIGS.openai;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('OpenAI API key not configured');

  const model = options.model || config.defaultModel.image;
  const response = await fetch(`${config.baseUrl}/images/generations`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${apiKey}` },
    body: JSON.stringify({
      model,
      prompt,
      n: options.n || 1,
      size: options.size || '1024x1024',
      quality: options.quality || 'standard',
      response_format: 'url'
    })
  });
  await raiseForStatus('OpenAI image error', response);

  const payload = await response.json();
  return {
    provider: 'openai',
    model,
    images: payload.data.map((entry) => ({ url: entry.url, revisedPrompt: entry.revised_prompt }))
  };
}

// ============ AZURE OPENAI PROVIDER ============
/** Chat completion via an Azure OpenAI deployment. */
async function azureChat(messages, options = {}) {
  const config = PROVIDER_CONFIGS.azure;
  const apiKey = config.apiKey();
  const endpoint = config.endpoint();
  const deployment = options.deployment || config.deployment();

  if (!apiKey || !endpoint) throw new Error('Azure OpenAI not configured');

  const url = `${endpoint}/openai/deployments/${deployment}/chat/completions?api-version=${config.apiVersion}`;
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'api-key': apiKey },
    body: JSON.stringify({
      messages,
      max_tokens: options.maxTokens || 1000,
      temperature: options.temperature ?? 0.7
    })
  });
  await raiseForStatus('Azure OpenAI error', response);

  const payload = await response.json();
  return {
    provider: 'azure',
    model: deployment,
    content: payload.choices?.[0]?.message?.content || '',
    usage: payload.usage,
    raw: payload
  };
}

/** Create embeddings via an Azure OpenAI embedding deployment. */
async function azureEmbed(text, options = {}) {
  const config = PROVIDER_CONFIGS.azure;
  const apiKey = config.apiKey();
  const endpoint = config.endpoint();
  const deployment = options.deployment || process.env.AZURE_OPENAI_EMBED_DEPLOYMENT || 'text-embedding-ada-002';

  if (!apiKey || !endpoint) throw new Error('Azure OpenAI not configured');

  const input = Array.isArray(text) ? text : [text];
  const url = `${endpoint}/openai/deployments/${deployment}/embeddings?api-version=${config.apiVersion}`;

  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'api-key': apiKey },
    body: JSON.stringify({ input })
  });
  await raiseForStatus('Azure embedding error', response);

  const payload = await response.json();
  return {
    provider: 'azure',
    model: deployment,
    embeddings: payload.data.map((entry) => entry.embedding),
    usage: payload.usage
  };
}

// ============ ANTHROPIC CLAUDE PROVIDER ============
/**
 * Chat completion via Anthropic's Messages API. OpenAI-style messages are
 * converted: the system message becomes the top-level `system` field and the
 * remaining turns keep assistant/user roles.
 */
async function anthropicChat(messages, options = {}) {
  const config = PROVIDER_CONFIGS.anthropic;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('Anthropic API key not configured');

  const model = options.model || config.defaultModel.chat;

  const systemMessage = messages.find((m) => m.role === 'system')?.content;
  const turns = messages
    .filter((m) => m.role !== 'system')
    .map((m) => ({
      role: m.role === 'assistant' ? 'assistant' : 'user',
      content: m.content
    }));

  const response = await fetch(`${config.baseUrl}/messages`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01'
    },
    body: JSON.stringify({
      model,
      max_tokens: options.maxTokens || 1000,
      system: systemMessage,
      messages: turns
    })
  });
  await raiseForStatus('Anthropic error', response);

  const payload = await response.json();
  return {
    provider: 'anthropic',
    model,
    content: payload.content?.[0]?.text || '',
    usage: { input_tokens: payload.usage?.input_tokens, output_tokens: payload.usage?.output_tokens },
    raw: payload
  };
}
// ============ GOOGLE GEMINI PROVIDER ============
/**
 * Chat completion via Google Gemini's generateContent endpoint.
 * OpenAI-style messages are converted: assistant -> "model" role, and the
 * system message becomes `systemInstruction`.
 */
async function geminiChat(messages, options = {}) {
  const config = PROVIDER_CONFIGS.gemini;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('Google Gemini API key not configured');

  const model = options.model || config.defaultModel.chat;

  const contents = messages.filter(m => m.role !== 'system').map(m => ({
    role: m.role === 'assistant' ? 'model' : 'user',
    parts: [{ text: m.content }]
  }));

  const systemInstruction = messages.find(m => m.role === 'system')?.content;

  // Security fix: send the API key in the documented x-goog-api-key header
  // instead of the ?key= query parameter, so it cannot leak through URL
  // logging, proxies, or browser history.
  const url = `${config.baseUrl}/models/${model}:generateContent`;
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-goog-api-key': apiKey },
    body: JSON.stringify({
      contents,
      systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
      generationConfig: {
        maxOutputTokens: options.maxTokens || 1000,
        temperature: options.temperature ?? 0.7
      }
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Gemini error: ${response.status} - ${error}`);
  }

  const data = await response.json();
  return {
    provider: 'gemini',
    model,
    content: data.candidates?.[0]?.content?.parts?.[0]?.text || '',
    usage: data.usageMetadata,
    raw: data
  };
}

/** Create embeddings via Gemini's batchEmbedContents endpoint. */
async function geminiEmbed(text, options = {}) {
  const config = PROVIDER_CONFIGS.gemini;
  const apiKey = config.apiKey();
  if (!apiKey) throw new Error('Google Gemini API key not configured');

  const model = options.model || config.defaultModel.embed;
  const input = Array.isArray(text) ? text : [text];

  // Security fix: API key via header rather than ?key= query parameter.
  const url = `${config.baseUrl}/models/${model}:batchEmbedContents`;
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-goog-api-key': apiKey },
    body: JSON.stringify({
      requests: input.map(t => ({
        model: `models/${model}`,
        content: { parts: [{ text: t }] }
      }))
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Gemini embedding error: ${response.status} - ${error}`);
  }

  const data = await response.json();
  return {
    provider: 'gemini',
    model,
    embeddings: data.embeddings.map(e => e.values)
  };
}

// ============ CUSTOM ENDPOINT PROVIDER ============
/**
 * Chat completion against an OpenAI-compatible custom endpoint (e.g. Ollama).
 * The API key is optional; when absent no Authorization header is sent.
 */
async function customChat(messages, options = {}) {
  const config = PROVIDER_CONFIGS.custom;
  const baseUrl = typeof config.baseUrl === 'function' ? config.baseUrl() : config.baseUrl;
  const apiKey = config.apiKey();

  const model = options.model || config.defaultModel.chat;
  const headers = { 'Content-Type': 'application/json' };
  if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`;

  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: 'POST',
    headers,
    body: JSON.stringify({
      model,
      messages,
      max_tokens: options.maxTokens || 1000,
      temperature: options.temperature ?? 0.7
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Custom endpoint error: ${response.status} - ${error}`);
  }

  const data = await response.json();
  return {
    provider: 'custom',
    model,
    // Some OpenAI-compatible servers return message at the top level.
    content: data.choices?.[0]?.message?.content || data.message?.content || '',
    usage: data.usage,
    raw: data
  };
}

/** Create embeddings against an OpenAI-compatible custom endpoint. */
async function customEmbed(text, options = {}) {
  const config = PROVIDER_CONFIGS.custom;
  const baseUrl = typeof config.baseUrl === 'function' ? config.baseUrl() : config.baseUrl;
  const apiKey = config.apiKey();

  const model = options.model || config.defaultModel.embed;
  const input = Array.isArray(text) ? text : [text];
  const headers = { 'Content-Type': 'application/json' };
  if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`;

  const response = await fetch(`${baseUrl}/embeddings`, {
    method: 'POST',
    headers,
    body: JSON.stringify({ model, input })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Custom embedding error: ${response.status} - ${error}`);
  }

  const data = await response.json();
  return {
    provider: 'custom',
    model,
    embeddings: data.data?.map(d => d.embedding) || data.embeddings || []
  };
}

// ============ UNIFIED API ============
// Dispatch table: null means the provider lacks that capability.
const PROVIDER_HANDLERS = {
  openai: { chat: openaiChat, embed: openaiEmbed, image: openaiImage },
  azure: { chat: azureChat, embed: azureEmbed, image: null },
  anthropic: { chat: anthropicChat, embed: null, image: null },
  gemini: { chat: geminiChat, embed: geminiEmbed, image: null },
  custom: { chat: customChat, embed: customEmbed, image: null }
};

/**
 * Send a chat completion request
 * @param {string} provider - Provider ID (openai, azure, anthropic, gemini, custom)
 * @param {Array} messages - Array of {role, content} messages
 * @param {Object} options - {model, maxTokens, temperature, deviceId, useCache, stream}
 * @throws {Error} for unknown providers or when the device is rate-limited
 */
export async function chat(provider, messages, options = {}) {
  const handler = PROVIDER_HANDLERS[provider]?.chat;
  if (!handler) throw new Error(`Provider '${provider}' does not support chat`);

  // Rate limiting (opt-in via deviceId).
  if (options.deviceId) {
    const rateCheck = checkRateLimit(options.deviceId);
    if (!rateCheck.allowed) {
      throw new Error(`Rate limit exceeded. Try again in ${rateCheck.resetIn} seconds.`);
    }
  }

  // Check cache (skip for streaming)
  if (options.useCache !== false && !options.stream) {
    const cacheKey = getCacheKey(provider, 'chat', { messages, model: options.model });
    const cached = getCached(cacheKey);
    if (cached) return { ...cached, cached: true };
  }

  const result = await handler(messages, options);

  // Store in cache (skip for streaming; `result.then` guards against a
  // handler returning a pending promise-like rather than a plain result).
  if (options.useCache !== false && !options.stream && result && !result.then) {
    const cacheKey = getCacheKey(provider, 'chat', { messages, model: options.model });
    setCache(cacheKey, result);
  }

  if (options.deviceId && !options.stream) {
    trackUsage(options.deviceId, provider, 'chat', result.usage?.total_tokens || 0);
  }

  return result;
}
/** Throw when deviceId is present and over its per-minute request budget. */
function assertWithinRateLimit(deviceId) {
  if (!deviceId) return;
  const verdict = checkRateLimit(deviceId);
  if (!verdict.allowed) {
    throw new Error(`Rate limit exceeded. Try again in ${verdict.resetIn} seconds.`);
  }
}

/**
 * Generate embeddings
 * @param {string} provider - Provider ID
 * @param {string|Array} text - Text(s) to embed
 * @param {Object} options - {model, deviceId}
 */
export async function embed(provider, text, options = {}) {
  const handler = PROVIDER_HANDLERS[provider]?.embed;
  if (!handler) throw new Error(`Provider '${provider}' does not support embeddings`);

  assertWithinRateLimit(options.deviceId);
  return handler(text, options);
}

/**
 * Generate images
 * @param {string} provider - Provider ID
 * @param {string} prompt - Image prompt
 * @param {Object} options - {model, size, quality, n, deviceId}
 */
export async function generateImage(provider, prompt, options = {}) {
  const handler = PROVIDER_HANDLERS[provider]?.image;
  if (!handler) throw new Error(`Provider '${provider}' does not support image generation`);

  assertWithinRateLimit(options.deviceId);
  return handler(prompt, options);
}

/**
 * Pick the provider to use by default: the AI_DEFAULT_PROVIDER env value if
 * it is configured, otherwise the first available of a fixed fallback order.
 * @returns {string|null} provider id, or null when nothing is configured
 */
export function getDefaultProvider() {
  const candidates = [
    process.env.AI_DEFAULT_PROVIDER || 'openai',
    'openai', 'azure', 'anthropic', 'gemini', 'custom'
  ];
  for (const candidate of candidates) {
    if (isProviderAvailable(candidate)) return candidate;
  }
  return null;
}

// Per-device usage accounting (in-memory).
const deviceUsage = new Map();

/** Record one request for a device, bucketed by provider and request type. */
export function trackUsage(deviceId, provider, type, tokens = 0) {
  if (!deviceUsage.has(deviceId)) {
    deviceUsage.set(deviceId, { requests: 0, tokens: 0, byProvider: {}, byType: {} });
  }
  const stats = deviceUsage.get(deviceId);
  stats.requests += 1;
  stats.tokens += tokens;
  stats.byProvider[provider] = (stats.byProvider[provider] || 0) + 1;
  stats.byType[type] = (stats.byType[type] || 0) + 1;
  stats.lastRequest = new Date().toISOString();
}

/** Usage stats for one device (zeroed record when the device is unknown). */
export function getDeviceUsage(deviceId) {
  return deviceUsage.get(deviceId) || { requests: 0, tokens: 0, byProvider: {}, byType: {} };
}

/** Snapshot of usage stats for every tracked device, keyed by device id. */
export function getAllUsage() {
  return Object.fromEntries(deviceUsage);
}

export default {
  chat,
  embed,
  generateImage,
  getAvailableProviders,
  isProviderAvailable,
  getDefaultProvider,
  trackUsage,
  getDeviceUsage,
  getAllUsage,
  checkRateLimit
};
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';

const dataDir = path.join(process.cwd(), 'data', 'devices');

// Device ids may only contain filename-safe characters; anything else could
// escape dataDir when interpolated into a path (path traversal).
const SAFE_ID = /^[A-Za-z0-9._-]+$/;

function ensureDir() {
  if (!fs.existsSync(dataDir)) fs.mkdirSync(dataDir, { recursive: true });
}

/** Resolve the record file for a device id, or null for unsafe/non-string ids. */
function recordFile(deviceId) {
  if (typeof deviceId !== 'string' || !SAFE_ID.test(deviceId)) return null;
  return path.join(dataDir, `${deviceId}.json`);
}

/**
 * Persist a new registration. Generates a device id when none is supplied;
 * caller-supplied fields in `reg` override the defaults (status, createdAt).
 * @throws {Error} if a caller-supplied deviceId contains unsafe characters
 *   (security fix: previously "../../x" was written outside data/devices)
 */
export function saveRegistration(reg) {
  ensureDir();
  const id = reg.deviceId || (Date.now().toString() + '-' + crypto.randomBytes(4).toString('hex'));
  const fn = recordFile(id);
  if (!fn) throw new Error('Invalid deviceId');

  const record = Object.assign({
    deviceId: id,
    status: 'registered',
    createdAt: new Date().toISOString()
  }, reg);

  fs.writeFileSync(fn, JSON.stringify(record, null, 2), 'utf8');
  return record;
}

/** Load a registration, or null when missing or the id is unsafe. */
export function getRegistration(deviceId) {
  const fn = recordFile(deviceId);
  if (!fn || !fs.existsSync(fn)) return null;
  return JSON.parse(fs.readFileSync(fn, 'utf8'));
}

/**
 * Set a registration's status without transition validation and return the
 * updated record (null when the device is unknown). Extra fields are merged.
 */
export function updateStatus(deviceId, status, extra = {}) {
  const rec = getRegistration(deviceId);
  if (!rec) return null;
  rec.status = status;
  rec.updatedAt = new Date().toISOString();
  Object.assign(rec, extra);
  fs.writeFileSync(recordFile(deviceId), JSON.stringify(rec, null, 2), 'utf8');
  return rec;
}

/** List all registrations, skipping files that fail to parse. */
export function listRegistrations() {
  ensureDir();
  const records = [];
  for (const f of fs.readdirSync(dataDir)) {
    if (!f.endsWith('.json')) continue;
    try {
      records.push(JSON.parse(fs.readFileSync(path.join(dataDir, f), 'utf8')));
    } catch {
      // Robustness fix: one corrupt file no longer breaks the whole listing.
    }
  }
  return records;
}

// Status transition validation: allowed next states per current state.
const VALID_TRANSITIONS = {
  'registered': ['queued'],
  'queued': ['processing', 'failed'],
  'processing': ['acknowledged', 'failed'],
  'acknowledged': [],
  'failed': ['queued'] // allow retry
};

/**
 * Move a registration to a new status, enforcing the lifecycle above.
 * @returns the updated record, or null when the device is unknown
 * @throws {Error} on an invalid status transition
 */
export function transitionStatus(deviceId, newStatus, extra = {}) {
  const rec = getRegistration(deviceId);
  if (!rec) return null;

  const currentStatus = rec.status;
  if (!VALID_TRANSITIONS[currentStatus]?.includes(newStatus)) {
    throw new Error(`Invalid status transition from ${currentStatus} to ${newStatus}`);
  }

  return updateStatus(deviceId, newStatus, extra);
}
import { ServiceBusClient } from '@azure/service-bus';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';

const queueBase = path.join(process.cwd(), 'data', 'queue');
// Versioned topic name used for device-registration messages.
const TOPIC_NAME = 'device-registrations.v1';

let sbClient = null;
let useAzure = false;

/** Use Azure Service Bus when a connection string is configured. */
function initAzureServiceBus() {
  const connectionString = process.env.AZURE_SERVICEBUS_CONNECTION_STRING;
  if (connectionString) {
    sbClient = new ServiceBusClient(connectionString);
    useAzure = true;
    console.log('Using Azure Service Bus for queue operations');
  } else {
    console.log('No Azure Service Bus connection string; falling back to file-based queue');
  }
}

initAzureServiceBus();

function ensureDir() {
  if (!fs.existsSync(queueBase)) fs.mkdirSync(queueBase, { recursive: true });
}

/**
 * Enqueue a payload. Uses Azure Service Bus for the registration topic when
 * configured; otherwise persists the message as a JSON file under data/queue.
 * @returns {Promise<{id: string, topic: string, payload: *}>}
 */
export async function enqueue(topic, payload) {
  if (useAzure && topic === TOPIC_NAME) {
    const sender = sbClient.createSender(topic);
    try {
      const message = {
        body: payload,
        messageId: crypto.randomUUID(),
        contentType: 'application/json'
      };
      await sender.sendMessages(message);
      return { id: message.messageId, topic, payload };
    } catch (err) {
      console.error('Failed to enqueue to Azure Service Bus:', err);
      throw err;
    } finally {
      // Bug fix: the sender was previously leaked when sendMessages threw
      // (close() was only called on the success path).
      await sender.close();
    }
  }

  // Fallback to file-based queue.
  ensureDir();
  const dir = path.join(queueBase, topic);
  if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  const id = Date.now().toString() + '-' + crypto.randomBytes(4).toString('hex');
  const msg = {
    id,
    topic,
    timestamp: new Date().toISOString(),
    payload
  };
  fs.writeFileSync(path.join(dir, `${id}.json`), JSON.stringify(msg, null, 2), 'utf8');
  return msg;
}
/**
 * Remove and return the oldest message for a topic, or null when empty.
 * Azure path: receives one message (5s max wait) and settles it.
 * File path: takes the lexicographically first JSON file and archives it
 * under "<topic>-processed" so handled messages remain inspectable.
 */
export async function dequeue(topic) {
  if (useAzure && topic === TOPIC_NAME) {
    const receiver = sbClient.createReceiver(topic);
    try {
      const batch = await receiver.receiveMessages(1, { maxWaitTimeInMs: 5000 });
      if (batch.length === 0) return null;

      const sbMessage = batch[0];
      const envelope = {
        id: sbMessage.messageId,
        topic,
        timestamp: sbMessage.enqueuedTimeUtc?.toISOString() || new Date().toISOString(),
        payload: sbMessage.body
      };
      await receiver.completeMessage(sbMessage);
      return envelope;
    } catch (err) {
      console.error('Failed to dequeue from Azure Service Bus:', err);
      throw err;
    } finally {
      await receiver.close();
    }
  }

  // File-based fallback.
  const dir = path.join(queueBase, topic);
  if (!fs.existsSync(dir)) return null;
  const pending = fs.readdirSync(dir).filter(f => f.endsWith('.json')).sort();
  if (pending.length === 0) return null;

  const current = path.join(dir, pending[0]);
  const envelope = JSON.parse(fs.readFileSync(current, 'utf8'));

  const processedDir = path.join(queueBase, `${topic}-processed`);
  if (!fs.existsSync(processedDir)) fs.mkdirSync(processedDir, { recursive: true });
  fs.renameSync(current, path.join(processedDir, pending[0]));
  return envelope;
}

/** List pending messages for a topic (file-based backend only). */
export function list(topic) {
  if (useAzure) {
    // Service Bus has no cheap peek-all; keep the signature for compatibility.
    console.warn('list() not supported with Azure Service Bus; use Azure portal or CLI');
    return [];
  }
  const dir = path.join(queueBase, topic);
  if (!fs.existsSync(dir)) return [];
  return fs.readdirSync(dir)
    .filter(f => f.endsWith('.json'))
    .map(f => JSON.parse(fs.readFileSync(path.join(dir, f), 'utf8')));
}
b/lib/profileStore.js new file mode 100644 index 0000000..87dcbf2 --- /dev/null +++ b/lib/profileStore.js @@ -0,0 +1,37 @@ +import fs from 'fs'; +import path from 'path'; + +const DATA_DIR = path.resolve(process.cwd(), 'data'); +const PROFILES_DIR = path.join(DATA_DIR, 'profiles'); +const FEEDBACK_DIR = path.join(DATA_DIR, 'feedback'); + +function ensureDirs(){ + if (!fs.existsSync(DATA_DIR)) fs.mkdirSync(DATA_DIR); + if (!fs.existsSync(PROFILES_DIR)) fs.mkdirSync(PROFILES_DIR); + if (!fs.existsSync(FEEDBACK_DIR)) fs.mkdirSync(FEEDBACK_DIR); +} + +export function getProfile(userId){ + ensureDirs(); + if (!userId) return null; + const f = path.join(PROFILES_DIR, `${userId}.json`); + if (!fs.existsSync(f)) return null; + try { return JSON.parse(fs.readFileSync(f,'utf8')); } catch { return null } +} + +export function saveProfile(userId, profile){ + ensureDirs(); + const f = path.join(PROFILES_DIR, `${userId}.json`); + fs.writeFileSync(f, JSON.stringify(profile, null, 2), 'utf8'); + return profile; +} + +export function appendFeedback(feedback){ + ensureDirs(); + const id = Date.now().toString(); + const f = path.join(FEEDBACK_DIR, `${id}.json`); + fs.writeFileSync(f, JSON.stringify(feedback, null, 2), 'utf8'); + return f; +} + +export default { getProfile, saveProfile, appendFeedback }; diff --git a/luna-recycle/README.md b/luna-recycle/README.md index da2c5a2..a06d7e3 100644 Binary files a/luna-recycle/README.md and b/luna-recycle/README.md differ diff --git a/main.js b/main.js new file mode 100644 index 0000000..4623d11 --- /dev/null +++ b/main.js @@ -0,0 +1,45 @@ +import { spawn } from 'child_process'; +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +const branches = { + main: { cmd: 'node', args: ['server.js'] }, + optimized: { cmd: 'node', args: ['server-optimized.js'] }, + universal: { cmd: 'node', args: ['server-universal.js'] }, + 
audio: { cmd: 'node', args: ['server-audio.js'] }, + api: { cmd: 'node', args: ['api/server.js'] }, + 'api-optimized': { cmd: 'node', args: ['api/server-optimized.js'] }, + 'api-universal': { cmd: 'node', args: ['api/server-universal.js'] }, + auth: { cmd: 'node', args: ['auth-ui/v750/server.js'] }, + dashboard: { cmd: 'npm', args: ['run', 'dev'], cwd: 'dashboard' }, + 'real-time-overlay': { cmd: 'npm', args: ['run', 'dev'], cwd: 'challengerepo/real-time-overlay' }, + // add more as needed +}; + +const branch = process.argv[2]; +if (!branch) { + console.log('Usage: node main.js '); + console.log('Available branches:', Object.keys(branches).join(', ')); + process.exit(1); +} + +const config = branches[branch]; +if (!config) { + console.log('Unknown branch:', branch); + process.exit(1); +} + +const options = { stdio: 'inherit' }; +if (config.cwd) { + options.cwd = join(__dirname, config.cwd); +} + +console.log(`Starting ${branch} with ${config.cmd} ${config.args.join(' ')}`); + +const child = spawn(config.cmd, config.args, options); +child.on('close', (code) => { + console.log(`${branch} exited with code ${code}`); +}); \ No newline at end of file diff --git a/map.bat b/map.bat new file mode 100644 index 0000000..1026465 --- /dev/null +++ b/map.bat @@ -0,0 +1,10 @@ +@echo off +REM Open NetworkBuster Map + +cd /d "%~dp0" + +echo Opening Network Map... 
+start http://localhost:6000 + +call .venv\Scripts\activate.bat +start /min python network_map_viewer.py diff --git a/mobile_deployment.py b/mobile_deployment.py new file mode 100644 index 0000000..9b5d2a0 --- /dev/null +++ b/mobile_deployment.py @@ -0,0 +1,427 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Mobile Deployment Manager +Build and deploy to iOS, Android, and Progressive Web App (PWA) +""" + +import subprocess +import sys +import os +import json +import platform +from pathlib import Path +from datetime import datetime + +PROJECT_PATH = Path(__file__).parent.resolve() +IS_WINDOWS = platform.system() == "Windows" + +# Mobile deployment configuration +MOBILE_CONFIG = { + "ios": { + "platform": "iOS", + "framework": "Capacitor", + "bundle_id": "net.networkbuster.app", + "min_version": "14.0", + "xcode_project": "ios/App/App.xcodeproj", + "simulator": "iPhone 15 Pro", + "status": "pending" + }, + "android": { + "platform": "Android", + "framework": "Capacitor", + "package_name": "net.networkbuster.app", + "min_sdk": "26", + "gradle_path": "android/app/build.gradle", + "emulator": "Pixel_7_API_34", + "status": "pending" + }, + "pwa": { + "platform": "Progressive Web App", + "manifest": "public/manifest.json", + "service_worker": "public/sw.js", + "icons_path": "public/icons", + "status": "configured" + } +} + +BUILD_CONFIGS = { + "development": { + "mode": "dev", + "source_maps": True, + "minify": False, + "api_url": "http://localhost:3001" + }, + "staging": { + "mode": "staging", + "source_maps": True, + "minify": True, + "api_url": "https://staging.networkbuster.net" + }, + "production": { + "mode": "prod", + "source_maps": False, + "minify": True, + "api_url": "https://api.networkbuster.net" + } +} + + +def run_cmd(cmd, capture=True, cwd=None): + """Run shell command.""" + result = subprocess.run( + cmd, shell=True, capture_output=capture, text=True, + cwd=cwd or PROJECT_PATH + ) + return result + + +def check_prerequisites(): + """Check required tools for 
mobile deployment.""" + prereqs = { + "node": run_cmd("node --version").returncode == 0, + "npm": run_cmd("npm --version").returncode == 0, + "capacitor": run_cmd("npx cap --version").returncode == 0, + } + + if platform.system() == "Darwin": # macOS + prereqs["xcode"] = run_cmd("xcodebuild -version").returncode == 0 + prereqs["cocoapods"] = run_cmd("pod --version").returncode == 0 + + prereqs["android_studio"] = run_cmd("adb --version").returncode == 0 + + return prereqs + + +class MobileDeployment: + """Manage mobile deployments.""" + + def __init__(self): + self.config = MOBILE_CONFIG.copy() + self.prereqs = check_prerequisites() + + def show_status(self): + """Show mobile deployment status.""" + print("\n" + "=" * 70) + print(" ๐Ÿ“ฑ MOBILE DEPLOYMENT STATUS") + print("=" * 70) + + # Prerequisites + print("\n ๐Ÿ”ง Prerequisites:") + for tool, installed in self.prereqs.items(): + status = "โœ“" if installed else "โœ—" + print(f" {status} {tool}") + + print("\n" + "-" * 70) + + # Platforms + for platform_key, config in self.config.items(): + status_icon = "๐ŸŸข" if config["status"] == "configured" else "๐ŸŸก" + print(f"\n {status_icon} {config['platform']}") + print(f" Framework: {config.get('framework', 'Native')}") + + if platform_key == "ios": + print(f" Bundle ID: {config['bundle_id']}") + print(f" Min iOS: {config['min_version']}") + elif platform_key == "android": + print(f" Package: {config['package_name']}") + print(f" Min SDK: {config['min_sdk']}") + elif platform_key == "pwa": + print(f" Manifest: {config['manifest']}") + + print("\n" + "=" * 70) + + def setup_capacitor(self): + """Initialize Capacitor for mobile.""" + print("\nโšก Setting up Capacitor...") + + if not self.prereqs["npm"]: + print("โœ— npm not found") + return False + + # Install Capacitor + print(" Installing Capacitor...") + run_cmd("npm install @capacitor/core @capacitor/cli", capture=False) + + # Initialize Capacitor + print(" Initializing Capacitor...") + run_cmd('npx cap init 
"NetworkBuster" "net.networkbuster.app"', capture=False) + + # Add platforms + print(" Adding iOS platform...") + run_cmd("npx cap add ios", capture=False) + + print(" Adding Android platform...") + run_cmd("npx cap add android", capture=False) + + print("โœ“ Capacitor setup complete") + return True + + def build_web(self, env="production"): + """Build web assets.""" + print(f"\n๐Ÿ”จ Building web assets ({env})...") + + build_config = BUILD_CONFIGS.get(env, BUILD_CONFIGS["production"]) + + # Set environment variables + env_vars = { + "NODE_ENV": build_config["mode"], + "REACT_APP_API_URL": build_config["api_url"] + } + + env_str = " ".join([f"{k}={v}" for k, v in env_vars.items()]) + + # Build + result = run_cmd(f"{env_str} npm run build", capture=False) + + if result.returncode == 0: + print("โœ“ Web build complete") + return True + else: + print("โœ— Build failed") + return False + + def sync_capacitor(self): + """Sync web assets to mobile platforms.""" + print("\n๐Ÿ”„ Syncing assets to mobile platforms...") + + result = run_cmd("npx cap sync", capture=False) + + if result.returncode == 0: + print("โœ“ Sync complete") + return True + else: + print("โœ— Sync failed") + return False + + def build_ios(self, scheme="App"): + """Build iOS app.""" + print(f"\n๐ŸŽ Building iOS app ({scheme})...") + + if platform.system() != "Darwin": + print("โœ— iOS builds require macOS") + return False + + if not self.prereqs.get("xcode"): + print("โœ— Xcode not found") + return False + + # Open in Xcode + ios_project = PROJECT_PATH / "ios" / "App" / "App.xcworkspace" + if ios_project.exists(): + print(f" Opening {ios_project}") + run_cmd(f'open "{ios_project}"', capture=False) + print(" Build in Xcode: Product โ†’ Build") + else: + print("โœ— iOS project not found. 
Run setup_capacitor first.") + return False + + return True + + def build_android(self, variant="assembleDebug"): + """Build Android app.""" + print(f"\n๐Ÿค– Building Android app ({variant})...") + + if not self.prereqs.get("android_studio"): + print("โœ— Android SDK not found") + return False + + android_dir = PROJECT_PATH / "android" + if not android_dir.exists(): + print("โœ— Android project not found. Run setup_capacitor first.") + return False + + # Build with Gradle + if IS_WINDOWS: + gradle_cmd = ".\\gradlew.bat" + else: + gradle_cmd = "./gradlew" + + result = run_cmd(f"cd android && {gradle_cmd} {variant}", capture=False) + + if result.returncode == 0: + apk_path = android_dir / "app" / "build" / "outputs" / "apk" / "debug" / "app-debug.apk" + print(f"โœ“ APK built: {apk_path}") + return True + else: + print("โœ— Build failed") + return False + + def run_ios(self): + """Run iOS app in simulator.""" + print("\n๐Ÿ“ฑ Running iOS app in simulator...") + + if platform.system() != "Darwin": + print("โœ— iOS simulator requires macOS") + return False + + simulator = self.config["ios"]["simulator"] + result = run_cmd(f'npx cap run ios --target="{simulator}"', capture=False) + + return result.returncode == 0 + + def run_android(self): + """Run Android app in emulator.""" + print("\n๐Ÿ“ฑ Running Android app in emulator...") + + result = run_cmd("npx cap run android", capture=False) + return result.returncode == 0 + + def setup_pwa(self): + """Setup Progressive Web App.""" + print("\n๐ŸŒ Setting up PWA...") + + # Create manifest + manifest = { + "name": "NetworkBuster", + "short_name": "NetBuster", + "description": "Real-time network monitoring and management", + "start_url": "/", + "display": "standalone", + "background_color": "#000000", + "theme_color": "#00ff00", + "icons": [ + { + "src": "/icons/icon-192.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "/icons/icon-512.png", + "sizes": "512x512", + "type": "image/png" + } + ] + } + + public_dir 
= PROJECT_PATH / "public" + public_dir.mkdir(exist_ok=True) + + manifest_path = public_dir / "manifest.json" + with open(manifest_path, "w") as f: + json.dump(manifest, f, indent=2) + + print(f"โœ“ Manifest created: {manifest_path}") + + # Create service worker + sw_content = ''' +self.addEventListener('install', (event) => { + console.log('Service Worker installing...'); +}); + +self.addEventListener('activate', (event) => { + console.log('Service Worker activated'); +}); + +self.addEventListener('fetch', (event) => { + event.respondWith( + caches.match(event.request).then((response) => { + return response || fetch(event.request); + }) + ); +}); +''' + + sw_path = public_dir / "sw.js" + with open(sw_path, "w") as f: + f.write(sw_content) + + print(f"โœ“ Service Worker created: {sw_path}") + print("โœ“ PWA setup complete") + + return True + + def deploy_all(self, env="production"): + """Full mobile deployment pipeline.""" + print("\n" + "=" * 70) + print(" ๐Ÿš€ FULL MOBILE DEPLOYMENT") + print("=" * 70) + + steps = [ + ("Building web assets", lambda: self.build_web(env)), + ("Syncing to mobile", self.sync_capacitor), + ("Setting up PWA", self.setup_pwa) + ] + + for step_name, step_func in steps: + print(f"\n[{steps.index((step_name, step_func)) + 1}/{len(steps)}] {step_name}...") + if not step_func(): + print(f"โœ— Failed at: {step_name}") + return False + + print("\n" + "=" * 70) + print(" โœ“ DEPLOYMENT COMPLETE") + print("=" * 70) + print("\n Next steps:") + print(" - iOS: python mobile_deployment.py --build-ios") + print(" - Android: python mobile_deployment.py --build-android") + print(" - Test PWA at: http://localhost:3000") + + return True + + +def show_menu(): + """Display mobile deployment menu.""" + print("\n" + "โ”€" * 60) + print(" ๐Ÿ“ฑ MOBILE DEPLOYMENT MANAGER") + print("โ”€" * 60) + print(" [1] ๐Ÿ“Š Show Status") + print(" [2] โšก Setup Capacitor") + print(" [3] ๐Ÿ”จ Build Web Assets") + print(" [4] ๐Ÿ”„ Sync to Mobile") + print(" [5] ๐ŸŽ Build 
iOS") + print(" [6] ๐Ÿค– Build Android") + print(" [7] ๐ŸŒ Setup PWA") + print(" [8] ๐Ÿ“ฑ Run iOS Simulator") + print(" [9] ๐Ÿ“ฑ Run Android Emulator") + print(" [d] ๐Ÿš€ Deploy All (Full Pipeline)") + print(" [0] โŒ Exit") + print("โ”€" * 60) + + +def main(): + """Main entry point.""" + deployer = MobileDeployment() + + print() + print("โ•”" + "โ•" * 58 + "โ•—") + print("โ•‘" + " NetworkBuster Mobile Deployment".center(58) + "โ•‘") + print("โ•‘" + " iOS | Android | PWA".center(58) + "โ•‘") + print("โ•š" + "โ•" * 58 + "โ•") + + while True: + show_menu() + choice = input("\n Select option: ").strip().lower() + + if choice == "1": + deployer.show_status() + elif choice == "2": + deployer.setup_capacitor() + elif choice == "3": + env = input(" Environment (dev/staging/prod): ").strip() or "production" + deployer.build_web(env) + elif choice == "4": + deployer.sync_capacitor() + elif choice == "5": + deployer.build_ios() + elif choice == "6": + deployer.build_android() + elif choice == "7": + deployer.setup_pwa() + elif choice == "8": + deployer.run_ios() + elif choice == "9": + deployer.run_android() + elif choice == "d": + env = input(" Environment (dev/staging/prod): ").strip() or "production" + deployer.deploy_all(env) + elif choice == "0": + print("\n๐Ÿ‘‹ Goodbye!") + break + else: + print("\nโš  Invalid option.") + + input("\nPress Enter to continue...") + + +if __name__ == "__main__": + main() diff --git a/nasa_home_base.py b/nasa_home_base.py new file mode 100644 index 0000000..5d2eaf9 --- /dev/null +++ b/nasa_home_base.py @@ -0,0 +1,573 @@ +#!/usr/bin/env python3 +""" +NASA Home Base Mission Control +NetworkBuster Integration Package +""" + +import sys +import time +import json +import requests +import subprocess +import webbrowser +import threading +from datetime import datetime +from pathlib import Path +from http.server import HTTPServer, SimpleHTTPRequestHandler +import socketserver + +# Check for required packages +try: + from flask import Flask, 
render_template_string, jsonify, request + FLASK_AVAILABLE = True +except ImportError: + FLASK_AVAILABLE = False + print("โš ๏ธ Flask not available. Install with: pip install flask") + +class NASAHomeBase: + """NASA Home Base Mission Control System""" + + def __init__(self): + self.project_root = Path(__file__).parent + self.ports = { + 'web': {'port': 3000, 'name': 'Web Server', 'status': 'offline'}, + 'api': {'port': 3001, 'name': 'API Server', 'status': 'offline'}, + 'audio': {'port': 3002, 'name': 'Audio Stream', 'status': 'offline'} + } + self.mission_start_time = datetime.now() + self.mission_log = [] + + def log_event(self, event, level='INFO'): + """Log mission event""" + timestamp = datetime.now().strftime('%H:%M:%S') + log_entry = f"[{timestamp}] {level}: {event}" + self.mission_log.append(log_entry) + print(f" {log_entry}") + + def check_port_status(self, port): + """Check if a port is active""" + try: + response = requests.get(f'http://localhost:{port}/api/health', timeout=2) + return response.status_code == 200 + except: + return False + + def check_all_ports(self): + """Check status of all NetworkBuster ports""" + for service, info in self.ports.items(): + is_active = self.check_port_status(info['port']) + info['status'] = 'online' if is_active else 'offline' + + def start_service(self, service_name): + """Start a NetworkBuster service""" + self.log_event(f"Starting {service_name}...", 'COMMAND') + # Services should already be running via start-servers.js + + def open_dashboard(self, service='web'): + """Open service dashboard in browser""" + port = self.ports[service]['port'] + url = f'http://localhost:{port}' + self.log_event(f"Opening {service} dashboard: {url}", 'ACTION') + webbrowser.open(url) + + def get_system_status(self): + """Get comprehensive system status""" + self.check_all_ports() + + online_count = sum(1 for p in self.ports.values() if p['status'] == 'online') + uptime = (datetime.now() - self.mission_start_time).total_seconds() + + 
return { + 'mission_time': uptime, + 'ports': self.ports, + 'online_services': online_count, + 'total_services': len(self.ports), + 'status': 'NOMINAL' if online_count == len(self.ports) else 'DEGRADED' + } + +# Flask web interface for Mission Control +if FLASK_AVAILABLE: + app = Flask(__name__) + home_base = NASAHomeBase() + + MISSION_CONTROL_HTML = """ + + + + + + NASA Home Base - Mission Control + + + +
    +
    +

    ๐Ÿš€ NASA HOME BASE

    +
    NETWORKBUSTER MISSION CONTROL
    +
    + +
    + +
    +
    ๐Ÿ›ฐ๏ธ Services
    +
    +
    + + +
    +
    ๐Ÿ“ก Mission Control
    + +
    +
    MISSION STATUS: NOMINAL
    +
    + +
    +
    +
    Mission Time
    +
    0:00:00
    +
    +
    +
    Services Online
    +
    0/3
    +
    +
    + +
    ๐ŸŽฎ Quick Actions
    + + + + +
    + + +
    +
    ๐Ÿ“ Mission Log
    +
    +
    +
    +
    + + + + + """ + + @app.route('/') + def index(): + return render_template_string(MISSION_CONTROL_HTML) + + @app.route('/api/status') + def api_status(): + return jsonify(home_base.get_system_status()) + + @app.route('/api/open/') + def api_open_service(service): + if service in home_base.ports: + home_base.open_dashboard(service) + return jsonify({'success': True, 'message': f'Opened {service} dashboard'}) + return jsonify({'success': False, 'message': 'Service not found'}), 404 + +def run_mission_control(port=5000): + """Run the NASA Home Base Mission Control interface""" + if not FLASK_AVAILABLE: + print("โŒ Flask is required to run Mission Control") + print(" Install with: pip install flask") + return + + print("\n" + "="*60) + print("๐Ÿš€ NASA HOME BASE MISSION CONTROL") + print("="*60) + print(f"\n๐ŸŒ Mission Control Interface: http://localhost:{port}") + print("\nChecking NetworkBuster services...") + + home_base.check_all_ports() + for service, info in home_base.ports.items(): + status_icon = "โœ…" if info['status'] == 'online' else "โš ๏ธ" + print(f" {status_icon} {info['name']} (Port {info['port']}): {info['status'].upper()}") + + print(f"\n๐ŸŽฏ Opening Mission Control in browser...") + threading.Timer(1.5, lambda: webbrowser.open(f'http://localhost:{port}')).start() + + print(f"\n๐Ÿ“ก Mission Control Active - Press Ctrl+C to abort mission\n") + + try: + app.run(host='0.0.0.0', port=port, debug=False) + except KeyboardInterrupt: + print("\n\n๐Ÿ›‘ Mission Control shutdown initiated") + print("โœ… All systems secured") + +def main(): + """Main entry point""" + print("\n" + "โ•”" + "="*58 + "โ•—") + print("โ•‘" + " NASA HOME BASE - NetworkBuster Integration".center(58) + "โ•‘") + print("โ•š" + "="*58 + "โ•") + + if len(sys.argv) > 1 and sys.argv[1] == '--help': + print("\nUsage: python nasa_home_base.py [options]") + print("\nOptions:") + print(" --help Show this help message") + print(" --port PORT Set Mission Control port (default: 5000)") + print(" 
--check Check service status only") + print("\nExample:") + print(" python nasa_home_base.py") + print(" python nasa_home_base.py --port 5001") + return + + if len(sys.argv) > 1 and sys.argv[1] == '--check': + base = NASAHomeBase() + base.check_all_ports() + print("\n๐Ÿ“Š Service Status:") + for service, info in base.ports.items(): + print(f" โ€ข {info['name']}: {info['status'].upper()}") + return + + port = 5000 + if len(sys.argv) > 2 and sys.argv[1] == '--port': + port = int(sys.argv[2]) + + run_mission_control(port) + +if __name__ == "__main__": + main() diff --git a/nb.ps1 b/nb.ps1 new file mode 100644 index 0000000..7a326c3 --- /dev/null +++ b/nb.ps1 @@ -0,0 +1,77 @@ +# NetworkBuster Simple PowerShell Functions +# Source this file: . .\nb.ps1 + +function nb-start { + "Starting NetworkBuster..." + & .\.venv\Scripts\python.exe auto_start_service.py +} + +function nb-stop { + "Stopping NetworkBuster..." + & .\.venv\Scripts\python.exe networkbuster_launcher.py --stop +} + +function nb-status { + "Checking status..." + & .\.venv\Scripts\python.exe networkbuster_launcher.py --status +} + +function nb-map { + "Opening Network Map..." + Start-Process http://localhost:6000 + & .\.venv\Scripts\python.exe network_map_viewer.py +} + +function nb-tracer { + "Opening API Tracer..." + Start-Process http://localhost:8000 + & .\.venv\Scripts\python.exe api_tracer.py +} + +function nb-backup { + "Running git backup..." + & .\.venv\Scripts\python.exe flash_git_backup.py +} + +function nb-thumbs { + "Extracting thumbnails..." + & .\.venv\Scripts\python.exe extract_thumbnails.py + Start-Process network_thumbnails\index.html +} + +function nb-mission { + "Opening Mission Control..." + Start-Process http://localhost:5000 + & .\.venv\Scripts\python.exe nasa_home_base.py +} + +function nb-all { + "Opening all dashboards..." 
+ Start-Process http://localhost:7000 # Universal Launcher + Start-Process http://localhost:6000 # Network Map + Start-Process http://localhost:8000 # API Tracer + Start-Process http://localhost:5000 # Mission Control +} + +function nb-autostart { + "Installing auto-start..." + Start-Process powershell -ArgumentList "-ExecutionPolicy Bypass -File install_autostart.ps1" -Verb RunAs +} + +function nb-help { + Write-Host "`nNetworkBuster Quick Commands:" -ForegroundColor Cyan + Write-Host " nb-start Start all services" -ForegroundColor White + Write-Host " nb-stop Stop all services" -ForegroundColor White + Write-Host " nb-status Show service status" -ForegroundColor White + Write-Host " nb-map Open network map" -ForegroundColor White + Write-Host " nb-tracer Open API tracer" -ForegroundColor White + Write-Host " nb-mission Open mission control" -ForegroundColor White + Write-Host " nb-backup Run git backup" -ForegroundColor White + Write-Host " nb-thumbs Extract thumbnails" -ForegroundColor White + Write-Host " nb-all Open all dashboards" -ForegroundColor White + Write-Host " nb-autostart Install auto-start on boot" -ForegroundColor Yellow + Write-Host " nb-help Show this help" -ForegroundColor White + Write-Host "" +} + +Write-Host "NetworkBuster commands loaded. Type 'nb-help' for usage." 
-ForegroundColor Green diff --git a/network_map_viewer.py b/network_map_viewer.py new file mode 100644 index 0000000..8384d80 --- /dev/null +++ b/network_map_viewer.py @@ -0,0 +1,1574 @@ +""" +NetworkBuster - Network Topology Map with Live Logs +Shows device thumbnails with real-time log display on interactive map +""" + +import os +import json +import subprocess +from datetime import datetime +from flask import Flask, render_template_string, jsonify, request +import socket +import psutil +import platform +import glob + +app = Flask(__name__) + +# Gateway management data +def get_gateway_configs(local_ip): + """Get gateway configurations with dynamic local IP""" + return { + 'router-wifi7': { + 'type': 'gateway', + 'dhcp_enabled': True, + 'firewall_enabled': True, + 'uptime': '15 days 8 hours', + 'connected_devices': 24, + 'bandwidth_usage': '45%', + 'wan_ip': '203.0.113.45', + 'dns_servers': ['8.8.8.8', '8.8.4.4'], + 'port_forwarding': [{'port': 80, 'to': '192.168.1.100'}, {'port': 443, 'to': '192.168.1.100'}] + }, + 'router-nb': { + 'type': 'gateway', + 'dhcp_enabled': True, + 'firewall_enabled': True, + 'uptime': '22 days 3 hours', + 'connected_devices': 8, + 'bandwidth_usage': '23%', + 'wan_ip': '203.0.113.46', + 'dns_servers': ['1.1.1.1', '1.0.0.1'], + 'port_forwarding': [{'port': 3000, 'to': local_ip}, {'port': 4000, 'to': local_ip}] + } + } + +# Device discovery and classification +def get_network_devices(): + """Discover devices on network and classify by type""" + devices = [] + + # Get local machine info + hostname = socket.gethostname() + local_ip = socket.gethostbyname(hostname) + + # Get gateway configs with local IP + gateway_configs = get_gateway_configs(local_ip) + + # Main workstation (current device) + devices.append({ + 'id': 'workstation-1', + 'name': hostname, + 'type': 'workstation', + 'ip': local_ip, + 'status': 'online', + 'x': 400, + 'y': 300, + 'logs': get_system_logs('workstation') + }) + + # WiFi 7 Mesh Router (Gateway) + 
devices.append({ + 'id': 'router-wifi7', + 'name': 'WiFi 7 Mesh Router', + 'type': 'gateway', + 'ip': '192.168.1.1', + 'status': 'online', + 'x': 200, + 'y': 150, + 'logs': get_device_logs('router'), + 'is_gateway': True, + 'gateway_config': gateway_configs.get('router-wifi7', {}) + }) + + # NetworkBuster Router (Gateway) + devices.append({ + 'id': 'router-nb', + 'name': 'NetworkBuster Router', + 'type': 'gateway', + 'ip': '192.168.1.100', + 'status': 'online', + 'x': 600, + 'y': 150, + 'logs': get_device_logs('networkbuster'), + 'is_gateway': True, + 'gateway_config': gateway_configs.get('router-nb', {}) + }) + + # Mesh Nodes + mesh_nodes = [ + {'id': 'mesh-1', 'name': 'Mesh Node 1', 'ip': '192.168.1.10', 'x': 150, 'y': 400}, + {'id': 'mesh-2', 'name': 'Mesh Node 2', 'ip': '192.168.1.11', 'x': 350, 'y': 500}, + {'id': 'mesh-3', 'name': 'Mesh Node 3', 'ip': '192.168.1.12', 'x': 650, 'y': 400}, + ] + + for node in mesh_nodes: + node.update({ + 'type': 'mesh', + 'status': 'online', + 'logs': get_device_logs('mesh') + }) + devices.append(node) + + # NetworkBuster Services + services = [ + {'id': 'service-web', 'name': 'Web Server', 'port': 3000, 'x': 250, 'y': 300}, + {'id': 'service-api', 'name': 'API Server', 'port': 3001, 'x': 400, 'y': 200}, + {'id': 'service-audio', 'name': 'Audio Stream', 'port': 3002, 'x': 550, 'y': 300}, + {'id': 'service-mission', 'name': 'Mission Control', 'port': 5000, 'x': 400, 'y': 400}, + ] + + for service in services: + service.update({ + 'type': 'service', + 'ip': local_ip, + 'status': check_port_status(service['port']), + 'logs': get_service_logs(service['port']) + }) + devices.append(service) + + return devices + +def check_port_status(port): + """Check if a port is listening""" + for conn in psutil.net_connections(): + if conn.laddr.port == port and conn.status == 'LISTEN': + return 'online' + return 'offline' + +def get_system_logs(device_type): + """Get system logs for device""" + logs = [] + now = 
datetime.now().strftime("%H:%M:%S") + + if device_type == 'workstation': + cpu = psutil.cpu_percent(interval=0.1) + memory = psutil.virtual_memory().percent + disk = psutil.disk_usage('/').percent + + logs = [ + f"[{now}] CPU Usage: {cpu}%", + f"[{now}] Memory: {memory}%", + f"[{now}] Disk: {disk}%", + f"[{now}] OS: {platform.system()} {platform.release()}" + ] + + return logs + +def get_device_logs(device_type): + """Get logs for network devices""" + now = datetime.now().strftime("%H:%M:%S") + + if device_type == 'router': + return [ + f"[{now}] Router online", + f"[{now}] DHCP active", + f"[{now}] Firewall enabled", + f"[{now}] Clients: 8" + ] + elif device_type == 'networkbuster': + return [ + f"[{now}] NetworkBuster active", + f"[{now}] Services: 4/4 online", + f"[{now}] Port forwarding OK", + f"[{now}] DNS configured" + ] + elif device_type == 'mesh': + return [ + f"[{now}] Mesh connected", + f"[{now}] Signal: -45 dBm", + f"[{now}] Bandwidth: 2.4 Gbps", + f"[{now}] Encryption: WPA3" + ] + + return [] + +def get_service_logs(port): + """Get logs for NetworkBuster services""" + now = datetime.now().strftime("%H:%M:%S") + status = check_port_status(port) + + if status == 'online': + return [ + f"[{now}] Service running", + f"[{now}] Port {port} listening", + f"[{now}] Health check: OK", + f"[{now}] Requests: 142" + ] + else: + return [ + f"[{now}] Service offline", + f"[{now}] Port {port} not listening", + f"[{now}] Status: Inactive" + ] + +def get_git_status(): + """Get git repository status""" + try: + # Get current branch + branch = subprocess.check_output(['git', 'branch', '--show-current'], + stderr=subprocess.DEVNULL).decode().strip() + + # Get last commit + last_commit = subprocess.check_output(['git', 'log', '-1', '--oneline'], + stderr=subprocess.DEVNULL).decode().strip() + + # Get status + status = subprocess.check_output(['git', 'status', '--short'], + stderr=subprocess.DEVNULL).decode().strip() + + modified_files = len(status.split('\n')) if status 
else 0 + + return { + 'connected': True, + 'branch': branch, + 'last_commit': last_commit, + 'modified_files': modified_files + } + except: + return { + 'connected': False, + 'branch': 'unknown', + 'last_commit': 'No git repository', + 'modified_files': 0 + } + +def get_all_documentation(): + """Load all markdown documentation files""" + docs = [] + md_files = glob.glob('*.md') + glob.glob('**/*.md', recursive=True) + + for filepath in md_files[:20]: # Limit to first 20 files + try: + with open(filepath, 'r', encoding='utf-8') as f: + content = f.read() + # Get first 500 characters as preview + preview = content[:500] + '...' if len(content) > 500 else content + + docs.append({ + 'filename': os.path.basename(filepath), + 'path': filepath, + 'size': os.path.getsize(filepath), + 'preview': preview, + 'lines': len(content.split('\n')) + }) + except: + pass + + return docs + +# HTML Template with Google Maps-style effects +MAP_TEMPLATE = """ + + + + + + NetworkBuster Topology Map + + + + + +
    +

    ๐Ÿ—บ๏ธ NetworkBuster Topology Map

    +
    + + + +
    +
    + ๐Ÿ”— Git + + +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +
    + +
    + + + + +
    + + + + + +
    +
    + +
    Zoom: 100%
    +
    X: 0 | Y: 0
    + +
    +
    ๐Ÿ“š
    +
    +

    ๐Ÿ“„ Documentation

    +

    Loading...

    +
    +
    +
    + +
    +
    ๐Ÿšช
    +
    +

    ๐Ÿšช Gateway Management

    + +
    +
    +
    + Click on a gateway device to manage it +
    +
    +
    + + + +
    +

    Device Types

    +
    + ๐Ÿ–ฅ๏ธ + Workstation +
    +
    + ๐ŸŒ + Router +
    +
    + ๏ฟฝ + Gateway (Click to manage) +
    +
    + ๏ฟฝ๐Ÿ“ก + Mesh Node +
    +
    + โšก + Service +
    +
    +
    + + + + +""" + +@app.route('/') +def index(): + """Serve the network map interface""" + return render_template_string(MAP_TEMPLATE) + +@app.route('/api/devices') +def api_devices(): + """Get all network devices with logs""" + devices = get_network_devices() + git_status = get_git_status() + + return jsonify({ + 'devices': devices, + 'git': git_status, + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/api/docs') +def api_docs(): + """Get all documentation files""" + docs = get_all_documentation() + + return jsonify({ + 'docs': docs, + 'count': len(docs), + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/api/logs/') +def api_device_logs(device_id): + """Get detailed logs for specific device""" + devices = get_network_devices() + device = next((d for d in devices if d['id'] == device_id), None) + + if device: + return jsonify({ + 'device': device, + 'logs': device['logs'], + 'timestamp': datetime.now().isoformat() + }) + else: + return jsonify({'error': 'Device not found'}), 404 + +@app.route('/api/gateways') +def api_gateways(): + """Get all gateway devices""" + devices = get_network_devices() + gateways = [d for d in devices if d.get('is_gateway', False)] + + return jsonify({ + 'gateways': gateways, + 'count': len(gateways), + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/api/gateway//config', methods=['GET', 'POST']) +def api_gateway_config(gateway_id): + """Get or update gateway configuration""" + devices = get_network_devices() + gateways = [d for d in devices if d.get('is_gateway', False)] + gateway_configs = {g['id']: g.get('gateway_config', {}) for g in gateways} + + if request.method == 'GET': + config = gateway_configs.get(gateway_id, {}) + return jsonify({ + 'gateway_id': gateway_id, + 'config': config, + 'timestamp': datetime.now().isoformat() + }) + elif request.method == 'POST': + data = request.json + if gateway_id in gateway_configs: + gateway_configs[gateway_id].update(data) + return jsonify({ + 'success': 
True, + 'gateway_id': gateway_id, + 'config': gateway_configs[gateway_id] + }) + return jsonify({'error': 'Gateway not found'}), 404 + +@app.route('/api/gateway//action', methods=['POST']) +def api_gateway_action(gateway_id): + """Perform action on gateway (restart, reset, etc.)""" + data = request.json + action = data.get('action', '') + + devices = get_network_devices() + gateways = [d for d in devices if d.get('is_gateway', False)] + gateway_configs = {g['id']: g.get('gateway_config', {}) for g in gateways} + + if gateway_id not in gateway_configs: + return jsonify({'error': 'Gateway not found'}), 404 + + actions_log = { + 'restart': f'Gateway {gateway_id} restarted successfully', + 'reset': f'Gateway {gateway_id} reset to factory defaults', + 'update_firmware': f'Gateway {gateway_id} firmware update initiated', + 'toggle_firewall': f'Gateway {gateway_id} firewall toggled', + 'toggle_dhcp': f'Gateway {gateway_id} DHCP server toggled' + } + + message = actions_log.get(action, f'Unknown action: {action}') + + return jsonify({ + 'success': True, + 'gateway_id': gateway_id, + 'action': action, + 'message': message, + 'timestamp': datetime.now().isoformat() + }) + +@app.route('/health') +def health(): + """Health check endpoint""" + return jsonify({ + 'status': 'healthy', + 'service': 'network-map-viewer', + 'devices': len(get_network_devices()) + }) + +if __name__ == '__main__': + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster - Network Topology Map Viewer โ•‘ +โ•‘ Live Device Monitoring with Log Thumbnails โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + print("๐Ÿ—บ๏ธ Starting Network Map Viewer on http://localhost:6000") + print("โšก Features:") 
+ print(" โœ“ Interactive topology map") + print(" โœ“ Device thumbnails with live logs") + print(" โœ“ Real-time status monitoring") + print(" โœ“ Git integration status") + print(" โœ“ Auto-refresh every 5 seconds") + print(" โœ“ Device classification by type") + print(" โœ“ Satellite map integration") + print(" โœ“ Gateway management panel") + print("") + print("๐Ÿš€ Running production WSGI server (Waitress)...") + print("") + + from waitress import serve + serve(app, host='0.0.0.0', port=6000, threads=8, url_scheme='http') diff --git a/network_thumbnails/index.html b/network_thumbnails/index.html new file mode 100644 index 0000000..cbbc734 --- /dev/null +++ b/network_thumbnails/index.html @@ -0,0 +1,101 @@ + + + + + NetworkBuster Thumbnail Gallery + + + +
    +

    ๐ŸŒ NetworkBuster Thumbnail Gallery

    +

    Extracted Network Device Thumbnails

    +

    Generated: 2026-01-03 07:16:30

    +
    + + + \ No newline at end of file diff --git a/network_thumbnails/mesh-node-1.html b/network_thumbnails/mesh-node-1.html new file mode 100644 index 0000000..52aebf0 --- /dev/null +++ b/network_thumbnails/mesh-node-1.html @@ -0,0 +1,84 @@ + + + + + Mesh Node Alpha - Thumbnail + + + +
    +
    +
    ๐Ÿ“ก
    +
    +

    Mesh Node Alpha

    +

    Network

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/mesh-node-2.html b/network_thumbnails/mesh-node-2.html new file mode 100644 index 0000000..bbc0248 --- /dev/null +++ b/network_thumbnails/mesh-node-2.html @@ -0,0 +1,84 @@ + + + + + Mesh Node Beta - Thumbnail + + + +
    +
    +
    ๐Ÿ“ก
    +
    +

    Mesh Node Beta

    +

    Network

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/mesh-node-3.html b/network_thumbnails/mesh-node-3.html new file mode 100644 index 0000000..51a06a6 --- /dev/null +++ b/network_thumbnails/mesh-node-3.html @@ -0,0 +1,84 @@ + + + + + Mesh Node Gamma - Thumbnail + + + +
    +
    +
    ๐Ÿ“ก
    +
    +

    Mesh Node Gamma

    +

    Network

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/router-networkbuster.html b/network_thumbnails/router-networkbuster.html new file mode 100644 index 0000000..1759651 --- /dev/null +++ b/network_thumbnails/router-networkbuster.html @@ -0,0 +1,84 @@ + + + + + NetworkBuster Router - Thumbnail + + + +
    +
    +
    ๐Ÿ”ง
    +
    +

    NetworkBuster Router

    +

    Network

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/router-wifi7.html b/network_thumbnails/router-wifi7.html new file mode 100644 index 0000000..92a8feb --- /dev/null +++ b/network_thumbnails/router-wifi7.html @@ -0,0 +1,84 @@ + + + + + WiFi 7 Mesh Router - Thumbnail + + + +
    +
    +
    ๐ŸŒ
    +
    +

    WiFi 7 Mesh Router

    +

    Network

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/service-api.html b/network_thumbnails/service-api.html new file mode 100644 index 0000000..5467869 --- /dev/null +++ b/network_thumbnails/service-api.html @@ -0,0 +1,84 @@ + + + + + API Server (3001) - Thumbnail + + + +
    +
    +
    โšก
    +
    +

    API Server (3001)

    +

    Service

    +
    +
    +
    RUNNING
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/service-audio.html b/network_thumbnails/service-audio.html new file mode 100644 index 0000000..0dfa970 --- /dev/null +++ b/network_thumbnails/service-audio.html @@ -0,0 +1,84 @@ + + + + + Audio Stream (3002) - Thumbnail + + + +
    +
    +
    โšก
    +
    +

    Audio Stream (3002)

    +

    Service

    +
    +
    +
    RUNNING
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/service-mission.html b/network_thumbnails/service-mission.html new file mode 100644 index 0000000..c0297e8 --- /dev/null +++ b/network_thumbnails/service-mission.html @@ -0,0 +1,84 @@ + + + + + Mission Control (5000) - Thumbnail + + + +
    +
    +
    โšก
    +
    +

    Mission Control (5000)

    +

    Service

    +
    +
    +
    RUNNING
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/service-web.html b/network_thumbnails/service-web.html new file mode 100644 index 0000000..4d87d56 --- /dev/null +++ b/network_thumbnails/service-web.html @@ -0,0 +1,84 @@ + + + + + Web Server (3000) - Thumbnail + + + +
    +
    +
    โšก
    +
    +

    Web Server (3000)

    +

    Service

    +
    +
    +
    RUNNING
    + +
    + + \ No newline at end of file diff --git a/network_thumbnails/thumbnails.json b/network_thumbnails/thumbnails.json new file mode 100644 index 0000000..d02e59d --- /dev/null +++ b/network_thumbnails/thumbnails.json @@ -0,0 +1,67 @@ +{ + "generated": "2026-01-03T07:16:30.219592", + "version": "1.0.1", + "total_devices": 10, + "devices": { + "workstation": { + "icon": "\ud83d\udda5\ufe0f", + "name": "Primary Workstation", + "type": "Hardware", + "status": "online" + }, + "router-wifi7": { + "icon": "\ud83c\udf10", + "name": "WiFi 7 Mesh Router", + "type": "Network", + "status": "online" + }, + "router-networkbuster": { + "icon": "\ud83d\udd27", + "name": "NetworkBuster Router", + "type": "Network", + "status": "online" + }, + "mesh-node-1": { + "icon": "\ud83d\udce1", + "name": "Mesh Node Alpha", + "type": "Network", + "status": "online" + }, + "mesh-node-2": { + "icon": "\ud83d\udce1", + "name": "Mesh Node Beta", + "type": "Network", + "status": "online" + }, + "mesh-node-3": { + "icon": "\ud83d\udce1", + "name": "Mesh Node Gamma", + "type": "Network", + "status": "online" + }, + "service-web": { + "icon": "\u26a1", + "name": "Web Server (3000)", + "type": "Service", + "status": "running" + }, + "service-api": { + "icon": "\u26a1", + "name": "API Server (3001)", + "type": "Service", + "status": "running" + }, + "service-audio": { + "icon": "\u26a1", + "name": "Audio Stream (3002)", + "type": "Service", + "status": "running" + }, + "service-mission": { + "icon": "\u26a1", + "name": "Mission Control (5000)", + "type": "Service", + "status": "running" + } + } +} \ No newline at end of file diff --git a/network_thumbnails/workstation.html b/network_thumbnails/workstation.html new file mode 100644 index 0000000..bebdc07 --- /dev/null +++ b/network_thumbnails/workstation.html @@ -0,0 +1,84 @@ + + + + + Primary Workstation - Thumbnail + + + +
    +
    +
    ๐Ÿ–ฅ๏ธ
    +
    +

    Primary Workstation

    +

    Hardware

    +
    +
    +
    ONLINE
    + +
    + + \ No newline at end of file diff --git a/networkbuster.egg-info/PKG-INFO b/networkbuster.egg-info/PKG-INFO new file mode 100644 index 0000000..3dd606d --- /dev/null +++ b/networkbuster.egg-info/PKG-INFO @@ -0,0 +1,41 @@ +Metadata-Version: 2.4 +Name: networkbuster +Version: 1.0.1 +Summary: NetworkBuster - Complete Network Management Suite +Home-page: https://networkbuster.net +Author: NetworkBuster Team +Author-email: admin@networkbuster.net +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: System Administrators +Classifier: Topic :: System :: Networking :: Monitoring +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.14 +Classifier: Operating System :: Microsoft :: Windows +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: LICENSE.txt +Requires-Dist: flask>=3.0.0 +Requires-Dist: flask-cors>=4.0.0 +Requires-Dist: requests>=2.31.0 +Requires-Dist: psutil>=5.9.0 +Requires-Dist: schedule>=1.2.0 +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + + +NetworkBuster is a comprehensive network management suite featuring: +- Real-time network monitoring and topology mapping +- API endpoint tracing and performance analysis +- Mission control dashboard +- Audio streaming server +- Universal launcher with scheduled deployment +- Maximum power production optimization diff --git a/networkbuster.egg-info/SOURCES.txt b/networkbuster.egg-info/SOURCES.txt new file mode 100644 index 0000000..7766a33 --- /dev/null +++ b/networkbuster.egg-info/SOURCES.txt @@ -0,0 +1,11 @@ +LICENSE +LICENSE.txt +README.md +setup.py +networkbuster.egg-info/PKG-INFO +networkbuster.egg-info/SOURCES.txt +networkbuster.egg-info/dependency_links.txt +networkbuster.egg-info/entry_points.txt 
+networkbuster.egg-info/not-zip-safe +networkbuster.egg-info/requires.txt +networkbuster.egg-info/top_level.txt \ No newline at end of file diff --git a/networkbuster.egg-info/dependency_links.txt b/networkbuster.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/networkbuster.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/networkbuster.egg-info/entry_points.txt b/networkbuster.egg-info/entry_points.txt new file mode 100644 index 0000000..77bf8b8 --- /dev/null +++ b/networkbuster.egg-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +networkbuster = networkbuster_launcher:main +networkbuster-map = network_map_viewer:main +networkbuster-tracer = api_tracer:main diff --git a/networkbuster.egg-info/not-zip-safe b/networkbuster.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/networkbuster.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/networkbuster.egg-info/requires.txt b/networkbuster.egg-info/requires.txt new file mode 100644 index 0000000..fb70120 --- /dev/null +++ b/networkbuster.egg-info/requires.txt @@ -0,0 +1,5 @@ +flask>=3.0.0 +flask-cors>=4.0.0 +requests>=2.31.0 +psutil>=5.9.0 +schedule>=1.2.0 diff --git a/networkbuster.egg-info/top_level.txt b/networkbuster.egg-info/top_level.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/networkbuster.egg-info/top_level.txt @@ -0,0 +1 @@ + diff --git a/networkbuster_ai.py b/networkbuster_ai.py new file mode 100644 index 0000000..55b42d6 --- /dev/null +++ b/networkbuster_ai.py @@ -0,0 +1,1761 @@ +""" +NetworkBuster AI - Intelligent Network Assistant +AI-powered diagnostics, monitoring, and automation for NetworkBuster +""" + +import os +from flask import Flask, render_template_string, request, jsonify +from flask_cors import CORS +import psutil +import socket +from datetime import datetime +import json + +app = Flask(__name__) +CORS(app) + +# AI Signal Monitor Template - Read-only window 
+AI_SIGNAL_MONITOR = """ + + + + + + AI Signal Monitor - Home Base + + + +
    +
    +
    +

    ๐Ÿ›ฐ๏ธ AI SIGNAL MONITOR - HOME BASE

    +
    NetworkBuster Intelligence Feed
    +
    +
    + READ ONLY +
    +
    + +
    +
    +

    ๐Ÿ”ด SIGNAL STATUS

    +
    + Home Base Connection: + ACTIVE +
    +
    + Signal Strength: + 100% +
    +
    + AI Engine: + ONLINE +
    +
    + Last Update: + --:--:-- +
    +
    + +
    +

    ๐Ÿ›ก๏ธ SECURITY STATUS

    +
    + Devices Monitored: + 0 +
    +
    + Active Threats: + 0 +
    +
    + Blocked Devices: + 0 +
    +
    + Threat Level: + LOW +
    +
    + +
    +

    ๐Ÿ“Š SYSTEM METRICS

    +
    + CPU Usage: + 0% +
    +
    + Memory Usage: + 0% +
    +
    + Active Services: + 0/8 +
    +
    + Network Connections: + 0 +
    +
    + +
    +

    ๐Ÿ“š HISTORICAL LIBRARY

    +
    + Total Devices: + 0 +
    +
    + Tagged Devices: + 0 +
    +
    + Reputation Scores: + 0 +
    +
    + Serialization Attempts: + 0 +
    +
    +
    + + + +
    +
    ๐Ÿ“ก REAL-TIME ACTIVITY FEED:
    +
    +
    + + + + +""" + +# NetworkBuster AI Template +NBAI_TEMPLATE = """ + + + + + + NetworkBuster AI Assistant + + + +
    +
    +

    ๐Ÿง  NetworkBuster AI

    +

    Intelligent Network Assistant โ€ข Powered by NetworkBuster

    +
    + +
    +
    +
    + AI ONLINE +
    +
    + --:--:-- +
    +
    + 7 Services Active +
    +
    + +
    +
    +
    +
    NetworkBuster AI
    +
    +
    +
    + ๐Ÿ‘‹ Welcome to NetworkBuster AI!

    + I'm your intelligent network assistant, ready to help with:

    + โ€ข system status - Check all services and ports
    + โ€ข network scan - Analyze network topology
    + โ€ข diagnose - Troubleshoot connectivity issues
    + โ€ข optimize - Performance tuning recommendations
    + โ€ข security check - Security analysis
    + โ€ข health report - Comprehensive system health

    + Type any command or ask a question to get started! ๐Ÿš€ +
    +
    +
    + +
    + NetworkBuster AI is thinking +
    +
    +
    +
    + +
    + + +
    +
    + + + + +""" + +# AI Intelligence Engine with Advanced Indexing +class NetworkBusterAI: + def __init__(self): + # Service index for O(1) lookups + self.services = [ + {'name': 'Web Server', 'port': 3000, 'type': 'node', 'critical': True}, + {'name': 'API Server', 'port': 3001, 'type': 'node', 'critical': True}, + {'name': 'Audio Stream', 'port': 3002, 'type': 'node', 'critical': False}, + {'name': 'NetworkBuster AI', 'port': 4000, 'type': 'python', 'critical': True}, + {'name': 'Mission Control', 'port': 5000, 'type': 'python', 'critical': True}, + {'name': 'Network Map', 'port': 6000, 'type': 'python', 'critical': False}, + {'name': 'Universal Launcher', 'port': 7000, 'type': 'python', 'critical': False}, + {'name': 'API Tracer', 'port': 8000, 'type': 'python', 'critical': False}, + ] + + # Build indexed lookups for supercomputer-speed performance + self.port_index = {svc['port']: svc for svc in self.services} + self.name_index = {svc['name'].lower(): svc for svc in self.services} + self.type_index = {} + for svc in self.services: + if svc['type'] not in self.type_index: + self.type_index[svc['type']] = [] + self.type_index[svc['type']].append(svc) + + # Performance metrics cache + self.metrics_cache = {} + self.cache_timestamp = 0 + + # Security: Microdevice tracking and barrier system + self.device_fingerprints = {} # Track known devices + self.serialization_attempts = [] # Log suspicious activity + self.blocked_devices = set() # Blacklist for threats + self.connection_history = {} # Connection pattern analysis + self.threat_score_index = {} # Real-time threat scoring + + # Historical Device Library System + self.device_library = {} # Persistent device database + self.device_tags = {} # Device categorization (trusted, suspicious, unknown, threat) + self.device_reputation = {} # Long-term reputation scores + self.library_file = 'networkbuster_device_library.json' + self.load_device_library() # Load existing historical data + + # Conversation History System + 
self.conversation_history = [] # Store all Q&A exchanges + self.conversation_file = 'networkbuster_conversations.json' + self.load_conversation_history() + + def check_port(self, port): + """Check if a port is listening""" + for conn in psutil.net_connections(): + if conn.laddr.port == port and conn.status == 'LISTEN': + return True + return False + + def load_device_library(self): + """Load historical device library from persistent storage""" + try: + if os.path.exists(self.library_file): + with open(self.library_file, 'r') as f: + data = json.load(f) + self.device_library = data.get('devices', {}) + self.device_tags = data.get('tags', {}) + self.device_reputation = data.get('reputation', {}) + print(f"๐Ÿ“š Loaded {len(self.device_library)} devices from historical library") + else: + print("๐Ÿ“š Creating new device library") + except Exception as e: + print(f"โš ๏ธ Error loading library: {e}") + + def load_conversation_history(self): + """Load conversation history from persistent storage""" + try: + if os.path.exists(self.conversation_file): + with open(self.conversation_file, 'r') as f: + data = json.load(f) + self.conversation_history = data.get('conversations', []) + print(f"๐Ÿ’ฌ Loaded {len(self.conversation_history)} conversation exchanges") + else: + print("๐Ÿ’ฌ Creating new conversation history") + except Exception as e: + print(f"โš ๏ธ Error loading conversations: {e}") + + def save_conversation(self, user_message, ai_response): + """Save conversation exchange to history""" + try: + conversation_entry = { + 'timestamp': datetime.now().isoformat(), + 'user_message': user_message, + 'ai_response': ai_response, + 'session': datetime.now().strftime('%Y-%m-%d') + } + self.conversation_history.append(conversation_entry) + + # Save to file + data = { + 'conversations': self.conversation_history[-1000:], # Keep last 1000 + 'total_exchanges': len(self.conversation_history), + 'last_updated': datetime.now().isoformat() + } + with open(self.conversation_file, 'w') 
as f: + json.dump(data, f, indent=2) + return True + except Exception as e: + print(f"โš ๏ธ Error saving conversation: {e}") + return False + + def save_device_library(self): + """Save device library to persistent storage""" + try: + data = { + 'devices': self.device_library, + 'tags': self.device_tags, + 'reputation': self.device_reputation, + 'last_updated': datetime.now().isoformat(), + 'total_devices': len(self.device_library) + } + with open(self.library_file, 'w') as f: + json.dump(data, f, indent=2) + return True + except Exception as e: + print(f"โš ๏ธ Error saving library: {e}") + return False + + def tag_device(self, device_ip, tag, reason=''): + """Tag a device with category (trusted, suspicious, unknown, threat, blocked)""" + valid_tags = ['trusted', 'suspicious', 'unknown', 'threat', 'blocked', 'internal'] + if tag not in valid_tags: + tag = 'unknown' + + self.device_tags[device_ip] = { + 'tag': tag, + 'reason': reason, + 'timestamp': time.time(), + 'tagged_at': datetime.now().isoformat() + } + + # Update device library entry + if device_ip not in self.device_library: + self.device_library[device_ip] = { + 'first_seen': datetime.now().isoformat(), + 'total_connections': 0, + 'threat_events': [], + 'ports_accessed': [] + } + + self.device_library[device_ip]['tag'] = tag + self.device_library[device_ip]['tag_reason'] = reason + self.device_library[device_ip]['last_updated'] = datetime.now().isoformat() + + # Auto-save after tagging + self.save_device_library() + return True + + def update_device_reputation(self, device_ip, score_delta, event_type=''): + """Update device reputation score based on behavior""" + if device_ip not in self.device_reputation: + self.device_reputation[device_ip] = { + 'score': 50, # Start neutral (0-100 scale) + 'history': [] + } + + # Update score + current_score = self.device_reputation[device_ip]['score'] + new_score = max(0, min(100, current_score + score_delta)) + self.device_reputation[device_ip]['score'] = new_score + + 
# Log event + self.device_reputation[device_ip]['history'].append({ + 'timestamp': datetime.now().isoformat(), + 'event': event_type, + 'score_change': score_delta, + 'new_score': new_score + }) + + # Auto-tag based on reputation + if new_score >= 80: + self.tag_device(device_ip, 'trusted', f'High reputation: {new_score}') + elif new_score <= 20: + self.tag_device(device_ip, 'threat', f'Low reputation: {new_score}') + elif new_score <= 40: + self.tag_device(device_ip, 'suspicious', f'Poor reputation: {new_score}') + + return new_score + + def get_device_history(self, device_ip): + """Get complete historical data for a device""" + if device_ip not in self.device_library: + return None + + device_data = self.device_library[device_ip].copy() + device_data['tag_info'] = self.device_tags.get(device_ip, {'tag': 'unknown'}) + device_data['reputation'] = self.device_reputation.get(device_ip, {'score': 50}) + device_data['current_threat_score'] = self.threat_score_index.get(device_ip, 0) + device_data['is_blocked'] = device_ip in self.blocked_devices + + return device_data + + def get_system_status(self): + """Get comprehensive system status""" + status = [] + for service in self.services: + is_active = self.check_port(service['port']) + status.append({ + 'name': service['name'], + 'port': service['port'], + 'active': is_active, + 'type': service.get('type'), + 'critical': service.get('critical') + }) + return status + + def detect_microdevices(self): + """Detect and analyze microdevice serialization attempts with AI barrier""" + import time + from collections import defaultdict + + start_time = time.perf_counter() + + # Get all network connections + connections = psutil.net_connections(kind='inet') + + # Index devices by remote address for O(1) lookups + device_index = defaultdict(list) + + for conn in connections: + if hasattr(conn, 'raddr') and conn.raddr: + remote_ip = conn.raddr.ip + remote_port = conn.raddr.port + + # Build device fingerprint + fingerprint = { + 'ip': 
remote_ip, + 'port': remote_port, + 'local_port': conn.laddr.port, + 'status': conn.status, + 'pid': conn.pid + } + + device_index[remote_ip].append(fingerprint) + + # Update connection history for pattern analysis + if remote_ip not in self.connection_history: + self.connection_history[remote_ip] = { + 'first_seen': time.time(), + 'connection_count': 0, + 'ports_accessed': set() + } + + self.connection_history[remote_ip]['connection_count'] += 1 + self.connection_history[remote_ip]['ports_accessed'].add(conn.laddr.port) + self.connection_history[remote_ip]['last_seen'] = time.time() + + # Update historical device library + if remote_ip not in self.device_library: + self.device_library[remote_ip] = { + 'first_seen': datetime.now().isoformat(), + 'total_connections': 0, + 'threat_events': [], + 'ports_accessed': [] + } + + self.device_library[remote_ip]['total_connections'] += 1 + self.device_library[remote_ip]['last_seen'] = datetime.now().isoformat() + if conn.laddr.port not in self.device_library[remote_ip]['ports_accessed']: + self.device_library[remote_ip]['ports_accessed'].append(conn.laddr.port) + + # Auto-tag localhost/internal devices + if remote_ip.startswith('127.') or remote_ip.startswith('192.168.') or remote_ip.startswith('10.'): + if remote_ip not in self.device_tags: + self.tag_device(remote_ip, 'internal', 'Internal network device') + + # AI-powered threat analysis with indexed pattern matching + threats_detected = [] + for device_ip, device_conns in device_index.items(): + threat_score = 0 + reasons = [] + + # Pattern 1: Port scanning (serialization reconnaissance) + unique_ports = len(set(c['local_port'] for c in device_conns)) + if unique_ports > 10: + threat_score += 50 + reasons.append(f"๐Ÿ” Port scanning: {unique_ports} ports") + + # Pattern 2: Rapid serialization attempts + if len(device_conns) > 20: + threat_score += 40 + reasons.append(f"โšก Serialization attack: {len(device_conns)} attempts") + + # Pattern 3: Critical service targeting + 
critical_ports = [3000, 3001, 4000, 5000] + critical_accessed = [c for c in device_conns if c['local_port'] in critical_ports] + if len(critical_accessed) > 3: + threat_score += 30 + reasons.append("๐ŸŽฏ Critical service targeting") + + # Pattern 4: Historical suspicious behavior + if device_ip in self.connection_history: + hist = self.connection_history[device_ip] + if hist['connection_count'] > 100: + threat_score += 20 + reasons.append(f"๐Ÿ“Š High activity: {hist['connection_count']} total") + + # Store threat score in indexed structure + self.threat_score_index[device_ip] = threat_score + + # AI barrier decision: Block or monitor + if threat_score >= 70: + threats_detected.append({ + 'ip': device_ip, + 'threat_score': threat_score, + 'connections': len(device_conns), + 'unique_ports': unique_ports, + 'reasons': reasons, + 'status': 'BLOCKED', + 'action': 'Barrier activated', + 'tag': self.device_tags.get(device_ip, {}).get('tag', 'unknown') + }) + self.blocked_devices.add(device_ip) + self.serialization_attempts.append({ + 'timestamp': time.time(), + 'ip': device_ip, + 'score': threat_score + }) + # Tag as threat and update reputation + self.tag_device(device_ip, 'blocked', f'Threat score: {threat_score}') + self.update_device_reputation(device_ip, -30, 'High threat detected') + # Log threat event + if device_ip in self.device_library: + self.device_library[device_ip]['threat_events'].append({ + 'timestamp': datetime.now().isoformat(), + 'threat_score': threat_score, + 'reasons': reasons, + 'action': 'blocked' + }) + elif threat_score >= 40: + threats_detected.append({ + 'ip': device_ip, + 'threat_score': threat_score, + 'connections': len(device_conns), + 'unique_ports': unique_ports, + 'reasons': reasons, + 'status': 'WARNING', + 'action': 'Monitoring enabled', + 'tag': self.device_tags.get(device_ip, {}).get('tag', 'unknown') + }) + # Tag as suspicious and update reputation + self.tag_device(device_ip, 'suspicious', f'Warning level threat: {threat_score}') + 
self.update_device_reputation(device_ip, -10, 'Suspicious activity') + else: + # Good behavior - increase reputation + if device_ip in self.device_library: + self.update_device_reputation(device_ip, 1, 'Normal activity') + + analysis_time = (time.perf_counter() - start_time) * 1000 + + # Save updated library after analysis + self.save_device_library() + + return { + 'total_devices': len(device_index), + 'threats_detected': threats_detected, + 'blocked_count': len(self.blocked_devices), + 'blocked_devices': list(self.blocked_devices), + 'total_attempts_logged': len(self.serialization_attempts), + 'library_size': len(self.device_library), + 'tagged_devices': len(self.device_tags), + 'analysis_time_ms': round(analysis_time, 3) + } + + def analyze_network(self): + """Analyze network connections with supercomputer-grade indexing""" + import time + start_time = time.perf_counter() + + connections = psutil.net_connections() + + # Build indexed analysis structures + status_index = {} + port_usage = {} + protocol_index = {'TCP': 0, 'UDP': 0} + + for conn in connections: + # Index by status + status = conn.status + status_index[status] = status_index.get(status, 0) + 1 + + # Index port usage + if hasattr(conn.laddr, 'port'): + port = conn.laddr.port + if port in self.port_index: + port_usage[port] = port_usage.get(port, 0) + 1 + + # Protocol analysis + if conn.type == 1: # SOCK_STREAM = TCP + protocol_index['TCP'] += 1 + elif conn.type == 2: # SOCK_DGRAM = UDP + protocol_index['UDP'] += 1 + + analysis_time = (time.perf_counter() - start_time) * 1000 # Convert to ms + + return { + 'total_connections': len(connections), + 'status_breakdown': status_index, + 'listening_ports': status_index.get('LISTEN', 0), + 'established': status_index.get('ESTABLISHED', 0), + 'port_usage': port_usage, + 'protocol_distribution': protocol_index, + 'analysis_time_ms': round(analysis_time, 3) + } + + def get_system_health(self): + """Get comprehensive system health with supercomputer-grade 
metrics""" + import time + start_time = time.perf_counter() + + # Check cache (5 second TTL) + current_time = time.time() + if 'health' in self.metrics_cache and (current_time - self.cache_timestamp) < 5: + return self.metrics_cache['health'] + + # CPU metrics with per-core analysis + cpu_percent = psutil.cpu_percent(interval=0.5, percpu=False) + cpu_cores = psutil.cpu_count(logical=False) + cpu_threads = psutil.cpu_count(logical=True) + cpu_freq = psutil.cpu_freq() + + # Memory analysis + memory = psutil.virtual_memory() + swap = psutil.swap_memory() + + # Disk I/O metrics + disk = psutil.disk_usage('/') + disk_io = psutil.disk_io_counters() + + # Network I/O metrics + net_io = psutil.net_io_counters() + + # Process count and load + process_count = len(psutil.pids()) + load_avg = psutil.getloadavg() if hasattr(psutil, 'getloadavg') else (0, 0, 0) + + analysis_time = (time.perf_counter() - start_time) * 1000 + + health_data = { + 'cpu_usage': round(cpu_percent, 2), + 'cpu_cores': cpu_cores, + 'cpu_threads': cpu_threads, + 'cpu_frequency_mhz': round(cpu_freq.current, 2) if cpu_freq else 0, + 'memory_usage': round(memory.percent, 2), + 'memory_total_gb': round(memory.total / (1024**3), 2), + 'memory_available_gb': round(memory.available / (1024**3), 2), + 'memory_used_gb': round(memory.used / (1024**3), 2), + 'swap_usage': round(swap.percent, 2), + 'swap_total_gb': round(swap.total / (1024**3), 2), + 'disk_usage': round(disk.percent, 2), + 'disk_total_gb': round(disk.total / (1024**3), 2), + 'disk_free_gb': round(disk.free / (1024**3), 2), + 'disk_read_mb': round(disk_io.read_bytes / (1024**2), 2) if disk_io else 0, + 'disk_write_mb': round(disk_io.write_bytes / (1024**2), 2) if disk_io else 0, + 'network_sent_mb': round(net_io.bytes_sent / (1024**2), 2), + 'network_recv_mb': round(net_io.bytes_recv / (1024**2), 2), + 'process_count': process_count, + 'load_average': [round(l, 2) for l in load_avg], + 'analysis_time_ms': round(analysis_time, 3) + } + + # Cache 
results + self.metrics_cache['health'] = health_data + self.cache_timestamp = current_time + + return health_data + + def process_query(self, query): + """Process user query and generate intelligent response""" + query_lower = query.lower() + + # System status command with supercomputer analysis + if 'status' in query_lower or 'services' in query_lower: + import time + scan_start = time.perf_counter() + + status = self.get_system_status() + active = [s for s in status if s['active']] + inactive = [s for s in status if not s['active']] + critical_down = [s for s in inactive if s.get('critical', False)] + + scan_time = (time.perf_counter() - scan_start) * 1000 + + response = f"๐Ÿ“Š SUPERCOMPUTER SYSTEM ANALYSIS
    " + response += f"Scan completed in {scan_time:.3f}ms

    " + response += f"โœ… SERVICE STATUS: {len(active)}/{len(status)} OPERATIONAL

    " + + # Critical services check + if critical_down: + response += "โš ๏ธ CRITICAL SERVICES DOWN:
    " + for s in critical_down: + response += f"โ€ข {s['name']} (Port {s['port']}) - CRITICAL OFFLINE
    " + response += "
    " + + # Active services by type + if active: + response += "๐ŸŸข ACTIVE SERVICES:
    " + node_services = [s for s in active if s.get('type') == 'node'] + python_services = [s for s in active if s.get('type') == 'python'] + + if node_services: + response += "Node.js Services:
    " + for s in node_services: + response += f"โ€ข {s['name']} โ†’ Port {s['port']} โ†’ ONLINE
    " + + if python_services: + response += "Python Services:
    " + for s in python_services: + critical_badge = " ๐Ÿ”ด" if s.get('critical') else "" + response += f"โ€ข {s['name']} โ†’ Port {s['port']} โ†’ ONLINE{critical_badge}
    " + + if inactive and not critical_down: + response += f"
    โšช INACTIVE SERVICES ({len(inactive)}):
    " + for s in inactive: + response += f"โ€ข {s['name']} (Port {s['port']}) - Standby
    " + + response += f"
    Index lookup time: O(1) constant time" + return response + + # Network scan command with supercomputer analysis + elif 'network' in query_lower and ('scan' in query_lower or 'analyze' in query_lower): + net = self.analyze_network() + response = f"๐ŸŒ SUPERCOMPUTER NETWORK ANALYSIS
    " + response += f"Analysis time: {net['analysis_time_ms']}ms

    " + + response += f"๐Ÿ“Š CONNECTION METRICS:
    " + response += f"โ€ข Total Connections: {net['total_connections']}
    " + response += f"โ€ข Listening Ports: {net['listening_ports']}
    " + response += f"โ€ข Established: {net['established']}

    " + + response += f"๐Ÿ”Œ PROTOCOL DISTRIBUTION:
    " + for protocol, count in net['protocol_distribution'].items(): + response += f"โ€ข {protocol}: {count} connections
    " + + if net.get('status_breakdown'): + response += f"
    ๐Ÿ“ CONNECTION STATUS INDEX:
    " + for status, count in sorted(net['status_breakdown'].items(), key=lambda x: -x[1])[:5]: + response += f"โ€ข {status}: {count}
    " + + if net.get('port_usage'): + response += f"
    ๐Ÿ”Œ SERVICE PORT USAGE:
    " + for port, count in sorted(net['port_usage'].items()): + service_name = self.port_index.get(port, {}).get('name', 'Unknown') + response += f"โ€ข Port {port} ({service_name}): {count} connections
    " + + response += f"
    Indexed lookup performance: O(1) hash table" + return response + + # Health report command with supercomputer metrics + elif 'health' in query_lower or 'performance' in query_lower: + health = self.get_system_health() + response = f"๐Ÿ’ช SUPERCOMPUTER HEALTH ANALYSIS
    " + response += f"Metrics cached | Query time: {health['analysis_time_ms']}ms

    " + + response += f"โšก CPU METRICS:
    " + response += f"โ€ข Usage: {health['cpu_usage']}%
    " + response += f"โ€ข Cores: {health['cpu_cores']} physical / {health['cpu_threads']} logical
    " + response += f"โ€ข Frequency: {health['cpu_frequency_mhz']} MHz

    " + + response += f"๐Ÿ’พ MEMORY ANALYSIS:
    " + response += f"โ€ข RAM Usage: {health['memory_usage']}%
    " + response += f"โ€ข Used: {health['memory_used_gb']} GB / Total: {health['memory_total_gb']} GB
    " + response += f"โ€ข Available: {health['memory_available_gb']} GB
    " + response += f"โ€ข Swap: {health['swap_usage']}% ({health['swap_total_gb']} GB)

    " + + response += f"๐Ÿ’ฟ DISK I/O METRICS:
    " + response += f"โ€ข Usage: {health['disk_usage']}%
    " + response += f"โ€ข Free Space: {health['disk_free_gb']} GB / {health['disk_total_gb']} GB
    " + response += f"โ€ข Read: {health['disk_read_mb']} MB | Write: {health['disk_write_mb']} MB

    " + + response += f"๐ŸŒ NETWORK I/O:
    " + response += f"โ€ข Sent: {health['network_sent_mb']} MB
    " + response += f"โ€ข Received: {health['network_recv_mb']} MB

    " + + response += f"๐Ÿ“Š SYSTEM LOAD:
    " + response += f"โ€ข Active Processes: {health['process_count']}
    " + if health['load_average'][0] > 0: + response += f"โ€ข Load Average: {health['load_average'][0]} (1m) / {health['load_average'][1]} (5m) / {health['load_average'][2]} (15m)

    " + else: + response += "
    " + + # Performance assessment + if health['cpu_usage'] > 80: + response += "โš ๏ธ High CPU usage - Consider closing unused applications
    " + if health['memory_usage'] > 80: + response += "โš ๏ธ High memory pressure - Services may need restart
    " + if health['disk_usage'] > 90: + response += "โš ๏ธ Low disk space - Run cleanup operations
    " + + if health['cpu_usage'] < 60 and health['memory_usage'] < 70: + response += "โœ… System operating at optimal performance!
    " + + response += f"
    Metrics cached with 5s TTL for performance" + return response + + # Diagnose command + elif 'diagnose' in query_lower or 'troubleshoot' in query_lower: + status = self.get_system_status() + inactive = [s for s in status if not s['active']] + + response = f"๐Ÿ” Diagnostic Report

    " + + if not inactive: + response += "โœ… All services are running correctly!

    " + response += "No issues detected. System is healthy." + else: + response += f"โš ๏ธ {len(inactive)} Service(s) Not Running

    " + response += "Recommended Actions:
    " + response += "1. Run AUTOSTART.bat to start all services
    " + response += "2. Or use nb-start PowerShell command
    " + response += "3. Check logs for any error messages

    " + response += "Inactive Services:
    " + for s in inactive: + response += f"โ€ข {s['name']} (Port {s['port']})
    " + + return response + + # Optimize command + elif 'optimize' in query_lower or 'performance' in query_lower: + health = self.get_system_health() + response = f"โšก Optimization Recommendations

    " + response += "Performance Tuning:
    " + response += "โ€ข Run nb-autostart for boot optimization
    " + response += "โ€ข Enable high-performance power plan
    " + response += "โ€ข Close unused background applications
    " + response += "โ€ข Clear browser cache and temporary files
    " + response += "โ€ข Run flash_git_backup.py to free space

    " + + if health['cpu_usage'] < 50 and health['memory_usage'] < 50: + response += "โœ… System resources are well-balanced!" + else: + response += "๐Ÿ’ก Consider restarting services during low-usage periods." + + return response + + # Security check command with microdevice barrier analysis + elif 'security' in query_lower or 'secure' in query_lower or 'device' in query_lower or 'threat' in query_lower: + # Run microdevice detection + device_scan = self.detect_microdevices() + + response = f"๐Ÿ”’ AI SECURITY BARRIER ANALYSIS
    " + response += f"Deep scan completed in {device_scan['analysis_time_ms']}ms

    " + + response += f"๐Ÿ›ก๏ธ MICRODEVICE DETECTION:
    " + response += f"โ€ข Total Devices Scanned: {device_scan['total_devices']}
    " + response += f"โ€ข Threats Detected: {len(device_scan['threats_detected'])}
    " + response += f"โ€ข Blocked Devices: {device_scan['blocked_count']}
    " + response += f"โ€ข Serialization Attempts Logged: {device_scan['total_attempts_logged']}
    " + response += f"โ€ข Historical Library Size: {device_scan['library_size']} devices
    " + response += f"โ€ข Tagged Devices: {device_scan['tagged_devices']}

    " + + # Show threat details + if device_scan['threats_detected']: + response += f"โš ๏ธ ACTIVE THREATS:
    " + for threat in device_scan['threats_detected'][:5]: # Top 5 threats + status_color = '#ef4444' if threat['status'] == 'BLOCKED' else '#f59e0b' + tag = threat.get('tag', 'unknown') + tag_color = {'blocked': '#ef4444', 'threat': '#dc2626', 'suspicious': '#f59e0b', + 'internal': '#3b82f6', 'trusted': '#22c55e', 'unknown': '#6b7280'}.get(tag, '#6b7280') + response += f"
    " + response += f"{threat['status']} | IP: {threat['ip']} " + response += f"| Tag: ๐Ÿท๏ธ {tag.upper()}
    " + response += f"Threat Score: {threat['threat_score']}/100
    " + response += f"Connections: {threat['connections']} | Ports: {threat['unique_ports']}
    " + response += f"Reasons:
    " + for reason in threat['reasons']: + response += f" โ€ข {reason}
    " + response += f"Action: {threat['action']}
    " + response += f"
    " + + if len(device_scan['threats_detected']) > 5: + response += f"...and {len(device_scan['threats_detected']) - 5} more threats
    " + else: + response += "โœ… NO THREATS DETECTED
    " + response += "All devices are within normal parameters.
    " + + response += "
    ๐Ÿ” SECURITY STATUS:
    " + response += "โ€ข AI Barrier: ACTIVE
    " + response += "โ€ข Pattern Recognition: ENABLED
    " + response += "โ€ข Real-time Monitoring: ONLINE
    " + response += "โ€ข Historical Device Library: TRACKING
    " + response += "โ€ข Device Tagging System: OPERATIONAL
    " + response += "โ€ข Reputation Scoring: ACTIVE
    " + response += "โ€ข Indexed Threat Database: O(1) lookup
    " + response += "โ€ข CORS Protection: ENABLED
    " + response += "โ€ข Localhost Isolation: ACTIVE

    " + + response += "๐Ÿ“‹ RECOMMENDATIONS:
    " + response += "โ€ข Monitor blocked devices regularly
    " + response += "โ€ข Run security check periodically
    " + response += "โ€ข Review historical device library
    " + response += "โ€ข Check device tags and reputation scores
    " + response += "โ€ข Review serialization attempt logs
    " + response += "โ€ข Keep firewall rules updated
    " + + response += f"
    AI barrier with persistent historical library" + return response + + # Help command + elif 'help' in query_lower or 'commands' in query_lower: + response = f"๐Ÿ“š NetworkBuster AI Commands

    " + response += "Available Commands:
    " + response += "โ€ข system status - Check all services with indexed analysis
    " + response += "โ€ข network scan - Analyze connections and protocols
    " + response += "โ€ข health report - Comprehensive system metrics
    " + response += "โ€ข security check - AI microdevice barrier scan ๐Ÿ›ก๏ธ
    " + response += "โ€ข diagnose - Troubleshoot issues
    " + response += "โ€ข optimize - Performance tips
    " + response += "โ€ข security check - Security analysis
    " + response += "โ€ข help - Show this message

    " + response += "You can also ask questions in natural language!" + return response + + # Default intelligent response + else: + response = f"๐Ÿค” I understand you're asking about: {query}

    " + response += "I'm NetworkBuster AI with Historical Device Library, specialized in:

    " + response += "โ€ข System status and service health
    " + response += "โ€ข Network analysis and connectivity
    " + response += "โ€ข Performance optimization
    " + response += "โ€ข Security with device tagging & tracking ๐Ÿท๏ธ
    " + response += "โ€ข Microdevice threat detection ๐Ÿ›ก๏ธ
    " + response += "โ€ข Historical pattern analysis ๐Ÿ“š

    " + response += "Type help to see all available commands!" + return response + +# Initialize AI with Historical Library +print("\n๐Ÿง  Initializing NetworkBuster AI with Historical Device Library...") +ai_engine = NetworkBusterAI() +print(f"โœ… AI Engine ready with {len(ai_engine.device_library)} historical devices\n") + +@app.route('/') +def index(): + """Render NetworkBuster AI interface""" + return render_template_string(NBAI_TEMPLATE) + +@app.route('/chat', methods=['POST']) +def chat(): + """Process chat messages""" + try: + data = request.json + user_message = data.get('message', '') + + if not user_message: + return jsonify({'error': 'No message provided'}), 400 + + # Process query through AI engine + response = ai_engine.process_query(user_message) + + # Save to conversation history + ai_engine.save_conversation(user_message, response) + + return jsonify({ + 'response': response, + 'timestamp': datetime.now().isoformat() + }) + + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@app.route('/api/nbai/status', methods=['GET']) +def status(): + """Get system status""" + try: + services = ai_engine.get_system_status() + health = ai_engine.get_system_health() + network = ai_engine.analyze_network() + + return jsonify({ + 'services': services, + 'health': health, + 'network': network, + 'timestamp': datetime.now().isoformat() + }) + + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@app.route('/api/nbai/diagnose', methods=['GET']) +def diagnose(): + """Run diagnostics""" + try: + status = ai_engine.get_system_status() + health = ai_engine.get_system_health() + + inactive_services = [s for s in status if not s['active']] + + issues = [] + if inactive_services: + issues.append(f"{len(inactive_services)} service(s) not running") + if health['cpu_usage'] > 80: + issues.append("High CPU usage") + if health['memory_usage'] > 80: + issues.append("High memory usage") + if health['disk_usage'] > 90: + issues.append("Low disk 
space") + + return jsonify({ + 'healthy': len(issues) == 0, + 'issues': issues, + 'inactive_services': inactive_services, + 'timestamp': datetime.now().isoformat() + }) + + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@app.route('/monitor') +def signal_monitor(): + """Render AI Signal Monitor - Read-only window""" + return render_template_string(AI_SIGNAL_MONITOR) + +@app.route('/history') +def conversation_history(): + """Render Conversation History page""" + with open('conversation_history_template.html', 'r', encoding='utf-8') as f: + template = f.read() + return template + +@app.route('/api/nbai/conversations', methods=['GET']) +def get_conversations(): + """Get all conversation history""" + try: + today = datetime.now().strftime('%Y-%m-%d') + today_count = sum(1 for conv in ai_engine.conversation_history + if conv.get('session') == today) + + sessions = set(conv.get('session') for conv in ai_engine.conversation_history) + + return jsonify({ + 'conversations': ai_engine.conversation_history, + 'total_count': len(ai_engine.conversation_history), + 'today_count': today_count, + 'session_count': len(sessions), + 'timestamp': datetime.now().isoformat() + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@app.route('/api/nbai/signal-status', methods=['GET']) +def signal_status(): + """Get AI signal status for home base monitoring""" + try: + import time + + # Get comprehensive status + status = ai_engine.get_system_status() + health = ai_engine.get_system_health() + + # Device and threat metrics + active_services = len([s for s in status if s['active']]) + + # Recent activity feed + recent_activity = [] + + # Check for new threats + if len(ai_engine.blocked_devices) > 0: + recent_activity.append({ + 'message': f'๐Ÿ›ก๏ธ {len(ai_engine.blocked_devices)} devices blocked by barrier', + 'type': 'threat' + }) + + # Check for new devices + new_devices = sum(1 for ip, data in ai_engine.device_library.items() + if 'last_seen' in 
data and + (datetime.now() - datetime.fromisoformat(data['last_seen'])).seconds < 10) + if new_devices > 0: + recent_activity.append({ + 'message': f'๐Ÿ“ก {new_devices} new device(s) detected', + 'type': 'normal' + }) + + # System health warnings + if health['cpu_usage'] > 80: + recent_activity.append({ + 'message': f'โš ๏ธ High CPU usage: {health["cpu_usage"]}%', + 'type': 'warning' + }) + + if health['memory_usage'] > 80: + recent_activity.append({ + 'message': f'โš ๏ธ High memory usage: {health["memory_usage"]}%', + 'type': 'warning' + }) + + # Build comprehensive signal data + signal_data = { + 'timestamp': datetime.now().isoformat(), + 'connection_status': 'ACTIVE', + 'signal_strength': 100, + 'ai_engine_online': True, + + # Security metrics + 'devices_monitored': len(ai_engine.connection_history), + 'active_threats': len([ip for ip, score in ai_engine.threat_score_index.items() if score >= 40]), + 'blocked_devices': len(ai_engine.blocked_devices), + + # System metrics + 'cpu_usage': round(health['cpu_usage'], 1), + 'memory_usage': round(health['memory_usage'], 1), + 'active_services': active_services, + 'network_connections': len(psutil.net_connections()), + + # Historical library + 'library_size': len(ai_engine.device_library), + 'tagged_devices': len(ai_engine.device_tags), + 'reputation_count': len(ai_engine.device_reputation), + 'serialization_attempts': len(ai_engine.serialization_attempts), + + # Activity feed + 'recent_activity': recent_activity[-10:] # Last 10 activities + } + + return jsonify(signal_data) + + except Exception as e: + return jsonify({ + 'error': str(e), + 'connection_status': 'ERROR', + 'signal_strength': 0 + }), 500 + +def main(): + """Start NetworkBuster AI server with Historical Device Library""" + print("\n" + "โ•" * 60) + print("โ•‘ NetworkBuster AI - Intelligent Network Assistant โ•‘") + print("โ•‘ with Historical Device Library & Threat Tagging โ•‘") + print("โ•" * 60) + print("\n๐Ÿง  AI Engine Status:") + print(f" ๐Ÿ“š 
Historical Library: {len(ai_engine.device_library)} devices tracked") + print(f" ๐Ÿท๏ธ Tagged Devices: {len(ai_engine.device_tags)}") + print(f" ๐Ÿ›ก๏ธ Blocked Threats: {len(ai_engine.blocked_devices)}") + print(f" ๐Ÿ“Š Reputation Scores: {len(ai_engine.device_reputation)} devices") + print(f"\n๐ŸŒ Server Details:") + print(f" Main Dashboard: http://localhost:4000") + print(f" Signal Monitor: http://localhost:4000/monitor ๐Ÿ“ก") + print(f" API Endpoint: http://localhost:4000/api/nbai/chat") + print(f" Library File: {ai_engine.library_file}") + print("\n๐Ÿ’ก Features:") + print(" โ€ข Interactive AI Chat Interface") + print(" โ€ข Read-Only Signal Monitor (Home Base Feed)") + print(" โ€ข Device Tracking & Tagging") + print(" โ€ข Real-Time Threat Detection") + print("\n๐Ÿ“ก Open /monitor for real-time signal feed to home base!") + print("โ•" * 60 + "\n") + + app.run(host='0.0.0.0', port=4000, debug=False) + +if __name__ == '__main__': + main() diff --git a/networkbuster_app.pyw b/networkbuster_app.pyw new file mode 100644 index 0000000..8bab9cf --- /dev/null +++ b/networkbuster_app.pyw @@ -0,0 +1,285 @@ +""" +NetworkBuster GUI Application Launcher +Silent launcher without console window +""" + +import os +import sys +import subprocess +import tkinter as tk +from tkinter import ttk, messagebox +from pathlib import Path +import threading +import webbrowser +from datetime import datetime + +# Ensure we're in the right directory +os.chdir(Path(__file__).parent) + +class NetworkBusterApp: + def __init__(self, root): + self.root = root + self.root.title("NetworkBuster Control Panel") + self.root.geometry("600x700") + self.root.resizable(False, False) + + # Configure style + self.style = ttk.Style() + self.style.theme_use('clam') + + # Header + header_frame = tk.Frame(root, bg="#1e1e1e", height=80) + header_frame.pack(fill=tk.X) + header_frame.pack_propagate(False) + + title_label = tk.Label( + header_frame, + text="NetworkBuster", + font=("Segoe UI", 24, "bold"), + 
bg="#1e1e1e", + fg="#00ff00" + ) + title_label.pack(pady=10) + + subtitle_label = tk.Label( + header_frame, + text="All-in-One Network Management Suite", + font=("Segoe UI", 10), + bg="#1e1e1e", + fg="#888888" + ) + subtitle_label.pack() + + # Main container + main_frame = tk.Frame(root, bg="#2d2d2d") + main_frame.pack(fill=tk.BOTH, expand=True, padx=20, pady=20) + + # Status section + status_frame = tk.LabelFrame( + main_frame, + text="System Status", + font=("Segoe UI", 11, "bold"), + bg="#2d2d2d", + fg="#ffffff", + padx=10, + pady=10 + ) + status_frame.pack(fill=tk.X, pady=(0, 15)) + + self.status_label = tk.Label( + status_frame, + text="โšช System Idle", + font=("Segoe UI", 10), + bg="#2d2d2d", + fg="#ffff00" + ) + self.status_label.pack() + + # Scheduled launch section + schedule_frame = tk.LabelFrame( + main_frame, + text="Scheduled Launch", + font=("Segoe UI", 11, "bold"), + bg="#2d2d2d", + fg="#ffffff", + padx=10, + pady=10 + ) + schedule_frame.pack(fill=tk.X, pady=(0, 15)) + + self.schedule_label = tk.Label( + schedule_frame, + text="๐Ÿ“… January 17, 2026 at 9:00 AM\nโฐ Countdown: 14 days", + font=("Segoe UI", 10), + bg="#2d2d2d", + fg="#ffffff" + ) + self.schedule_label.pack() + + # Quick actions + actions_frame = tk.LabelFrame( + main_frame, + text="Quick Actions", + font=("Segoe UI", 11, "bold"), + bg="#2d2d2d", + fg="#ffffff", + padx=10, + pady=10 + ) + actions_frame.pack(fill=tk.X, pady=(0, 15)) + + # Button grid + btn_frame = tk.Frame(actions_frame, bg="#2d2d2d") + btn_frame.pack(fill=tk.X) + + # Row 1 + self.create_button(btn_frame, "๐Ÿš€ Start All Services", self.start_services, 0, 0) + self.create_button(btn_frame, "๐Ÿ›‘ Stop All Services", self.stop_services, 0, 1) + + # Row 2 + self.create_button(btn_frame, "๐Ÿ“Š Check Status", self.check_status, 1, 0) + self.create_button(btn_frame, "โšก Max Power Mode", self.max_power, 1, 1) + + # Dashboards section + dash_frame = tk.LabelFrame( + main_frame, + text="Dashboards", + font=("Segoe UI", 11, 
"bold"), + bg="#2d2d2d", + fg="#ffffff", + padx=10, + pady=10 + ) + dash_frame.pack(fill=tk.X, pady=(0, 15)) + + dash_btn_frame = tk.Frame(dash_frame, bg="#2d2d2d") + dash_btn_frame.pack(fill=tk.X) + + self.create_button(dash_btn_frame, "๐ŸŽฎ Mission Control", lambda: self.open_url("http://localhost:5000"), 0, 0) + self.create_button(dash_btn_frame, "๐Ÿ” API Tracer", lambda: self.open_url("http://localhost:8000"), 0, 1) + self.create_button(dash_btn_frame, "๐Ÿ—บ๏ธ Network Map", lambda: self.open_url("http://localhost:6000"), 1, 0) + self.create_button(dash_btn_frame, "๐Ÿš€ Universal Launch", lambda: self.open_url("http://localhost:7000"), 1, 1) + + # Service status display + service_frame = tk.LabelFrame( + main_frame, + text="Service Monitor", + font=("Segoe UI", 11, "bold"), + bg="#2d2d2d", + fg="#ffffff", + padx=10, + pady=10 + ) + service_frame.pack(fill=tk.BOTH, expand=True) + + self.service_text = tk.Text( + service_frame, + height=10, + font=("Consolas", 9), + bg="#1e1e1e", + fg="#00ff00", + insertbackground="#00ff00", + relief=tk.FLAT + ) + self.service_text.pack(fill=tk.BOTH, expand=True) + self.service_text.insert("1.0", "Click 'Check Status' to view service information...") + + # Auto-refresh status + self.refresh_status() + + def create_button(self, parent, text, command, row, col): + btn = tk.Button( + parent, + text=text, + command=command, + font=("Segoe UI", 10, "bold"), + bg="#0078d4", + fg="#ffffff", + activebackground="#005a9e", + activeforeground="#ffffff", + relief=tk.FLAT, + cursor="hand2", + height=2, + width=20 + ) + btn.grid(row=row, column=col, padx=5, pady=5, sticky="ew") + parent.grid_columnconfigure(col, weight=1) + + def run_command(self, command): + """Run command in background""" + def execute(): + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + cwd=Path(__file__).parent + ) + return result.stdout + except Exception as e: + return f"Error: {e}" + + thread = 
threading.Thread(target=execute) + thread.daemon = True + thread.start() + + def start_services(self): + self.status_label.config(text="๐ŸŸข Starting services...", fg="#00ff00") + self.run_command("python networkbuster_launcher.py --start") + self.root.after(3000, self.check_status) + + def stop_services(self): + self.status_label.config(text="๐ŸŸก Stopping services...", fg="#ffff00") + self.run_command("python networkbuster_launcher.py --stop") + self.root.after(1000, lambda: self.status_label.config(text="โšช Services stopped", fg="#888888")) + + def check_status(self): + self.status_label.config(text="๐Ÿ”„ Checking status...", fg="#00ffff") + + def get_status(): + try: + result = subprocess.run( + "python networkbuster_launcher.py --status", + shell=True, + capture_output=True, + text=True, + cwd=Path(__file__).parent + ) + self.service_text.delete("1.0", tk.END) + self.service_text.insert("1.0", result.stdout) + self.status_label.config(text="๐ŸŸข Status updated", fg="#00ff00") + except Exception as e: + self.service_text.delete("1.0", tk.END) + self.service_text.insert("1.0", f"Error: {e}") + self.status_label.config(text="๐Ÿ”ด Status check failed", fg="#ff0000") + + thread = threading.Thread(target=get_status) + thread.daemon = True + thread.start() + + def max_power(self): + response = messagebox.askyesno( + "Max Power Mode", + "Enable maximum power production mode?\n\n" + + "This will:\n" + + "โ€ข Set High Performance power plan\n" + + "โ€ข Disable CPU throttling\n" + + "โ€ข Optimize network settings\n" + + "โ€ข Set realtime priority\n\n" + + "Administrator privileges required." 
+ ) + + if response: + self.status_label.config(text="โšก Enabling max power...", fg="#ffff00") + subprocess.Popen( + 'powershell -Command "Start-Process powershell -ArgumentList \'-ExecutionPolicy Bypass -File run_launcher_admin.ps1\' -Verb RunAs"', + shell=True + ) + + def open_url(self, url): + webbrowser.open(url) + + def refresh_status(self): + """Auto-refresh status every 10 seconds""" + # Update countdown + launch_date = datetime(2026, 1, 17, 9, 0, 0) + now = datetime.now() + delta = launch_date - now + days = delta.days + hours, remainder = divmod(delta.seconds, 3600) + minutes, _ = divmod(remainder, 60) + + countdown_text = f"๐Ÿ“… January 17, 2026 at 9:00 AM\nโฐ Countdown: {days}d {hours}h {minutes}m" + self.schedule_label.config(text=countdown_text) + + # Schedule next refresh + self.root.after(10000, self.refresh_status) + +def main(): + root = tk.Tk() + app = NetworkBusterApp(root) + root.mainloop() + +if __name__ == '__main__': + main() diff --git a/networkbuster_config.json b/networkbuster_config.json new file mode 100644 index 0000000..0ef8096 --- /dev/null +++ b/networkbuster_config.json @@ -0,0 +1,15 @@ +{ + "auto_launch": true, + "scheduled_launch_date": "2026-01-17T09:00:00", + "last_launch": "2026-01-03T07:00:01.762249", + "launch_count": 1, + "enabled_services": [ + "Web Server", + "API Server", + "Audio Stream", + "Mission Control", + "Network Map", + "Universal Launcher", + "API Tracer" + ] +} \ No newline at end of file diff --git a/networkbuster_launcher.py b/networkbuster_launcher.py new file mode 100644 index 0000000..79048e5 --- /dev/null +++ b/networkbuster_launcher.py @@ -0,0 +1,580 @@ +""" +NetworkBuster - All-in-One Launch Manager +Unified program to launch and manage all NetworkBuster services +Includes scheduled launch functionality +""" + +import os +import sys +import subprocess +import time +import threading +import json +from datetime import datetime, timedelta +from pathlib import Path +import webbrowser +import schedule + +# 
Service configuration +SERVICES = [ + { + 'name': 'Web Server', + 'port': 3000, + 'command': 'node server-universal.js', + 'type': 'node', + 'critical': True, + 'startup_delay': 0 + }, + { + 'name': 'API Server', + 'port': 3001, + 'command': 'node server-universal.js', + 'cwd': 'api', + 'type': 'node', + 'critical': True, + 'startup_delay': 2 + }, + { + 'name': 'Audio Stream', + 'port': 3002, + 'command': 'node server-audio.js', + 'type': 'node', + 'critical': False, + 'startup_delay': 4 + }, + { + 'name': 'Mission Control', + 'port': 5000, + 'command': 'python nasa_home_base.py', + 'type': 'python', + 'critical': True, + 'startup_delay': 6 + }, + { + 'name': 'Network Map', + 'port': 6000, + 'command': 'python network_map_viewer.py', + 'type': 'python', + 'critical': False, + 'startup_delay': 8 + }, + { + 'name': 'Universal Launcher', + 'port': 7000, + 'command': 'python universal_launcher.py', + 'type': 'python', + 'critical': False, + 'startup_delay': 10 + }, + { + 'name': 'API Tracer', + 'port': 8000, + 'command': 'python api_tracer.py', + 'type': 'python', + 'critical': False, + 'startup_delay': 12 + } +] + +# Scheduled launch configuration +LAUNCH_DATE = datetime(2026, 1, 17, 9, 0, 0) # January 17, 2026 at 9:00 AM +CONFIG_FILE = 'networkbuster_config.json' + +class NetworkBusterManager: + def __init__(self): + self.processes = {} + self.running = False + self.config = self.load_config() + + def apply_production_optimizations(self): + """Apply max power optimizations for production""" + print("\n๐Ÿ”ฅ APPLYING MAX POWER PRODUCTION OPTIMIZATIONS...") + print("="*60) + + try: + # High Performance Power Plan + print("โšก Setting Ultimate Performance power plan...") + subprocess.run('powercfg /setactive 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c', + shell=True, capture_output=True) + + # Disable CPU throttling + print("๐Ÿš€ Disabling CPU throttling...") + subprocess.run('powercfg /setacvalueindex scheme_current sub_processor PROCTHROTTLEMAX 100', + shell=True, 
capture_output=True) + subprocess.run('powercfg /setactive scheme_current', shell=True, capture_output=True) + + # Optimize network stack + print("๐ŸŒ Maximizing network throughput...") + subprocess.run('netsh int tcp set global autotuninglevel=experimental', + shell=True, capture_output=True) + subprocess.run('netsh int tcp set global chimney=enabled', + shell=True, capture_output=True) + subprocess.run('netsh int tcp set global rss=enabled', + shell=True, capture_output=True) + + # Set process priority to realtime + print("๐ŸŽฏ Setting realtime process priority...") + import psutil + p = psutil.Process() + p.nice(psutil.REALTIME_PRIORITY_CLASS) + + print("โœ… Max power production optimizations applied!") + print("="*60 + "\n") + + except Exception as e: + print(f"โš ๏ธ Some optimizations require admin privileges: {e}") + print(" Run with administrator for full power mode\n") + + def load_config(self): + """Load configuration from file""" + if os.path.exists(CONFIG_FILE): + with open(CONFIG_FILE, 'r') as f: + return json.load(f) + return { + 'auto_launch': True, + 'scheduled_launch_date': LAUNCH_DATE.isoformat(), + 'last_launch': None, + 'launch_count': 0, + 'enabled_services': [s['name'] for s in SERVICES] + } + + def save_config(self): + """Save configuration to file""" + with open(CONFIG_FILE, 'w') as f: + json.dump(self.config, f, indent=2) + + def check_port(self, port): + """Check if port is in use""" + import socket + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(('localhost', port)) == 0 + + def start_service(self, service): + """Start a single service""" + if service['name'] not in self.config['enabled_services']: + print(f"โญ๏ธ Skipping {service['name']} (disabled)") + return None + + print(f"\n๐Ÿš€ Starting {service['name']} on port {service['port']}...") + + # Check if already running + if self.check_port(service['port']): + print(f" โš ๏ธ Port {service['port']} already in use") + return None + + # Build 
command + if service['type'] == 'python': + cmd = f"python {service['command']}" + if sys.platform == 'win32': + cmd = f".venv\\Scripts\\python.exe {service['command']}" + else: + cmd = service['command'] + + # Set working directory + cwd = service.get('cwd', os.getcwd()) + if not os.path.isabs(cwd): + cwd = os.path.join(os.getcwd(), cwd) + + try: + # Start process + if sys.platform == 'win32': + process = subprocess.Popen( + cmd, + shell=True, + cwd=cwd, + creationflags=subprocess.CREATE_NEW_CONSOLE + ) + else: + process = subprocess.Popen( + cmd, + shell=True, + cwd=cwd + ) + + # Wait a bit for startup + time.sleep(2) + + # Verify it started + if self.check_port(service['port']): + print(f" โœ… {service['name']} started successfully") + self.processes[service['name']] = { + 'process': process, + 'service': service, + 'started': datetime.now().isoformat() + } + return process + else: + print(f" โŒ {service['name']} failed to start") + return None + + except Exception as e: + print(f" โŒ Error starting {service['name']}: {e}") + return None + + def start_all_services(self): + """Start all services in order with max power production mode""" + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster All-in-One Launch Manager โ•‘ +โ•‘ MAX POWER PRODUCTION MODE โ•‘ +โ•‘ Starting all services... 
โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + # Apply max power production optimizations + self.apply_production_optimizations() + + self.running = True + started = 0 + failed = 0 + + for service in SERVICES: + # Apply startup delay + if service['startup_delay'] > 0: + time.sleep(service['startup_delay']) + + result = self.start_service(service) + + if result: + started += 1 + else: + failed += 1 + if service['critical']: + print(f"\nโš ๏ธ Critical service {service['name']} failed to start!") + + # Update config + self.config['last_launch'] = datetime.now().isoformat() + self.config['launch_count'] += 1 + self.save_config() + + # Summary + print("\n" + "="*60) + print("๐Ÿ“Š LAUNCH SUMMARY") + print("="*60) + print(f"โœ… Started: {started} services") + print(f"โŒ Failed: {failed} services") + print(f"๐Ÿ• Launch time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print(f"๐Ÿ“ˆ Total launches: {self.config['launch_count']}") + + # Open main dashboard + if started > 0: + print("\n๐ŸŒ Opening Universal Launcher dashboard...") + time.sleep(3) + webbrowser.open('http://localhost:7000') + + return started, failed + + def stop_all_services(self): + """Stop all running services""" + print("\n๐Ÿ›‘ Stopping all services...") + + for name, info in self.processes.items(): + try: + print(f" Stopping {name}...") + info['process'].terminate() + info['process'].wait(timeout=5) + print(f" โœ… {name} stopped") + except: + try: + info['process'].kill() + print(f" โš ๏ธ {name} force killed") + except: + print(f" โŒ Failed to stop {name}") + + self.processes = {} + self.running = False + print("\nโœ… All services stopped") + + def check_scheduled_launch(self): + """Check if it's time for scheduled launch""" + scheduled_date = datetime.fromisoformat(self.config['scheduled_launch_date']) + now = datetime.now() + + if now >= 
scheduled_date and not self.running: + print(f"\nโฐ SCHEDULED LAUNCH TRIGGERED!") + print(f" Scheduled for: {scheduled_date}") + print(f" Current time: {now}") + self.start_all_services() + return True + + return False + + def countdown_to_launch(self): + """Display countdown to scheduled launch""" + scheduled_date = datetime.fromisoformat(self.config['scheduled_launch_date']) + now = datetime.now() + + if now >= scheduled_date: + return "LAUNCH TIME REACHED!" + + delta = scheduled_date - now + days = delta.days + hours, remainder = divmod(delta.seconds, 3600) + minutes, seconds = divmod(remainder, 60) + + return f"{days}d {hours}h {minutes}m {seconds}s" + + def get_status(self): + """Get status of all services""" + status = { + 'running': self.running, + 'services': {}, + 'scheduled_launch': self.config['scheduled_launch_date'], + 'countdown': self.countdown_to_launch() + } + + for service in SERVICES: + is_running = self.check_port(service['port']) + status['services'][service['name']] = { + 'port': service['port'], + 'running': is_running, + 'critical': service['critical'], + 'url': f"http://localhost:{service['port']}" + } + + return status + + def create_startup_script(self): + """Create startup script for Windows""" + script_path = Path('networkbuster_startup.bat') + + script_content = f"""@echo off +echo ======================================== +echo NetworkBuster All-in-One Launcher +echo ======================================== +echo. + +cd /d "%~dp0" + +REM Activate virtual environment +call .venv\\Scripts\\activate.bat + +REM Launch NetworkBuster +python networkbuster_launcher.py --start + +echo. +echo Press any key to exit... 
+pause > nul +""" + + with open(script_path, 'w') as f: + f.write(script_content) + + print(f"โœ… Startup script created: {script_path}") + return script_path + + def create_scheduled_task(self): + """Create Windows scheduled task with admin privileges and thumbnail extraction""" + task_name = "NetworkBuster_ScheduledLaunch" + scheduled_date = datetime.fromisoformat(self.config['scheduled_launch_date']) + + # Create task XML with elevated privileges for overclocking and thumbnail extraction + task_xml = f""" + + + NetworkBuster Scheduled Launch - Administrator Mode with Thumbnail Extraction + NetworkBuster + + + + {scheduled_date.isoformat()} + true + + + + + InteractiveToken + HighestAvailable + + + + IgnoreNew + false + false + true + true + false + true + true + PT0S + 4 + + + + powershell.exe + -ExecutionPolicy Bypass -WindowStyle Normal -File "{os.path.join(os.getcwd(), 'run_launcher_admin.ps1')}" + {os.getcwd()} + + + +""" + + # Save XML + xml_path = Path('networkbuster_task.xml') + with open(xml_path, 'w', encoding='utf-16') as f: + f.write(task_xml) + + # Create scheduled task with admin privileges + try: + cmd = f'schtasks /Create /TN "{task_name}" /XML "{xml_path}" /F' + result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + + if result.returncode == 0: + print(f"โœ… Scheduled task created: {task_name}") + print(f" Launch date: {scheduled_date.strftime('%Y-%m-%d %H:%M:%S')}") + print(f" Run level: Administrator (for overclocking)") + print(f" Priority: High") + return True + else: + print(f"โš ๏ธ Failed to create scheduled task.") + print(f" Error: {result.stderr}") + print(f" Tip: Run PowerShell as Administrator") + return False + except Exception as e: + print(f"โš ๏ธ Failed to create scheduled task: {e}") + print(f" Run PowerShell as Administrator for overclocking features") + return False + +def main(): + import argparse + + parser = argparse.ArgumentParser(description='NetworkBuster All-in-One Launch Manager') + 
parser.add_argument('--start', action='store_true', help='Start all services') + parser.add_argument('--stop', action='store_true', help='Stop all services') + parser.add_argument('--status', action='store_true', help='Show status') + parser.add_argument('--schedule', action='store_true', help='Create scheduled launch') + parser.add_argument('--interactive', action='store_true', help='Interactive mode') + + args = parser.parse_args() + + manager = NetworkBusterManager() + + if args.start: + manager.start_all_services() + + # Keep running and check for scheduled launches + print("\n๐Ÿ”„ Manager running. Press Ctrl+C to stop all services...") + try: + while True: + time.sleep(60) + manager.check_scheduled_launch() + except KeyboardInterrupt: + print("\n\n๐Ÿ›‘ Stopping all services...") + manager.stop_all_services() + + elif args.stop: + manager.stop_all_services() + + elif args.status: + status = manager.get_status() + print("\n๐Ÿ“Š NETWORKBUSTER STATUS") + print("="*60) + print(f"System Running: {status['running']}") + print(f"Scheduled Launch: {status['scheduled_launch']}") + print(f"Countdown: {status['countdown']}") + print("\nServices:") + for name, info in status['services'].items(): + status_icon = "โœ…" if info['running'] else "โŒ" + critical = " [CRITICAL]" if info['critical'] else "" + print(f" {status_icon} {name:20} Port {info['port']}{critical}") + + elif args.schedule: + print("\nโฐ SCHEDULED LAUNCH SETUP") + print("="*60) + print(f"Launch Date: {manager.config['scheduled_launch_date']}") + print(f"Countdown: {manager.countdown_to_launch()}") + print("\n๐Ÿ“‹ Creating startup script...") + manager.create_startup_script() + print("\n๐Ÿ“… Creating scheduled task...") + manager.create_scheduled_task() + + else: + # Interactive mode + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster 
All-in-One Launch Manager โ•‘ +โ•‘ Package all services into unified launcher โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + print("\n๐Ÿ“ฆ PACKAGE INFORMATION") + print("="*60) + print(f"Total Services: {len(SERVICES)}") + print(f"Critical Services: {sum(1 for s in SERVICES if s['critical'])}") + print(f"Port Range: 3000-8000") + print(f"Launch Count: {manager.config['launch_count']}") + + print("\nโฐ SCHEDULED LAUNCH") + print("="*60) + scheduled_date = datetime.fromisoformat(manager.config['scheduled_launch_date']) + print(f"Scheduled Date: {scheduled_date.strftime('%A, %B %d, %Y at %I:%M %p')}") + print(f"Countdown: {manager.countdown_to_launch()}") + + print("\n๐Ÿ“‹ SERVICES") + print("="*60) + for service in SERVICES: + critical = " [CRITICAL]" if service['critical'] else "" + print(f" โ€ข {service['name']:20} Port {service['port']}{critical}") + + print("\n๐ŸŽฎ OPTIONS") + print("="*60) + print("1. Start all services now") + print("2. Show status") + print("3. Create scheduled launch") + print("4. Configure services") + print("5. Exit") + + choice = input("\nEnter choice (1-5): ").strip() + + if choice == '1': + manager.start_all_services() + print("\n๐Ÿ”„ Manager running. 
Press Ctrl+C to stop all services...") + try: + while True: + time.sleep(60) + except KeyboardInterrupt: + manager.stop_all_services() + + elif choice == '2': + status = manager.get_status() + print("\n๐Ÿ“Š STATUS") + print("="*60) + for name, info in status['services'].items(): + status_icon = "โœ… ONLINE" if info['running'] else "โŒ OFFLINE" + print(f"{name:20} {status_icon:15} {info['url']}") + + elif choice == '3': + print("\nโฐ Creating scheduled launch...") + manager.create_startup_script() + manager.create_scheduled_task() + print("\nโœ… Scheduled launch configured!") + + elif choice == '4': + print("\nโš™๏ธ Service Configuration") + print("="*60) + for i, service in enumerate(SERVICES, 1): + enabled = "โœ…" if service['name'] in manager.config['enabled_services'] else "โŒ" + print(f"{i}. {enabled} {service['name']}") + + print("\nEnter service numbers to toggle (comma-separated) or 'done':") + toggle = input().strip() + + if toggle.lower() != 'done': + for num in toggle.split(','): + try: + idx = int(num.strip()) - 1 + service = SERVICES[idx] + if service['name'] in manager.config['enabled_services']: + manager.config['enabled_services'].remove(service['name']) + print(f"โŒ Disabled {service['name']}") + else: + manager.config['enabled_services'].append(service['name']) + print(f"โœ… Enabled {service['name']}") + except: + pass + + manager.save_config() + print("\nโœ… Configuration saved!") + +if __name__ == '__main__': + main() diff --git a/networkbuster_mission_runner.py b/networkbuster_mission_runner.py new file mode 100644 index 0000000..e8c0ee4 --- /dev/null +++ b/networkbuster_mission_runner.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Mission Runner +Complete system simulation demonstrating all integrated capabilities +""" + +import sys +import time +import subprocess +import platform +from pathlib import Path +from datetime import datetime + +# Import available modules +try: + from security_verification import 
UserVerification, SecurityLevel + SECURITY_AVAILABLE = True +except ImportError: + SECURITY_AVAILABLE = False + print("โš ๏ธ Security module unavailable") + +try: + from drone_flight_system import DroneState, UnbreakableAutopilot, ScanAlgorithms + DRONE_AVAILABLE = True +except ImportError: + DRONE_AVAILABLE = False + print("โš ๏ธ Drone system unavailable") + +try: + from system_health import SystemHealthMonitor + HEALTH_AVAILABLE = True +except ImportError: + HEALTH_AVAILABLE = False + print("โš ๏ธ Health monitor unavailable") + + +class NetworkBusterMission: + """Complete NetworkBuster mission orchestrator.""" + + def __init__(self): + self.start_time = datetime.now() + self.mission_status = "INITIALIZING" + self.mission_log = [] + self.authenticated_user = None + self.security_level = 0 + + def log_event(self, event, status="INFO"): + """Log mission event.""" + timestamp = datetime.now().strftime("%H:%M:%S") + log_entry = f"[{timestamp}] {status}: {event}" + self.mission_log.append(log_entry) + + if status == "ERROR": + print(f"โŒ {event}") + elif status == "SUCCESS": + print(f"โœ… {event}") + elif status == "WARNING": + print(f"โš ๏ธ {event}") + else: + print(f"โ„น๏ธ {event}") + + def print_header(self, title): + """Print formatted section header.""" + print("\n" + "โ•" * 70) + print(f" {title}") + print("โ•" * 70) + + def run_phase_1_authentication(self): + """Phase 1: Security & Authentication.""" + self.print_header("PHASE 1: SECURITY & AUTHENTICATION") + + if not SECURITY_AVAILABLE: + self.log_event("Security module not available, running in open mode", "WARNING") + return True + + print("\n๐Ÿ” Initiating secure authentication...") + verifier = UserVerification() + + # Check for existing session + session = verifier.load_session() + + if not session: + print("No active session found. 
Authenticating as admin...") + success, session = verifier.authenticate( + username="admin", + password="admin123", + interactive=False + ) + + if not success: + self.log_event("Authentication failed", "ERROR") + return False + + self.authenticated_user = session['username'] + self.security_level = session['level'] + + self.log_event(f"Authenticated as {self.authenticated_user} (Level {self.security_level})", "SUCCESS") + + # Verify operator clearance + if not verifier.require_level(SecurityLevel.OPERATOR): + self.log_event("Insufficient clearance for mission operations", "ERROR") + return False + + self.log_event("Security clearance verified", "SUCCESS") + return True + + def run_phase_2_system_check(self): + """Phase 2: System Health & Environment Check.""" + self.print_header("PHASE 2: SYSTEM HEALTH CHECK") + + # Platform info + system_info = { + "Platform": platform.system(), + "Version": platform.version(), + "Architecture": platform.machine(), + "Python": platform.python_version(), + "Processor": platform.processor(), + } + + print("\n๐Ÿ–ฅ๏ธ System Information:") + for key, value in system_info.items(): + print(f" {key}: {value}") + + self.log_event(f"Running on {system_info['Platform']} {system_info['Architecture']}", "SUCCESS") + + # Check Node.js availability + print("\n๐Ÿ” Checking Node.js installation...") + try: + result = subprocess.run( + ["node", "--version"], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0: + node_version = result.stdout.strip() + print(f" โœ“ Node.js {node_version} detected") + self.log_event(f"Node.js {node_version} available", "SUCCESS") + else: + self.log_event("Node.js not found", "WARNING") + except Exception as e: + self.log_event(f"Node.js check failed: {e}", "WARNING") + + # Check Python environment + print("\n๐Ÿ Python Environment:") + print(f" Python: {sys.version}") + print(f" Executable: {sys.executable}") + + # Check critical modules + modules = ["pathlib", "subprocess", "platform", 
"datetime"] + print("\n๐Ÿ“ฆ Module Status:") + for module in modules: + try: + __import__(module) + print(f" โœ“ {module}") + except ImportError: + print(f" โœ— {module} (missing)") + + self.log_event("System health check completed", "SUCCESS") + return True + + def run_phase_3_drone_operations(self): + """Phase 3: Autonomous Drone Operations.""" + self.print_header("PHASE 3: DRONE FLIGHT OPERATIONS") + + if not DRONE_AVAILABLE: + self.log_event("Drone system not available", "WARNING") + return True + + print("\n๐Ÿš Initializing autonomous drone system...") + time.sleep(1) + + # Create drone fleet + drones = [] + for i, drone_id in enumerate(["ALPHA-1", "BETA-2"], 1): + drone = DroneState(drone_id=drone_id) + drones.append(drone) + print(f" โœ“ Drone {drone_id} initialized") + + self.log_event(f"Initialized {len(drones)} drone units", "SUCCESS") + + # Mission 1: Reconnaissance + print("\n๐Ÿ“ก MISSION 1: Reconnaissance Spiral Scan") + print("-" * 70) + drone1 = drones[0] + autopilot1 = UnbreakableAutopilot(drone1) + + path1 = ScanAlgorithms.generate_spiral_search(0, 0, 40, spacing=10.0) + print(f"Generated {len(path1)} waypoints for recon pattern") + autopilot1.execute_pattern("RECON_SPIRAL", path1[:10]) # Execute 10 waypoints + + self.log_event(f"Drone {drone1.id} completed reconnaissance", "SUCCESS") + + time.sleep(1) + + # Mission 2: Detailed Mapping + print("\n๐Ÿ—บ๏ธ MISSION 2: Grid Mapping Scan") + print("-" * 70) + drone2 = drones[1] if len(drones) > 1 else drones[0] + drone2.battery = 100.0 + drone2.integrity = 100.0 + autopilot2 = UnbreakableAutopilot(drone2) + + path2 = ScanAlgorithms.generate_grid_raster(40, 40, altitude=18.0, density=12.0) + print(f"Generated {len(path2)} waypoints for grid pattern") + autopilot2.execute_pattern("GRID_MAP", path2[:8]) # Execute 8 waypoints + + self.log_event(f"Drone {drone2.id} completed mapping mission", "SUCCESS") + + # Fleet status + print("\n" + "โ”€" * 70) + print(" FLEET STATUS REPORT") + print("โ”€" * 70) + for 
drone in drones: + print(f"\n {drone.id}:") + print(f" Battery: {drone.battery:.1f}%") + print(f" Integrity: {drone.integrity}%") + print(f" Status: {drone.status}") + print(f" Position: ({drone.position['x']:.1f}, {drone.position['y']:.1f}, {drone.position['z']:.1f})") + + self.log_event("All drone operations completed successfully", "SUCCESS") + return True + + def run_phase_4_network_monitoring(self): + """Phase 4: Network & Port Monitoring.""" + self.print_header("PHASE 4: NETWORK MONITORING") + + print("\n๐Ÿ”Œ Checking NetworkBuster server ports...") + + ports = [ + (3000, "Web Server"), + (3001, "API Server"), + (3002, "Audio Stream") + ] + + for port, name in ports: + if platform.system() == "Windows": + result = subprocess.run([ + "powershell", "-Command", + f"Get-NetTCPConnection -LocalPort {port} -State Listen -ErrorAction SilentlyContinue" + ], capture_output=True, text=True) + is_active = bool(result.stdout.strip()) + else: + result = subprocess.run( + f"ss -tlnp 2>/dev/null | grep :{port} || netstat -tlnp 2>/dev/null | grep :{port}", + shell=True, capture_output=True, text=True + ) + is_active = bool(result.stdout.strip()) + + status = "๐ŸŸข ACTIVE" if is_active else "โšช INACTIVE" + print(f" Port {port} ({name}): {status}") + + if is_active: + self.log_event(f"{name} (:{port}) is running", "SUCCESS") + else: + self.log_event(f"{name} (:{port}) is not running", "WARNING") + + return True + + def run_phase_5_data_collection(self): + """Phase 5: Data Collection & Analysis.""" + self.print_header("PHASE 5: DATA COLLECTION & ANALYSIS") + + print("\n๐Ÿ“Š Collecting mission telemetry...") + + # Simulated data collection + data_points = { + "Total Mission Duration": f"{(datetime.now() - self.start_time).total_seconds():.1f}s", + "Log Entries": len(self.mission_log), + "Security Level": self.security_level, + "Authenticated User": self.authenticated_user or "N/A", + "Platform": platform.system(), + "Python Version": platform.python_version(), + } + + 
print("\n๐Ÿ“ˆ Mission Metrics:") + for key, value in data_points.items(): + print(f" {key}: {value}") + + self.log_event("Data collection completed", "SUCCESS") + return True + + def generate_mission_report(self): + """Generate final mission report.""" + self.print_header("MISSION COMPLETE - FINAL REPORT") + + duration = (datetime.now() - self.start_time).total_seconds() + + print(f"\nโฑ๏ธ Mission Duration: {duration:.2f} seconds") + print(f"๐Ÿ“‹ Total Events: {len(self.mission_log)}") + + # Count event types + success_count = sum(1 for log in self.mission_log if "SUCCESS" in log) + warning_count = sum(1 for log in self.mission_log if "WARNING" in log) + error_count = sum(1 for log in self.mission_log if "ERROR" in log) + + print(f"\n๐Ÿ“Š Event Summary:") + print(f" โœ… Success: {success_count}") + print(f" โš ๏ธ Warning: {warning_count}") + print(f" โŒ Error: {error_count}") + + print(f"\n๐Ÿ“ Mission Log:") + print("โ”€" * 70) + for log in self.mission_log: + print(f" {log}") + + # Final status + if error_count == 0: + self.mission_status = "COMPLETED - ALL SYSTEMS NOMINAL" + print(f"\n๐ŸŽฏ Status: {self.mission_status}") + elif error_count < 3: + self.mission_status = "COMPLETED WITH WARNINGS" + print(f"\nโš ๏ธ Status: {self.mission_status}") + else: + self.mission_status = "COMPLETED WITH ERRORS" + print(f"\nโŒ Status: {self.mission_status}") + + print("\n" + "โ•" * 70) + print(" NETWORKBUSTER MISSION TERMINATED") + print("โ•" * 70) + + def execute_full_mission(self): + """Execute complete mission sequence.""" + print("\n" + "โ•”" + "โ•" * 68 + "โ•—") + print("โ•‘" + " NETWORKBUSTER INTEGRATED MISSION SEQUENCE".center(68) + "โ•‘") + print("โ•‘" + f" {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}".center(68) + "โ•‘") + print("โ•š" + "โ•" * 68 + "โ•") + + self.mission_status = "IN PROGRESS" + + try: + # Phase 1: Authentication + if not self.run_phase_1_authentication(): + self.log_event("Mission aborted - authentication failure", "ERROR") + return 
False + + time.sleep(1) + + # Phase 2: System Check + if not self.run_phase_2_system_check(): + self.log_event("Mission aborted - system check failure", "ERROR") + return False + + time.sleep(1) + + # Phase 3: Drone Operations + if not self.run_phase_3_drone_operations(): + self.log_event("Drone operations failed", "WARNING") + + time.sleep(1) + + # Phase 4: Network Monitoring + if not self.run_phase_4_network_monitoring(): + self.log_event("Network monitoring incomplete", "WARNING") + + time.sleep(1) + + # Phase 5: Data Collection + if not self.run_phase_5_data_collection(): + self.log_event("Data collection incomplete", "WARNING") + + # Final Report + self.generate_mission_report() + + return True + + except KeyboardInterrupt: + print("\n\nโš ๏ธ MISSION INTERRUPTED BY USER") + self.mission_status = "ABORTED" + self.log_event("Mission manually aborted", "WARNING") + return False + + except Exception as e: + print(f"\n\nโŒ CRITICAL ERROR: {e}") + self.mission_status = "FAILED" + self.log_event(f"Mission failed: {e}", "ERROR") + import traceback + traceback.print_exc() + return False + + +def main(): + """Main entry point.""" + mission = NetworkBusterMission() + mission.execute_full_mission() + + +if __name__ == "__main__": + main() diff --git a/networkbuster_startup.bat b/networkbuster_startup.bat new file mode 100644 index 0000000..40b36ab --- /dev/null +++ b/networkbuster_startup.bat @@ -0,0 +1,17 @@ +@echo off +echo ======================================== +echo NetworkBuster All-in-One Launcher +echo ======================================== +echo. + +cd /d "%~dp0" + +REM Activate virtual environment +call .venv\Scripts\activate.bat + +REM Launch NetworkBuster +python networkbuster_launcher.py --start + +echo. +echo Press any key to exit... 
+pause > nul diff --git a/networkbuster_task.xml b/networkbuster_task.xml new file mode 100644 index 0000000..8f5d1da Binary files /dev/null and b/networkbuster_task.xml differ diff --git a/node-v24.12.0-x64.msi b/node-v24.12.0-x64.msi new file mode 100644 index 0000000..b102ea7 Binary files /dev/null and b/node-v24.12.0-x64.msi differ diff --git a/os/lfs/Dockerfile b/os/lfs/Dockerfile new file mode 100644 index 0000000..0227b2c --- /dev/null +++ b/os/lfs/Dockerfile @@ -0,0 +1,37 @@ +FROM ubuntu:24.04 + +ENV DEBIAN_FRONTEND=noninteractive +ENV KERNEL_VERSION=6.8.13 +ENV SKIP_KERNEL=false + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + gcc \ + make \ + bc \ + bison \ + flex \ + libelf-dev \ + libncurses-dev \ + wget \ + ca-certificates \ + git \ + bzip2 \ + gzip \ + cpio \ + sudo \ + qemu-system-x86 \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /workspace + +# Prepare a host-mounted cache directory for kernel builds (mounted by CI run) +RUN mkdir -p /workspace/kernel-cache && chown root:root /workspace/kernel-cache + +COPY build-lfs.sh /workspace/build-lfs.sh +RUN chmod +x /workspace/build-lfs.sh + +# Default entrypoint runs the build and places artifacts under /workspace/output +ENTRYPOINT ["/workspace/build-lfs.sh"] diff --git a/os/lfs/README.md b/os/lfs/README.md new file mode 100644 index 0000000..298851d --- /dev/null +++ b/os/lfs/README.md @@ -0,0 +1,31 @@ +# LFS Scaffold - x86_64 rootfs (PoC) + +This folder contains a minimal, reproducible scaffold to build a tiny "LFS-like" root filesystem (rootfs) for x86_64 and produce a tarball artifact. This is a practical starting point for a full Linux From Scratch (LFS) workflow. + +Overview +- The build produces a BusyBox-based rootfs and a compressed cpio initramfs, and optionally compiles an x86_64 Linux kernel (controlled by the `KERNEL_VERSION` and `SKIP_KERNEL` environment variables). 
+- A basic GitHub Actions workflow (`.github/workflows/lfs-build.yml`) will run the build inside a Docker container, upload the `rootfs.tar.gz` artifact and attempt a quick QEMU smoke test (if a kernel is available on the runner or the pipeline is configured to build the kernel). + +Notes & caveats +- A full LFS build (toolchain + all packages + kernel) is large and may take many hours. This scaffold produces a working minimal rootfs suitable for booting with an initramfs and a kernel. +- CI runners have limits (time, CPU). The CI is configured for a quick smoke test; expand it locally for full LFS. +- The scripts are opinionated and intended to be extended. Use them as a reproducible starting point. + +Quick local usage +1. Build with Docker (recommended): + docker build -t lfs-build -f os/lfs/Dockerfile . + docker run --rm -v "$PWD/os/lfs/output:/output" lfs-build + +2. On success artifacts are placed in `os/lfs/output/`: + - `rootfs.tar.gz` (tarball of the root filesystem) + - `rootfs.cpio.gz` (compressed initramfs for quick QEMU boot) + +CI +- The workflow supports manual dispatch with kernel build enabled. To trigger a kernel build from the GitHub UI go to **Actions โ†’ Build LFS rootfs (PoC)** โ†’ **Run workflow**, then set `build_kernel=true` and optionally specify `kernel_version` (default: `6.8.13`). +- The workflow caches kernel sources and built kernels using the runner cache keyed by kernel version. When you run a manual build with kernel enabled, the job restores `./.cache/linux-` and mounts it into the build container at `/workspace/kernel-cache` so repeated runs will reuse the downloaded tarball and any previously built `vmlinuz-`. +- A separate manual workflow `Validate LFS kernel cache` (`.github/workflows/lfs-cache-validate.yml`) is provided to validate caching behavior: it runs a kernel build to populate the cache and a dependent verification job that restores the cache and confirms the kernel is reused. 
+- For ordinary pushes the CI will skip kernel building to avoid long jobs; artifacts are still produced and uploaded. + +Contributing +- To extend toward a full LFS build: add package recipes, a toolchain phase, and a kernel build step. +- File issues or PRs to discuss further changes. diff --git a/os/lfs/build-lfs.sh b/os/lfs/build-lfs.sh new file mode 100644 index 0000000..2ff07c2 --- /dev/null +++ b/os/lfs/build-lfs.sh @@ -0,0 +1,169 @@ +#!/usr/bin/env bash +set -euo pipefail + +OUTDIR="$(pwd)/output" +BUILD_DIR="$(pwd)/build" +BUSYBOX_VERSION="1_36_1" +BUSYBOX_URL="https://busybox.net/downloads/busybox-${BUSYBOX_VERSION}.tar.bz2" + +mkdir -p "$OUTDIR" +rm -rf "$BUILD_DIR" +mkdir -p "$BUILD_DIR" + +echo "==> Build workspace: $BUILD_DIR" +cd "$BUILD_DIR" + +# Download and extract BusyBox +if [ ! -f "busybox-${BUSYBOX_VERSION}.tar.bz2" ]; then + wget -q "$BUSYBOX_URL" -O "busybox-${BUSYBOX_VERSION}.tar.bz2" +fi +rm -rf busybox-${BUSYBOX_VERSION} +tar xjf busybox-${BUSYBOX_VERSION}.tar.bz2 +cd busybox-${BUSYBOX_VERSION} + +# Configure BusyBox for a static build and minimal busybox install +make defconfig >/dev/null +# Enable static build +scripts/config --enable STATIC +make -j"$(nproc)" >/dev/null +make CONFIG_PREFIX="$BUILD_DIR/rootfs" install >/dev/null + +# Create minimal filesystem structure +ROOTFS="$BUILD_DIR/rootfs" +mkdir -p "$ROOTFS"/{proc,sys,dev,run,etc,mnt,tmp} +chmod 1777 "$ROOTFS/tmp" + +# Create a simple init +cat > "$ROOTFS/init" <<'EOF' +#!/bin/sh +mount -t proc none /proc +mount -t sysfs none /sys +echo "Booted minimal rootfs" +exec /bin/sh +EOF +chmod +x "$ROOTFS/init" + +# Fix necessary symlinks +ln -sf /bin/busybox "$ROOTFS/bin/sh" + +# Create device nodes +sudo rm -f "$ROOTFS/dev/console" +sudo mknod -m 622 "$ROOTFS/dev/console" c 5 1 || true +sudo mknod -m 666 "$ROOTFS/dev/null" c 1 3 || true + +# Create /etc/passwd and /etc/group +cat > "$ROOTFS/etc/passwd" < "$ROOTFS/etc/group" </dev/null +find . 
| cpio -H newc -o | gzip -9 > "$OUTDIR/rootfs.cpio.gz" +popd >/dev/null + +# Create tarball of rootfs +tar -C "$ROOTFS" -czf "$OUTDIR/rootfs.tar.gz" . + +# List artifacts +ls -lh "$OUTDIR" + +echo "==> Build complete: artifacts in $OUTDIR" + +# Basic smoke check: ensure /bin/sh exists in tarball +if tar tzf "$OUTDIR/rootfs.tar.gz" | grep -q "bin/sh"; then + echo "rootfs tar contains /bin/sh" +else + echo "Warning: /bin/sh not found in rootfs tar" >&2 + exit 1 +fi + +# Build Linux kernel (optional) +KERNEL_VERSION="${KERNEL_VERSION:-6.8.13}" +SKIP_KERNEL="${SKIP_KERNEL:-false}" +KERNEL_CACHE_DIR="${KERNEL_CACHE_DIR:-/workspace/kernel-cache}" +mkdir -p "$KERNEL_CACHE_DIR" + +# If kernel tarball exists in cache, prefer it +KERNEL_TAR_CACHE="$KERNEL_CACHE_DIR/linux-$KERNEL_VERSION.tar.xz" +KERNEL_BZIMAGE_CACHE="$KERNEL_CACHE_DIR/vmlinuz-$KERNEL_VERSION" + +if [ "$SKIP_KERNEL" != "true" ]; then + echo "==> Building Linux kernel $KERNEL_VERSION (this may take a while)" + cd "$BUILD_DIR" + + # Prefer cached tarball + if [ -f "$KERNEL_TAR_CACHE" ]; then + echo "Using cached kernel tarball: $KERNEL_TAR_CACHE" + cp "$KERNEL_TAR_CACHE" . + fi + + if [ ! 
-f "linux-$KERNEL_VERSION.tar.xz" ]; then + wget -q "https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-$KERNEL_VERSION.tar.xz" + fi + + # Save tarball to cache for future runs + if [ -f "linux-$KERNEL_VERSION.tar.xz" ]; then + cp "linux-$KERNEL_VERSION.tar.xz" "$KERNEL_TAR_CACHE" || true + fi + + rm -rf linux-$KERNEL_VERSION + tar xf linux-$KERNEL_VERSION.tar.xz + cd linux-$KERNEL_VERSION + + # Use a default x86_64 defconfig and build + make defconfig + + # Prefer enabling initrd support and common virtualization drivers + # (scripts/config may or may not be available depending on kernel version) + if [ -f scripts/config ]; then + scripts/config --enable CONFIG_BLK_DEV_INITRD || true + scripts/config --module CONFIG_VIRTIO_NET || true + scripts/config --module CONFIG_VIRTIO_PCI || true + scripts/config --module CONFIG_VIRTIO_BALLOON || true + fi + + make -j"$(nproc)" + + if [ -f "arch/x86/boot/bzImage" ]; then + cp "arch/x86/boot/bzImage" "$OUTDIR/vmlinuz-$KERNEL_VERSION" + echo "Kernel built: $OUTDIR/vmlinuz-$KERNEL_VERSION" + # also copy to cache so subsequent runs reuse built kernel + cp "$OUTDIR/vmlinuz-$KERNEL_VERSION" "$KERNEL_BZIMAGE_CACHE" || true + else + echo "Kernel build failed: no bzImage found" >&2 + fi +else + echo "SKIP_KERNEL=true โ€” skipping kernel build" +fi + +# Run a best-effort QEMU boot test using the built kernel if available, else a host kernel +if command -v qemu-system-x86_64 >/dev/null 2>&1; then + if [ -f "$OUTDIR/vmlinuz-$KERNEL_VERSION" ]; then + KERNEL="$OUTDIR/vmlinuz-$KERNEL_VERSION" + elif [ -f "$KERNEL_BZIMAGE_CACHE" ]; then + echo "Using cached built kernel: $KERNEL_BZIMAGE_CACHE" + KERNEL="$KERNEL_BZIMAGE_CACHE" + elif ls /boot/vmlinuz-* 2>/dev/null | head -n1 >/dev/null 2>&1; then + KERNEL="$(ls -1 /boot/vmlinuz-* | tail -n1)" + else + KERNEL="" + fi + + if [ -n "$KERNEL" ]; then + echo "Attempting QEMU boot test with kernel: $KERNEL (20s)" + qemu-system-x86_64 -kernel "$KERNEL" -initrd "$OUTDIR/rootfs.cpio.gz" -nographic 
-append "console=ttyS0 root=/dev/ram0 rw init=/init" -m 512 -no-reboot -display none & + QEMU_PID=$! + sleep 20 + if ps -p $QEMU_PID >/dev/null 2>&1; then + echo "QEMU running โ€” killing after smoke test" + kill $QEMU_PID || true + fi + else + echo "No kernel available for QEMU boot test; skipping" + fi +else + echo "QEMU is not installed โ€” skipping boot test" +fi diff --git a/os/lfs/validate-cache.ps1 b/os/lfs/validate-cache.ps1 new file mode 100644 index 0000000..8aa9ccb --- /dev/null +++ b/os/lfs/validate-cache.ps1 @@ -0,0 +1,32 @@ +param( + [string]$KernelVersion = '6.8.13' +) + +$cacheDir = Join-Path -Path (Get-Location) -ChildPath ".cache/linux-$KernelVersion" +if (-not (Test-Path $cacheDir)) { New-Item -ItemType Directory -Path $cacheDir | Out-Null } + +Write-Host "Building lfs-build container..." +docker build -t lfs-build -f os/lfs/Dockerfile . + +Write-Host "First run: building kernel and populating cache" +docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION=$KernelVersion -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "${cacheDir}:/workspace/kernel-cache" -v "${PWD}/os/lfs/output:/workspace/output" lfs-build + +if (Test-Path (Join-Path $cacheDir "vmlinuz-$KernelVersion")) { + Write-Host "vmlinuz-$KernelVersion found in cache" +} else { + Write-Error "vmlinuz-$KernelVersion not found in cache" + exit 1 +} + +Write-Host "Second run: expecting to use cached tarball or built kernel" +docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION=$KernelVersion -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "${cacheDir}:/workspace/kernel-cache" -v "${PWD}/os/lfs/output:/workspace/output" lfs-build | Tee-Object -FilePath "$env:TEMP\lfs-second-run.log" + +$log = Get-Content "$env:TEMP\lfs-second-run.log" +if ($log -match 'Using cached kernel tarball' -or $log -match 'Using cached built kernel') { + Write-Host "Cache was used on second run" +} else { + Write-Error "Cache was NOT used on second run (check logs)" + exit 1 +} + +Write-Host "Cache verification 
complete: SUCCESS" \ No newline at end of file diff --git a/os/lfs/validate-cache.sh b/os/lfs/validate-cache.sh new file mode 100644 index 0000000..c5eb3eb --- /dev/null +++ b/os/lfs/validate-cache.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -euo pipefail + +KERNEL_VERSION="${1:-6.8.13}" +CACHE_DIR="$(pwd)/.cache/linux-${KERNEL_VERSION}" +mkdir -p "$CACHE_DIR" + +# Build container +echo "==> Building lfs-build container" +docker build -t lfs-build -f os/lfs/Dockerfile . + +# First run: build kernel and populate cache +echo "==> First run: building kernel and populating cache" +docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="$KERNEL_VERSION" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$CACHE_DIR:/workspace/kernel-cache" -v "$(pwd)/os/lfs/output:/workspace/output" lfs-build + +if [ -f "$CACHE_DIR/vmlinuz-$KERNEL_VERSION" ]; then + echo "vmlinuz-$KERNEL_VERSION is present in cache" +else + echo "ERROR: vmlinuz-$KERNEL_VERSION not found in cache" >&2 + exit 1 +fi + +# Second run: should use cache +echo "==> Second run: expecting to use cached tarball or built kernel" +docker run --rm -e SKIP_KERNEL=false -e KERNEL_VERSION="$KERNEL_VERSION" -e KERNEL_CACHE_DIR=/workspace/kernel-cache -v "$CACHE_DIR:/workspace/kernel-cache" -v "$(pwd)/os/lfs/output:/workspace/output" lfs-build | tee /tmp/lfs-second-run.log + +if grep -q "Using cached kernel tarball" /tmp/lfs-second-run.log || grep -q "Using cached built kernel" /tmp/lfs-second-run.log; then + echo "Cache was used on second run" +else + echo "Cache was NOT used on second run (check logs)" >&2 + exit 1 +fi + +echo "Cache verification complete: SUCCESS" diff --git a/package-lock.json b/package-lock.json index 58456fe..4166f04 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,6 +9,7 @@ "version": "1.0.1", "license": "MIT", "dependencies": { + "@azure/service-bus": "^7.9.3", "express": "^5.2.1", "http-proxy": "^1.18.1", "react": "^19.2.3", @@ -23,6 +24,243 @@ "npm": ">=10.0.0" } }, + 
"node_modules/@azure/abort-controller": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-1.1.0.tgz", + "integrity": "sha512-TrRLIoSQVzfAJX9H1JeFjzAoDGcoK1IYX1UImfceTZpsyYfWr09Ss1aHW1y5TrrR3iq6RZLBwJ3E24uwPhwahw==", + "license": "MIT", + "dependencies": { + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/@azure/core-amqp": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/@azure/core-amqp/-/core-amqp-4.4.1.tgz", + "integrity": "sha512-eiVwGOMpHWPS6YsX0kjW4rfH+f0Pb5L2EKNDbuXldVkuFKSEfROdl81xHLsMAl5PP5wiiTjErcMcKsJqwyaRqw==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.1.2", + "@azure/core-auth": "^1.10.0", + "@azure/core-util": "^1.13.0", + "@azure/logger": "^1.3.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "rhea": "^3.0.0", + "rhea-promise": "^3.0.0", + "tslib": "^2.6.2", + "util": "^0.12.5" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-amqp/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-auth": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@azure/core-auth/-/core-auth-1.10.1.tgz", + "integrity": "sha512-ykRMW8PjVAn+RS6ww5cmK9U2CyH9p4Q88YJwvUslfuMmN98w/2rdGRLPqJYObapBCdzBVeDgYWdJnFPFb7qzpg==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.1.2", + "@azure/core-util": "^1.13.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-auth/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-client": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/@azure/core-client/-/core-client-1.10.1.tgz", + "integrity": "sha512-Nh5PhEOeY6PrnxNPsEHRr9eimxLwgLlpmguQaHKBinFYA/RU9+kOYVOQqOrTsCL+KSxrLLl1gD8Dk5BFW/7l/w==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.1.2", + "@azure/core-auth": "^1.10.0", + "@azure/core-rest-pipeline": "^1.22.0", + "@azure/core-tracing": "^1.3.0", + "@azure/core-util": "^1.13.0", + "@azure/logger": "^1.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-client/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-paging": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/@azure/core-paging/-/core-paging-1.6.2.tgz", + "integrity": "sha512-YKWi9YuCU04B55h25cnOYZHxXYtEvQEbKST5vqRga7hWY9ydd3FZHdeQF8pyh+acWZvppw13M/LMGx0LABUVMA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/@azure/core-rest-pipeline/-/core-rest-pipeline-1.22.2.tgz", + "integrity": "sha512-MzHym+wOi8CLUlKCQu12de0nwcq9k9Kuv43j4Wa++CsCpJwps2eeBQwD2Bu8snkxTtDKDx4GwjuR9E8yC8LNrg==", + "license": "MIT", + 
"dependencies": { + "@azure/abort-controller": "^2.1.2", + "@azure/core-auth": "^1.10.0", + "@azure/core-tracing": "^1.3.0", + "@azure/core-util": "^1.13.0", + "@azure/logger": "^1.3.0", + "@typespec/ts-http-runtime": "^0.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-tracing": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@azure/core-tracing/-/core-tracing-1.3.1.tgz", + "integrity": "sha512-9MWKevR7Hz8kNzzPLfX4EAtGM2b8mr50HPDBvio96bURP/9C+HjdH3sBlLSNNrvRAr5/k/svoH457gB5IKpmwQ==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-util": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/@azure/core-util/-/core-util-1.13.1.tgz", + "integrity": "sha512-XPArKLzsvl0Hf0CaGyKHUyVgF7oDnhKoP85Xv6M4StF/1AhfORhZudHtOyf2s+FcbuQ9dPRAjB8J2KvRRMUK2A==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.1.2", + "@typespec/ts-http-runtime": "^0.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/core-util/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-xml": { 
+ "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@azure/core-xml/-/core-xml-1.5.0.tgz", + "integrity": "sha512-D/sdlJBMJfx7gqoj66PKVmhDDaU6TKA49ptcolxdas29X7AfvLTmfAGLjAcIMBK7UZ2o4lygHIqVckOlQU3xWw==", + "license": "MIT", + "dependencies": { + "fast-xml-parser": "^5.0.7", + "tslib": "^2.8.1" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@azure/logger/-/logger-1.3.0.tgz", + "integrity": "sha512-fCqPIfOcLE+CGqGPd66c8bZpwAji98tZ4JI9i/mlTNTlsIWslCfpg48s/ypyLxZTump5sypjrKn2/kY7q8oAbA==", + "license": "MIT", + "dependencies": { + "@typespec/ts-http-runtime": "^0.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@azure/service-bus": { + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@azure/service-bus/-/service-bus-7.9.5.tgz", + "integrity": "sha512-R5Af+4jtZZII2snLomaddMyElFtTCBRZp2qERPlP8PuISLU87eFYFM7xWzxjNd0yeiyQUBkamx/ZhOC8eWhCHA==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^1.0.0", + "@azure/core-amqp": "^4.1.1", + "@azure/core-auth": "^1.3.0", + "@azure/core-client": "^1.0.0", + "@azure/core-paging": "^1.4.0", + "@azure/core-rest-pipeline": "^1.1.0", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.1.1", + "@azure/core-xml": "^1.0.0", + "@azure/logger": "^1.0.0", + "@types/is-buffer": "^2.0.0", + "buffer": "^6.0.0", + "is-buffer": "^2.0.3", + "jssha": "^3.1.0", + "long": "^5.2.0", + "process": "^0.11.10", + "rhea-promise": "^3.0.0", + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/@babel/code-frame": { "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", @@ -54,6 +292,7 @@ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": 
"^7.27.1", "@babel/generator": "^7.28.5", @@ -1164,6 +1403,38 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/is-buffer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@types/is-buffer/-/is-buffer-2.0.2.tgz", + "integrity": "sha512-G6OXy83Va+xEo8XgqAJYOuvOMxeey9xM5XKkvwJNmN8rVdcB+r15HvHsG86hl86JvU0y1aa7Z2ERkNFYWw9ySg==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "25.0.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz", + "integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@typespec/ts-http-runtime": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@typespec/ts-http-runtime/-/ts-http-runtime-0.3.2.tgz", + "integrity": "sha512-IlqQ/Gv22xUC1r/WQm4StLkYQmaaTsXAhUVsNE0+xiyf0yRFiH5++q78U3bw6bLKDCTmh0uqKB9eG9+Bt75Dkg==", + "license": "MIT", + "dependencies": { + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, "node_modules/@vitejs/plugin-react": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.2.tgz", @@ -1198,6 +1469,50 @@ "node": ">= 0.6" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "license": "MIT", 
+ "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/baseline-browser-mapping": { "version": "2.9.7", "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz", @@ -1252,6 +1567,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1266,6 +1582,30 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -1275,6 +1615,24 @@ "node": ">= 0.8" } }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": 
"sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/call-bind-apply-helpers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", @@ -1389,6 +1747,23 @@ } } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", @@ -1537,6 +1912,15 @@ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "license": "MIT" }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, "node_modules/express": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", @@ -1580,6 +1964,24 @@ "url": "https://opencollective.com/express" } }, + "node_modules/fast-xml-parser": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.3.3.tgz", + "integrity": 
"sha512-2O3dkPAAC6JavuMm8+4+pgTk+5hoAs+CjZ+sWcQLkX9+/tHRuTkQh/Oaifr8qDmZ8iEHb771Ea6G8CdwkrgvYA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + } + ], + "license": "MIT", + "dependencies": { + "strnum": "^2.1.0" + }, + "bin": { + "fxparser": "src/cli/cli.js" + } + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -1639,6 +2041,21 @@ } } }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -1681,6 +2098,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -1740,6 +2166,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", @@ -1752,6 +2190,21 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/hasown": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", @@ -1798,6 +2251,32 @@ "node": ">=8.0.0" } }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/iconv-lite": { "version": "0.7.1", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz", @@ -1814,6 +2293,26 @@ "url": "https://opencollective.com/express" } }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": 
"sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", @@ -1829,12 +2328,115 @@ "node": ">= 0.10" } }, + "node_modules/is-arguments": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-promise": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", "license": "MIT" }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -1868,6 +2470,21 @@ "node": ">=6" } }, + "node_modules/jssha": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/jssha/-/jssha-3.3.1.tgz", + "integrity": 
"sha512-VCMZj12FCFMQYcFLPRm/0lOBbLi8uM2BhXPTqw3U4YAfs4AZfiApOoBLoN8cQE60Z50m1MYMTQVCfgF/KaCVhQ==", + "license": "BSD-3-Clause", + "engines": { + "node": "*" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + "license": "Apache-2.0" + }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", @@ -2039,6 +2656,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2046,6 +2664,15 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/postcss": { "version": "8.5.6", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", @@ -2075,6 +2702,15 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -2132,6 +2768,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", 
"license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -2164,6 +2801,26 @@ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", "license": "MIT" }, + "node_modules/rhea": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/rhea/-/rhea-3.0.4.tgz", + "integrity": "sha512-n3kw8syCdrsfJ72w3rohpoHHlmv/RZZEP9VY5BVjjo0sEGIt4YSKypBgaiA+OUSgJAzLjOECYecsclG5xbYtZw==", + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.3.3" + } + }, + "node_modules/rhea-promise": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/rhea-promise/-/rhea-promise-3.0.3.tgz", + "integrity": "sha512-a875P5YcMkePSTEWMsnmCQS7Y4v/XvIw7ZoMtJxqtQRZsqSA6PsZxuz4vktyRykPuUgdNsA6F84dS3iEXZoYnQ==", + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.0.0", + "rhea": "^3.0.0", + "tslib": "^2.6.0" + } + }, "node_modules/rollup": { "version": "4.53.3", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz", @@ -2222,6 +2879,23 @@ "node": ">= 18" } }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -2281,6 +2955,23 @@ "node": ">= 18" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + 
"license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -2378,6 +3069,18 @@ "node": ">= 0.8" } }, + "node_modules/strnum": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/strnum/-/strnum-2.1.2.tgz", + "integrity": "sha512-l63NF9y/cLROq/yqKXSLtcMeeyOfnSQlfMSlzFt/K73oIaD8DGaQWd7Z34X9GPiKqP5rbSh84Hl4bOlLcjiSrQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + } + ], + "license": "MIT" + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -2404,6 +3107,12 @@ "node": ">=0.6" } }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, "node_modules/type-is": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", @@ -2418,6 +3127,12 @@ "node": ">= 0.6" } }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "license": "MIT" + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -2458,6 +3173,19 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": 
"sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, "node_modules/vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", @@ -2473,6 +3201,7 @@ "integrity": "sha512-ITcnkFeR3+fI8P1wMgItjGrR10170d8auB4EpMLPqmx6uxElH3a/hHGQabSHKdqd4FXWO1nFIp9rRn7JQ34ACQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.5.0", @@ -2542,6 +3271,27 @@ } } }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", diff --git a/package.json b/package.json index 3680064..4a11978 100644 --- a/package.json +++ b/package.json @@ -1,4 +1,4 @@ -๏ปฟ{ +{ "name": "networkbuster-server", "version": "1.0.1", "type": "module", @@ -29,7 +29,13 @@ "proxy": "PROXY_PORT=3000 node proxy-server.js", "all": "concurrently \"npm start\" \"npm run proxy:3000\"", "build": "npm install", - "test": "echo 'No tests specified'", + "apply:network-boost": "powershell -ExecutionPolicy Bypass -File scripts/network-boost.ps1 -Apply -Confirm:$false", + "show:network-boost": "powershell -ExecutionPolicy Bypass -File 
scripts/network-boost.ps1", + "test": "echo No-tests-specified", + "test:devices": "node tests/test-device-registration.js", + "test:integration:devices": "node tests/integration/test-e2e-device-registration.js", + "test:unit:devices": "node tests/unit/test-device-status-transitions.js", + "worker:device-consumer": "node workers/deviceConsumer.js", "docker:build": "docker build -t networkbuster:latest .", "docker:run": "docker run -p 3000:3000 networkbuster:latest", "docker:compose:up": "docker-compose up -d", @@ -68,13 +74,28 @@ "bios:boot:bat": "boot-to-bios.bat", "start:all": "concurrently \"npm run start\" \"npm run security\" \"npm run timeline\"", "build:overlay": "cd challengerepo/real-time-overlay && npm install && npm run build", - "build:all": "npm run build:overlay" + "build:all": "npm run build:overlay", + "dist:zip": "node scripts/make-release.js", + "dist:nsis": "powershell -ExecutionPolicy Bypass -File scripts/build-nsis.ps1", + "release:create-shortcut": "powershell -ExecutionPolicy Bypass -File scripts/create-shortcut.ps1", + "start:desktop": "start start-desktop.bat", + "ai:gateway": "node ai-proxy-gateway.js", + "ai:gateway:dev": "node --watch ai-proxy-gateway.js", + "ai:test": "node tests/test-ai-providers.js", + "ai:cleanup": "node scripts/ai-cleanup-agent.js", + "ai:cleanup:execute": "node scripts/ai-cleanup-agent.js --execute", + "ai:train": "node scripts/ai-repo-trainer.js", + "network:optimize": "node scripts/network-path-optimizer.js", + "network:optimize:apply": "node scripts/network-path-optimizer.js --apply", + "network:report": "node scripts/network-path-optimizer.js --report", + "start:with-ai": "concurrently \"npm start\" \"npm run ai:gateway\" \"npm run proxy\"" }, "engines": { "node": "24.x", "npm": ">=10.0.0" }, "dependencies": { + "@azure/service-bus": "^7.9.3", "express": "^5.2.1", "http-proxy": "^1.18.1", "react": "^19.2.3", @@ -97,4 +118,4 @@ "@vitejs/plugin-react": "^5.1.2", "vite": "^7.2.7" } -} +} \ No newline at end of 
file diff --git a/post/direx-ct b/post/direx-ct new file mode 160000 index 0000000..ce18cab --- /dev/null +++ b/post/direx-ct @@ -0,0 +1 @@ +Subproject commit ce18cabd2e9d1c37989ac22c4a2dd47de5bd9fb1 diff --git a/post/direx-ct-main.zip b/post/direx-ct-main.zip new file mode 100644 index 0000000..c6647b6 Binary files /dev/null and b/post/direx-ct-main.zip differ diff --git a/proxy-server.js b/proxy-server.js index 8ae0e7f..ce9825e 100644 --- a/proxy-server.js +++ b/proxy-server.js @@ -3,11 +3,12 @@ import http from 'http'; import https from 'https'; const BACKEND_URL = process.env.BACKEND_URL || 'http://localhost:3001'; +const AI_GATEWAY_URL = process.env.AI_GATEWAY_URL || 'http://localhost:3002'; const PROXY_PORT = process.env.PROXY_PORT || 3000; const FRONTEND_URL = process.env.FRONTEND_URL || 'http://192.168.1.180:5173'; -// Create proxy -const proxy = httpProxy.createProxyServer({ +// Create proxy for backend +const backendProxy = httpProxy.createProxyServer({ target: BACKEND_URL, changeOrigin: true, ws: true, @@ -15,19 +16,39 @@ const proxy = httpProxy.createProxyServer({ logLevel: 'info' }); -// Error handling -proxy.on('error', (err, req, res) => { - console.error('Proxy error:', err); - res.writeHead(502, { 'Content-Type': 'application/json' }); - res.end(JSON.stringify({ error: 'Bad Gateway', message: err.message })); +// Create proxy for AI gateway +const aiProxy = httpProxy.createProxyServer({ + target: AI_GATEWAY_URL, + changeOrigin: true, + xfwd: true, + logLevel: 'info' +}); + +// Error handling for backend proxy +backendProxy.on('error', (err, req, res) => { + console.error('Backend proxy error:', err); + if (res.writeHead) { + res.writeHead(502, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'Bad Gateway', message: err.message })); + } +}); + +// Error handling for AI proxy +aiProxy.on('error', (err, req, res) => { + console.error('AI Gateway proxy error:', err); + if (res.writeHead) { + res.writeHead(502, { 
'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'AI Gateway Unavailable', message: err.message })); + } }); // Create HTTP server const server = http.createServer((req, res) => { // Add CORS headers - res.setHeader('Access-Control-Allow-Origin', FRONTEND_URL); + const allowOrigin = req.headers.origin || FRONTEND_URL; + res.setHeader('Access-Control-Allow-Origin', allowOrigin); res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, PATCH, OPTIONS'); - res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization'); + res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-Device-Id, X-API-Key'); res.setHeader('Access-Control-Allow-Credentials', 'true'); if (req.method === 'OPTIONS') { @@ -36,17 +57,28 @@ const server = http.createServer((req, res) => { return; } + // Route AI requests to AI gateway + if (req.url.startsWith('/api/ai/') || req.url.startsWith('/ai/')) { + // Strip /api prefix if present for AI gateway + const aiPath = req.url.replace(/^\/api\/ai/, '').replace(/^\/ai/, ''); + req.url = aiPath || '/'; + + console.log(`[${new Date().toISOString()}] ${req.method} ${req.url} -> AI Gateway`); + aiProxy.web(req, res); + return; + } + // Log requests console.log(`[${new Date().toISOString()}] ${req.method} ${req.url} -> ${BACKEND_URL}`); - // Forward all requests to backend - proxy.web(req, res); + // Forward all other requests to backend + backendProxy.web(req, res); }); // WebSocket support server.on('upgrade', (req, socket, head) => { console.log(`[WebSocket] Upgrading ${req.url}`); - proxy.ws(req, socket, head); + backendProxy.ws(req, socket, head); }); server.listen(PROXY_PORT, '0.0.0.0', () => { @@ -55,8 +87,13 @@ server.listen(PROXY_PORT, '0.0.0.0', () => { console.log(`โœ“ Listening on all interfaces: http://0.0.0.0:${PROXY_PORT}`); console.log(`โœ“ Network access: http://192.168.1.180:${PROXY_PORT}`); console.log(`โœ“ Localhost: http://localhost:${PROXY_PORT}`); - 
console.log(`โœ“ Forwarding to: ${BACKEND_URL}`); - console.log(`โœ“ Frontend at: ${FRONTEND_URL}`); + console.log(`โœ“ Backend: ${BACKEND_URL}`); + console.log(`โœ“ AI Gateway: ${AI_GATEWAY_URL}`); + console.log(`โœ“ Frontend: ${FRONTEND_URL}`); + console.log(`โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”`); + console.log(`๐Ÿ“ Routes:`); + console.log(` /api/ai/* -> AI Proxy Gateway`); + console.log(` /* -> Backend Server`); console.log(`โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\n`); }); diff --git a/quick-save-cleanup.ps1 b/quick-save-cleanup.ps1 new file mode 100644 index 0000000..d513966 --- /dev/null +++ b/quick-save-cleanup.ps1 @@ -0,0 +1,141 @@ +# NetworkBuster Save and Cleanup Script +# Simple and reliable version + +$ProjectRoot = "C:\Users\daypi\networkbuster.net" +Set-Location $ProjectRoot + +Write-Host "`nโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan +Write-Host " NETWORKBUSTER SAVE & CLEANUP SYSTEM" -ForegroundColor Cyan +Write-Host "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan + +# PHASE 1: Cleanup +Write-Host "`n[1/4] Cleaning temporary files..." 
-ForegroundColor Yellow + +$cleaned = 0 +$freed = 0 + +# Clean node_modules cache +if (Test-Path "node_modules\.cache") { + $size = (Get-ChildItem "node_modules\.cache" -Recurse | Measure-Object -Property Length -Sum).Sum + Remove-Item "node_modules\.cache" -Recurse -Force -ErrorAction SilentlyContinue + $freed += $size + $cleaned++ + Write-Host " โœ“ Removed node_modules cache" -ForegroundColor Green +} + +# Clean Python cache +Get-ChildItem -Path $ProjectRoot -Recurse -Include "__pycache__","*.pyc" -Force -ErrorAction SilentlyContinue | ForEach-Object { + $freed += $_.Length + Remove-Item $_.FullName -Recurse -Force -ErrorAction SilentlyContinue + $cleaned++ +} +Write-Host " โœ“ Cleaned Python cache files" -ForegroundColor Green + +# Clean log files +Get-ChildItem -Path $ProjectRoot -Include "*.log" -Recurse -Force -ErrorAction SilentlyContinue | ForEach-Object { + if ($_.Length -gt 0) { + $freed += $_.Length + Remove-Item $_.FullName -Force -ErrorAction SilentlyContinue + $cleaned++ + } +} +Write-Host " โœ“ Cleaned log files" -ForegroundColor Green + +$freedMB = [math]::Round($freed / 1MB, 2) +Write-Host " Summary: $cleaned items removed, ${freedMB} MB freed" -ForegroundColor White + +# PHASE 2: Git Save +Write-Host "`n[2/4] Saving to Git..." -ForegroundColor Yellow + +$hasGit = Get-Command git -ErrorAction SilentlyContinue +if ($hasGit -and (Test-Path ".git")) { + $status = git status --porcelain + if ($status) { + git add -A + $timestamp = Get-Date -Format "yyyy-MM-dd HH:mm" + git commit -m "Auto-save: Update and cleanup - $timestamp" + Write-Host " โœ“ Changes committed to git" -ForegroundColor Green + } else { + Write-Host " โœ“ No changes to commit" -ForegroundColor Green + } +} else { + Write-Host " โš  Git not available" -ForegroundColor Yellow +} + +# PHASE 3: Backup to drives +Write-Host "`n[3/4] Backing up to available drives..." 
-ForegroundColor Yellow + +$drives = Get-PSDrive -PSProvider FileSystem | Where-Object { + $_.Used -ne $null -and $_.Name -ne 'C' -and $_.Free -gt 1GB +} + +if ($drives) { + $timestamp = Get-Date -Format "yyyy-MM-dd_HHmmss" + $backupName = "NetworkBuster_$timestamp" + + foreach ($drive in $drives) { + $backupPath = Join-Path $drive.Root "Backups\$backupName" + + try { + Write-Host " Backing up to $($drive.Name):..." -ForegroundColor Cyan + + # Create backup directory + New-Item -ItemType Directory -Path $backupPath -Force | Out-Null + + # Copy important files (excluding large directories) + $exclude = @("node_modules", ".git", ".venv", "__pycache__") + + Get-ChildItem -Path $ProjectRoot -Exclude $exclude | ForEach-Object { + Copy-Item $_.FullName -Destination $backupPath -Recurse -Force -ErrorAction SilentlyContinue + } + + $backupSize = (Get-ChildItem $backupPath -Recurse | Measure-Object -Property Length -Sum).Sum + $sizeMB = [math]::Round($backupSize / 1MB, 2) + + Write-Host " โœ“ Backup complete ($sizeMB MB) -> $backupPath" -ForegroundColor Green + } catch { + Write-Host " โœ— Backup failed: $($_.Exception.Message)" -ForegroundColor Red + } + } +} else { + Write-Host " โš  No additional drives available" -ForegroundColor Yellow +} + +# PHASE 4: Generate Report +Write-Host "`n[4/4] Generating report..." 
-ForegroundColor Yellow + +$reportDate = Get-Date -Format "yyyy-MM-dd HH:mm:ss" +$report = @" +# Save & Cleanup Report +**Date:** $reportDate + +## Cleanup +- Files cleaned: $cleaned +- Space freed: ${freedMB} MB + +## Git +- Status: $(if ($hasGit -and (Test-Path ".git")) { "Saved" } else { "Skipped" }) + +## Backups +- Drives: $($drives.Count) +- Folder: $backupName + +## Project +- Path: $ProjectRoot +- Total drives scanned: $((Get-PSDrive -PSProvider FileSystem | Where-Object { $_.Used -ne $null }).Count) + +--- +*Generated by NetworkBuster Save & Cleanup System* +"@ + +$report | Out-File -FilePath "SAVE_CLEANUP_REPORT.md" -Encoding UTF8 +Write-Host " โœ“ Report saved to SAVE_CLEANUP_REPORT.md" -ForegroundColor Green + +# Final Summary +Write-Host "`nโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green +Write-Host " โœ… ALL OPERATIONS COMPLETE" -ForegroundColor Green +Write-Host "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green +Write-Host "`nCleaned: $cleaned items (${freedMB} MB)" -ForegroundColor White +Write-Host "Git: $(if ($hasGit -and (Test-Path '.git')) { 'Saved' } else { 'Skipped' })" -ForegroundColor White +Write-Host "Backups: $($drives.Count) drive(s)" -ForegroundColor White +Write-Host "" diff --git a/quick_admin.py b/quick_admin.py new file mode 100644 index 0000000..288313c --- /dev/null +++ b/quick_admin.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Quick Admin Commands +Fast access to common admin operations +""" + +import ctypes +import subprocess +import sys +import os +from pathlib import Path + +PROJECT_PATH = Path(__file__).parent.resolve() + + +def is_admin(): + """Check if running as administrator.""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: 
+ return False + + +def run_elevated(func): + """Decorator to run function with admin privileges.""" + def wrapper(*args, **kwargs): + if not is_admin(): + print("โ†‘ Elevating to Administrator...") + ctypes.windll.shell32.ShellExecuteW( + None, "runas", sys.executable, + f'"{__file__}" {func.__name__}', + str(PROJECT_PATH), 1 + ) + return + return func(*args, **kwargs) + return wrapper + + +def ps(cmd, show=True): + """Run PowerShell command.""" + result = subprocess.run( + ["powershell", "-NoProfile", "-Command", cmd], + capture_output=True, text=True + ) + if show and result.stdout: + print(result.stdout) + if result.stderr: + print(f"Error: {result.stderr}") + return result + + +def cmd(command, show=True): + """Run CMD command.""" + result = subprocess.run(command, shell=True, capture_output=True, text=True) + if show and result.stdout: + print(result.stdout) + return result + + +# ============================================================ +# Quick Commands +# ============================================================ + +def start_servers(): + """Start all NetworkBuster servers.""" + print("๐Ÿš€ Starting NetworkBuster servers...") + os.chdir(PROJECT_PATH) + subprocess.Popen( + ["node", "start-servers.js"], + creationflags=subprocess.CREATE_NEW_CONSOLE + ) + print("โœ“ Servers starting in new window") + + +def stop_servers(): + """Stop all Node.js processes.""" + print("๐Ÿ›‘ Stopping Node.js processes...") + ps("Get-Process node -ErrorAction SilentlyContinue | Stop-Process -Force") + print("โœ“ All Node.js processes stopped") + + +def restart_servers(): + """Restart all servers.""" + stop_servers() + import time + time.sleep(2) + start_servers() + + +def check_ports(): + """Check server port status.""" + print("\n๐Ÿ”Œ Port Status:") + print("-" * 40) + for port in [3000, 3001, 3002, 3003]: + result = ps(f"Get-NetTCPConnection -LocalPort {port} -State Listen -ErrorAction SilentlyContinue", show=False) + status = "๐ŸŸข ACTIVE" if result.stdout.strip() else 
"โšช FREE" + print(f" Port {port}: {status}") + + +def kill_port(port): + """Kill process using a specific port.""" + print(f"๐Ÿ”ช Killing process on port {port}...") + ps(f''' +$conn = Get-NetTCPConnection -LocalPort {port} -ErrorAction SilentlyContinue +if ($conn) {{ + Stop-Process -Id $conn.OwningProcess -Force + Write-Output "Killed process on port {port}" +}} else {{ + Write-Output "No process found on port {port}" +}} +''') + + +@run_elevated +def set_execution_policy(): + """Set PowerShell execution policy to RemoteSigned.""" + print("๐Ÿ”ง Setting execution policy...") + ps("Set-ExecutionPolicy RemoteSigned -Scope CurrentUser -Force") + print("โœ“ Execution policy set to RemoteSigned") + + +@run_elevated +def open_firewall(): + """Open firewall ports for NetworkBuster.""" + print("๐Ÿ”ฅ Opening firewall ports...") + ports = [ + (3000, "NetworkBuster-Web"), + (3001, "NetworkBuster-API"), + (3002, "NetworkBuster-Audio"), + (3003, "NetworkBuster-Auth") + ] + for port, name in ports: + ps(f'New-NetFirewallRule -DisplayName "{name}" -Direction Inbound -Protocol TCP -LocalPort {port} -Action Allow -ErrorAction SilentlyContinue') + print(f" โœ“ Port {port} ({name})") + print("โœ“ Firewall configured") + + +@run_elevated +def flush_dns(): + """Flush DNS cache.""" + print("๐ŸŒ Flushing DNS cache...") + cmd("ipconfig /flushdns") + print("โœ“ DNS cache flushed") + + +def show_ip(): + """Show IP addresses.""" + print("\n๐ŸŒ Network Information:") + print("-" * 40) + ps("Get-NetIPAddress -AddressFamily IPv4 | Where-Object {$_.InterfaceAlias -notlike '*Loopback*'} | Select-Object InterfaceAlias, IPAddress | Format-Table") + + +def disk_status(): + """Show disk space status.""" + print("\n๐Ÿ’พ Disk Status:") + print("-" * 40) + ps("Get-PSDrive -PSProvider FileSystem | Select-Object Name, @{N='Used(GB)';E={[math]::Round($_.Used/1GB,1)}}, @{N='Free(GB)';E={[math]::Round($_.Free/1GB,1)}} | Format-Table") + + +def node_status(): + """Show Node.js process status.""" + 
print("\n๐Ÿ“ฆ Node.js Processes:") + print("-" * 40) + ps("Get-Process node -ErrorAction SilentlyContinue | Select-Object Id, CPU, @{N='Memory(MB)';E={[math]::Round($_.WorkingSet64/1MB,1)}} | Format-Table") + + +def clear_logs(): + """Clear log files.""" + print("๐Ÿงน Clearing logs...") + log_dir = PROJECT_PATH / "logs" + if log_dir.exists(): + for f in log_dir.glob("*.log"): + f.unlink() + print(f" Deleted: {f.name}") + print("โœ“ Logs cleared") + + +def open_project(): + """Open project in VS Code.""" + print("๐Ÿ“‚ Opening project in VS Code...") + subprocess.Popen(["code", str(PROJECT_PATH)]) + + +def open_dashboard(): + """Open dashboard in browser.""" + print("๐ŸŒ Opening dashboard...") + import webbrowser + webbrowser.open("http://localhost:3000") + + +# ============================================================ +# Main Menu +# ============================================================ + +COMMANDS = { + "1": ("Start Servers", start_servers), + "2": ("Stop Servers", stop_servers), + "3": ("Restart Servers", restart_servers), + "4": ("Check Ports", check_ports), + "5": ("Show IP Info", show_ip), + "6": ("Disk Status", disk_status), + "7": ("Node.js Status", node_status), + "8": ("Set Execution Policy*", set_execution_policy), + "9": ("Open Firewall Ports*", open_firewall), + "10": ("Flush DNS*", flush_dns), + "11": ("Clear Logs", clear_logs), + "12": ("Open VS Code", open_project), + "13": ("Open Dashboard", open_dashboard), + "k": ("Kill Port (enter port)", lambda: kill_port(input("Port: "))), +} + + +def main(): + """Main menu.""" + # Handle command-line function calls + if len(sys.argv) > 1: + func_name = sys.argv[1] + for _, (_, func) in COMMANDS.items(): + if func.__name__ == func_name: + func() + return + + print("=" * 50) + print(" NetworkBuster Quick Admin") + print("=" * 50) + print(f" Admin: {'โœ“ Yes' if is_admin() else 'โœ— No (some options need elevation)'}") + print(" * = Requires Admin") + print() + + while True: + print("\n๐Ÿ“‹ Quick 
Commands:") + for key, (name, _) in COMMANDS.items(): + print(f" [{key}] {name}") + print(" [q] Quit") + print() + + choice = input("Command: ").strip().lower() + + if choice == "q": + print("๐Ÿ‘‹ Goodbye!") + break + elif choice in COMMANDS: + print() + COMMANDS[choice][1]() + else: + print("Invalid option") + + +if __name__ == "__main__": + main() diff --git a/quick_install.bat b/quick_install.bat new file mode 100644 index 0000000..4e00f46 --- /dev/null +++ b/quick_install.bat @@ -0,0 +1,31 @@ +@echo off +echo. +echo ======================================== +echo NetworkBuster Quick Installer +echo ======================================== +echo. + +cd /d "%~dp0" + +echo Creating desktop shortcut... +powershell -Command "$WshShell = New-Object -ComObject WScript.Shell; $Shortcut = $WshShell.CreateShortcut('%USERPROFILE%\Desktop\NetworkBuster.lnk'); $Shortcut.TargetPath = '%CD%\.venv\Scripts\pythonw.exe'; $Shortcut.Arguments = '\"%CD%\networkbuster_app.pyw\"'; $Shortcut.WorkingDirectory = '%CD%'; $Shortcut.Description = 'NetworkBuster Control Panel'; $Shortcut.IconLocation = 'imageres.dll,1'; $Shortcut.Save()" + +echo Creating Start Menu shortcut... +set "STARTMENU=%APPDATA%\Microsoft\Windows\Start Menu\Programs" +powershell -Command "$WshShell = New-Object -ComObject WScript.Shell; $Shortcut = $WshShell.CreateShortcut('%STARTMENU%\NetworkBuster.lnk'); $Shortcut.TargetPath = '%CD%\.venv\Scripts\pythonw.exe'; $Shortcut.Arguments = '\"%CD%\networkbuster_app.pyw\"'; $Shortcut.WorkingDirectory = '%CD%'; $Shortcut.Description = 'NetworkBuster Control Panel'; $Shortcut.IconLocation = 'imageres.dll,1'; $Shortcut.Save()" + +echo. +echo ======================================== +echo Installation Complete! +echo ======================================== +echo. +echo Desktop: NetworkBuster shortcut +echo Start Menu: NetworkBuster +echo Search: Type "NetworkBuster" +echo. +echo Launching NetworkBuster... +echo. 
+ +start "" "%CD%\.venv\Scripts\pythonw.exe" "%CD%\networkbuster_app.pyw" + +timeout /t 2 >nul diff --git a/reports/networkbuster-firewall.json b/reports/networkbuster-firewall.json new file mode 100644 index 0000000..c23c6f9 --- /dev/null +++ b/reports/networkbuster-firewall.json @@ -0,0 +1,45 @@ +๏ปฟ{ + "generated": "2025-12-21T12:06:27.4229364-07:00", + "rules": [ + { + "DisplayName": "NetworkBuster (Node)", + "Enabled": 1, + "Profile": 7, + "Direction": 1, + "Action": 2, + "Program": null, + "LocalPort": null, + "Service": null + }, + { + "DisplayName": "NetworkBuster (TCP 3000)", + "Enabled": 1, + "Profile": 7, + "Direction": 1, + "Action": 2, + "Program": null, + "LocalPort": null, + "Service": null + }, + { + "DisplayName": "NetworkBuster (TCP 3001)", + "Enabled": 1, + "Profile": 7, + "Direction": 1, + "Action": 2, + "Program": null, + "LocalPort": null, + "Service": null + }, + { + "DisplayName": "NetworkBuster (Service)", + "Enabled": 1, + "Profile": 7, + "Direction": 1, + "Action": 2, + "Program": null, + "LocalPort": null, + "Service": null + } + ] +} diff --git a/run-admin.bat b/run-admin.bat new file mode 100644 index 0000000..45575d8 --- /dev/null +++ b/run-admin.bat @@ -0,0 +1,40 @@ +@echo off +title NetworkBuster - Run as Admin +echo. +echo ======================================== +echo NetworkBuster Admin Launcher +echo ======================================== +echo. + +:: Check for admin rights +net session >nul 2>&1 +if %errorLevel% == 0 ( + echo [OK] Running as Administrator +) else ( + echo [!] Requesting Administrator privileges... + powershell -Command "Start-Process '%~f0' -Verb RunAs" + exit /b +) + +cd /d "%~dp0" + +echo. +echo Select an option: +echo 1. Launch Everything (recommended) +echo 2. Quick Admin Menu +echo 3. System Health Check +echo 4. Service Manager +echo 5. Start Servers Only +echo 6. Exit +echo. 
+ +set /p choice="Enter choice (1-6): " + +if "%choice%"=="1" python launch.py +if "%choice%"=="2" python quick_admin.py +if "%choice%"=="3" python system_health.py +if "%choice%"=="4" python service_manager.py +if "%choice%"=="5" node start-servers.js +if "%choice%"=="6" exit + +pause diff --git a/run_drone_simulation.py b/run_drone_simulation.py new file mode 100644 index 0000000..9ca935e --- /dev/null +++ b/run_drone_simulation.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Automated Drone Simulation Runner +Runs through drone operations with pre-configured settings +""" + +import sys +import time +from drone_flight_system import DroneState, UnbreakableAutopilot, ScanAlgorithms +from security_verification import UserVerification, SecurityLevel + +def automated_simulation(): + print("=" * 60) + print("AUTOMATED DRONE FLIGHT SIMULATION") + print("=" * 60) + + # Authenticate + print("\n๐Ÿ” Authenticating with system credentials...") + verifier = UserVerification() + + # Try to use existing session first + session = verifier.load_session() + if not session: + print("No active session found. 
Logging in as admin...") + # Authenticate with credentials + username = "admin" + password = "admin123" + success, session = verifier.authenticate(username=username, password=password, interactive=False) + + if success: + print(f"โœ… Authenticated as {username}") + else: + print("โŒ Authentication failed") + return + else: + print(f"โœ… Using existing session: {session['username']} (Level {session['level']})") + + # Check clearance + if not verifier.require_level(SecurityLevel.OPERATOR): + print("โŒ Insufficient clearance for drone operations") + return + + print("\n๐Ÿš Initializing Drone System...") + time.sleep(1) + + # Create drone + drone1 = DroneState(drone_id="ALPHA-1") + autopilot = UnbreakableAutopilot(drone1) + + print("\n" + "=" * 60) + print("SIMULATION 1: SPIRAL SEARCH PATTERN") + print("=" * 60) + print("Generating wide-area spiral search pattern...") + path1 = ScanAlgorithms.generate_spiral_search(0, 0, 50, spacing=8.0) + print(f"Generated {len(path1)} waypoints") + autopilot.execute_pattern("SPIRAL_ALPHA", path1[:15]) # Run first 15 waypoints + + time.sleep(2) + + # Reset battery for next mission + drone1.battery = 100.0 + drone1.integrity = 100.0 + + print("\n" + "=" * 60) + print("SIMULATION 2: GRID RASTER SCAN") + print("=" * 60) + print("Generating detailed grid raster pattern...") + path2 = ScanAlgorithms.generate_grid_raster(30, 30, altitude=15.0, density=8.0) + print(f"Generated {len(path2)} waypoints") + autopilot.execute_pattern("GRID_BETA", path2[:15]) # Run first 15 waypoints + + time.sleep(2) + + # Final diagnostics + print("\n" + "=" * 60) + print("FINAL SYSTEM DIAGNOSTICS") + print("=" * 60) + print(f"Drone ID: {drone1.id}") + print(f"Final Battery: {drone1.battery:.1f}%") + print(f"Structural Integrity: {drone1.integrity}%") + print(f"Final Position: X={drone1.position['x']:.1f}, Y={drone1.position['y']:.1f}, Z={drone1.position['z']:.1f}") + print(f"Status: {drone1.status}") + + if autopilot.error_log: + print(f"\nErrors 
Encountered: {len(autopilot.error_log)}") + for i, error in enumerate(autopilot.error_log, 1): + print(f" {i}. {error}") + else: + print("\nโœ… No errors encountered during flight operations") + + print("\n" + "=" * 60) + print("SIMULATION COMPLETE") + print("=" * 60) + +if __name__ == "__main__": + try: + automated_simulation() + except KeyboardInterrupt: + print("\n\nโš ๏ธ Simulation interrupted by user") + except Exception as e: + print(f"\n\nโŒ Simulation error: {e}") + import traceback + traceback.print_exc() diff --git a/run_launcher_admin.ps1 b/run_launcher_admin.ps1 new file mode 100644 index 0000000..297ce4b --- /dev/null +++ b/run_launcher_admin.ps1 @@ -0,0 +1,108 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + NetworkBuster Universal Launcher with Admin Permissions +.DESCRIPTION + Runs NetworkBuster with elevated privileges for overclocking and system optimization +#> + +Write-Host "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Cyan +Write-Host "โ•‘ NetworkBuster Universal Launcher (ADMIN MODE) โ•‘" -ForegroundColor Cyan +Write-Host "โ•‘ Running with elevated privileges for overclocking โ•‘" -ForegroundColor Cyan +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan + +# Set working directory +Set-Location $PSScriptRoot + +# Enable High Performance Power Plan +Write-Host "`nโšก Setting High Performance Power Plan..." -ForegroundColor Yellow +powercfg /setactive 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c + +# Disable CPU Parking +Write-Host "๐Ÿš€ Disabling CPU Parking..." 
-ForegroundColor Yellow +$cpuCount = (Get-WmiObject -Class Win32_Processor).NumberOfLogicalProcessors +for ($i = 0; $i -lt $cpuCount; $i++) { + $regPath = "HKLM:\SYSTEM\CurrentControlSet\Control\Power\PowerSettings\54533251-82be-4824-96c1-47b60b740d00\0cc5b647-c1df-4637-891a-dec35c318583" + if (Test-Path $regPath) { + Set-ItemProperty -Path $regPath -Name "ValueMax" -Value 0 -ErrorAction SilentlyContinue + } +} + +# Set Process Priority to High +Write-Host "๐ŸŽฏ Setting process priority to High..." -ForegroundColor Yellow +$currentProcess = Get-Process -Id $PID +$currentProcess.PriorityClass = "High" + +# Optimize Network Settings +Write-Host "๐ŸŒ Optimizing network settings..." -ForegroundColor Yellow +netsh int tcp set global autotuninglevel=normal +netsh int tcp set global chimney=enabled +netsh int tcp set global dca=enabled +netsh int tcp set global netdma=enabled + +# Clear DNS Cache +Write-Host "๐Ÿ”„ Clearing DNS cache..." -ForegroundColor Yellow +ipconfig /flushdns | Out-Null + +# Disable Windows Search temporarily for performance +Write-Host "โš™๏ธ Optimizing Windows Search..." -ForegroundColor Yellow +Stop-Service "WSearch" -ErrorAction SilentlyContinue + +Write-Host "`nโœ… System optimizations applied!" -ForegroundColor Green +Write-Host "`n๐Ÿ“Š System Status:" -ForegroundColor Cyan +Write-Host " Power Plan: High Performance" -ForegroundColor White +Write-Host " CPU Parking: Disabled" -ForegroundColor White +Write-Host " Process Priority: High" -ForegroundColor White +Write-Host " Network: Optimized" -ForegroundColor White + +# Extract network thumbnails +Write-Host "`n๐Ÿ“ธ Extracting network map thumbnails..." -ForegroundColor Cyan +& "$PSScriptRoot\.venv\Scripts\Activate.ps1" +python "$PSScriptRoot\extract_thumbnails.py" + +# Activate virtual environment and run launcher +Write-Host "`n๐Ÿš€ Starting NetworkBuster Launcher..." 
-ForegroundColor Cyan +python "$PSScriptRoot\networkbuster_launcher.py" --schedule + +Write-Host "`nโœ… Scheduled launch created!" -ForegroundColor Green +Write-Host " Launch Date: January 17, 2026 at 9:00 AM" -ForegroundColor White +Write-Host " Countdown: 14 days, 2 hours" -ForegroundColor White +Write-Host " Run Level: Administrator (Highest)" -ForegroundColor White +Write-Host " Features: Overclocking + Thumbnail Extraction" -ForegroundColor White + +# Open thumbnail gallery +Write-Host "`n๐Ÿ–ผ๏ธ Opening thumbnail gallery..." -ForegroundColor Yellow +$thumbIndexPath = Join-Path $PSScriptRoot "network_thumbnails\index.html" +if (Test-Path $thumbIndexPath) { + Start-Process $thumbIndexPath + Write-Host " โœ… Gallery opened: $thumbIndexPath" -ForegroundColor Green +} + +# Create Desktop Shortcut +Write-Host "`n๐Ÿ“Œ Creating desktop shortcut..." -ForegroundColor Yellow +$WshShell = New-Object -ComObject WScript.Shell +$Shortcut = $WshShell.CreateShortcut("$env:USERPROFILE\Desktop\NetworkBuster Launch.lnk") +$Shortcut.TargetPath = "powershell.exe" +$Shortcut.Arguments = "-ExecutionPolicy Bypass -File `"$PSScriptRoot\run_launcher_admin.ps1`"" +$Shortcut.WorkingDirectory = $PSScriptRoot +$Shortcut.IconLocation = "powershell.exe,0" +$Shortcut.Description = "NetworkBuster Universal Launcher (Admin Mode)" +$Shortcut.Save() + +Write-Host "`nโœ… Desktop shortcut created!" 
-ForegroundColor Green + +# Show scheduled task info +Write-Host "`n๐Ÿ“… Scheduled Task Details:" -ForegroundColor Cyan +Write-Host " Task Name: NetworkBuster_ScheduledLaunch" -ForegroundColor White +Write-Host " Trigger: January 17, 2026 at 9:00 AM" -ForegroundColor White +Write-Host " Run Level: Highest (Administrator)" -ForegroundColor White +Write-Host " Status: Ready" -ForegroundColor White + +Write-Host "`n๐ŸŽฎ Quick Commands:" -ForegroundColor Cyan +Write-Host " Start Now: python networkbuster_launcher.py --start" -ForegroundColor White +Write-Host " Status: python networkbuster_launcher.py --status" -ForegroundColor White +Write-Host " Stop All: python networkbuster_launcher.py --stop" -ForegroundColor White + +Write-Host "`nโœจ Press any key to exit..." -ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/save-and-cleanup.ps1 b/save-and-cleanup.ps1 new file mode 100644 index 0000000..9099075 --- /dev/null +++ b/save-and-cleanup.ps1 @@ -0,0 +1,338 @@ +<# +.SYNOPSIS + NetworkBuster Universal Save & Cleanup Script +.DESCRIPTION + Updates, cleans, and saves NetworkBuster project to all available drives +#> + +param( + [switch]$SkipGit, + [switch]$SkipBackup, + [switch]$CleanOnly, + [string]$BackupPath = "" +) + +$ErrorActionPreference = "Continue" +$ProjectRoot = "C:\Users\daypi\networkbuster.net" + +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Cyan +Write-Host "โ•‘ NETWORKBUSTER UNIVERSAL SAVE & CLEANUP SYSTEM โ•‘" -ForegroundColor Cyan +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Cyan + +# Check if running in project directory +if (-not (Test-Path $ProjectRoot)) { + Write-Host 
"โŒ Project directory not found: $ProjectRoot" -ForegroundColor Red + exit 1 +} + +Set-Location $ProjectRoot + +# ============================================ +# PHASE 1: CLEANUP OPERATION +# ============================================ +Write-Host "`n[PHASE 1] ๐Ÿงน CLEANUP OPERATION" -ForegroundColor Yellow +Write-Host "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" -ForegroundColor DarkGray + +$cleanupItems = @{ + "Node Modules Cache" = @("node_modules\.cache", ".npm") + "Build Artifacts" = @("dist", "build", ".next", "out") + "Logs" = @("*.log", "logs", "npm-debug.log*") + "Temp Files" = @("*.tmp", "*.temp", ".DS_Store", "Thumbs.db") + "Python Cache" = @("__pycache__", "*.pyc", "*.pyo", ".pytest_cache") + "Editor Temp" = @(".vscode-test", "*.swp", "*.swo", "*~") +} + +$totalCleaned = 0 +$cleanedFiles = 0 + +foreach ($category in $cleanupItems.Keys) { + Write-Host "`n Cleaning: $category..." 
-ForegroundColor Cyan + + foreach ($pattern in $cleanupItems[$category]) { + try { + $items = Get-ChildItem -Path $ProjectRoot -Recurse -Force -Include $pattern -ErrorAction SilentlyContinue + + foreach ($item in $items) { + try { + $size = 0 + if ($item.PSIsContainer) { + $size = (Get-ChildItem -Path $item.FullName -Recurse -Force -ErrorAction SilentlyContinue | Measure-Object -Property Length -Sum).Sum + Remove-Item -Path $item.FullName -Recurse -Force -ErrorAction SilentlyContinue + } else { + $size = $item.Length + Remove-Item -Path $item.FullName -Force -ErrorAction SilentlyContinue + } + + if ($size) { + $totalCleaned += $size + $cleanedFiles++ + $sizeStr = "{0:N2} MB" -f ($size / 1MB) + Write-Host " โœ“ Removed: $($item.Name) ($sizeStr)" -ForegroundColor DarkGray + } + } catch { + Write-Host " โš  Skipped: $($item.Name)" -ForegroundColor DarkYellow + } + } + } catch { + Write-Host " โš  Pattern not found: $pattern" -ForegroundColor DarkYellow + } + } +} + +$cleanedMB = [math]::Round($totalCleaned / 1MB, 2) +Write-Host "`n โœ… Cleanup Complete!" -ForegroundColor Green +Write-Host " Files Removed: $cleanedFiles" -ForegroundColor White +Write-Host " Space Freed: $cleanedMB MB" -ForegroundColor White + +if ($CleanOnly) { + Write-Host "`nโœ… Cleanup-only mode complete!" 
-ForegroundColor Green + exit 0 +} + +# ============================================ +# PHASE 2: GIT OPERATIONS +# ============================================ +if (-not $SkipGit) { + Write-Host "`n[PHASE 2] ๐Ÿ“ฆ GIT SAVE OPERATION" -ForegroundColor Yellow + Write-Host "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" -ForegroundColor DarkGray + + # Check if git is available + $gitAvailable = $null -ne (Get-Command git -ErrorAction SilentlyContinue) + + if ($gitAvailable) { + # Check if this is a git repository + $isGitRepo = Test-Path (Join-Path $ProjectRoot ".git") + + if ($isGitRepo) { + Write-Host "`n Checking git status..." -ForegroundColor Cyan + + # Get status + $status = git status --porcelain + + if ($status) { + Write-Host " Changes detected:" -ForegroundColor White + $status | ForEach-Object { Write-Host " $_" -ForegroundColor DarkGray } + + # Add all changes + Write-Host "`n Adding changes to git..." -ForegroundColor Cyan + git add -A + Write-Host " โœ“ Files staged" -ForegroundColor Green + + # Commit with timestamp + $timestamp = Get-Date -Format "yyyy-MM-dd HH:mm:ss" + $commitMsg = "Auto-save: Update and cleanup - $timestamp" + + Write-Host "`n Committing changes..." -ForegroundColor Cyan + git commit -m $commitMsg + Write-Host " โœ“ Changes committed" -ForegroundColor Green + + # Check for remote + $hasRemote = (git remote) -ne $null + if ($hasRemote) { + Write-Host "`n Pushing to remote..." -ForegroundColor Cyan + try { + git push + Write-Host " โœ“ Pushed to remote" -ForegroundColor Green + } catch { + Write-Host " โš  Push failed (may need authentication)" -ForegroundColor Yellow + } + } + } else { + Write-Host " โœ“ No changes to commit" -ForegroundColor Green + } + } else { + Write-Host " โš  Not a git repository" -ForegroundColor Yellow + Write-Host " Initializing git repository..." 
-ForegroundColor Cyan + git init + git add -A + git commit -m "Initial commit: NetworkBuster project" + Write-Host " โœ“ Repository initialized" -ForegroundColor Green + } + } else { + Write-Host " โš  Git not available, skipping version control" -ForegroundColor Yellow + } +} else { + Write-Host "`n[PHASE 2] โญ๏ธ Skipping Git Operations" -ForegroundColor DarkGray +} + +# ============================================ +# PHASE 3: MULTI-DRIVE BACKUP +# ============================================ +if (-not $SkipBackup) { + Write-Host "`n[PHASE 3] ๐Ÿ’พ MULTI-DRIVE BACKUP" -ForegroundColor Yellow + Write-Host "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" -ForegroundColor DarkGray + + # Get all available drives + $drives = Get-PSDrive -PSProvider FileSystem | Where-Object { + $_.Used -ne $null -and + $_.Name -ne 'C' -and + $_.Free -gt 1GB + } + + if ($drives.Count -eq 0) { + Write-Host " โš  No additional drives available for backup" -ForegroundColor Yellow + } else { + Write-Host "`n Available drives for backup:" -ForegroundColor Cyan + $drives | ForEach-Object { + $freeGB = [math]::Round($_.Free / 1GB, 2) + Write-Host " ๐Ÿ“ $($_.Name): ($($_.Root)) - ${freeGB} GB free" -ForegroundColor White + } + + $timestamp = Get-Date -Format "yyyy-MM-dd_HHmmss" + $backupFolderName = "NetworkBuster_Backup_$timestamp" + + # Exclude patterns for backup + $excludePatterns = @( + "node_modules", + ".git", + ".venv", + "__pycache__", + "*.log", + ".DS_Store", + "Thumbs.db" + ) + + foreach ($drive in $drives) { + $backupRoot = if ($BackupPath) { + Join-Path $drive.Root $BackupPath + } else { + Join-Path $drive.Root "Backups" + } + + $backupPath = Join-Path $backupRoot $backupFolderName + + Write-Host "`n Backing up to $($drive.Name):..." 
-ForegroundColor Cyan + + try { + # Create backup directory + if (-not (Test-Path $backupRoot)) { + New-Item -ItemType Directory -Path $backupRoot -Force | Out-Null + } + + New-Item -ItemType Directory -Path $backupPath -Force | Out-Null + + # Copy files with exclusions + Write-Host " Copying files..." -ForegroundColor DarkGray + + $robocopyArgs = @( + $ProjectRoot, + $backupPath, + "/E", # Copy subdirectories including empty ones + "/NFL", # No file list + "/NDL", # No directory list + "/NJH", # No job header + "/NJS", # No job summary + "/NC", # No class + "/NS", # No size + "/NP" # No progress + ) + + # Add exclusions + foreach ($pattern in $excludePatterns) { + $robocopyArgs += "/XD" + $robocopyArgs += $pattern + } + + $result = robocopy @robocopyArgs 2>&1 + + # Create backup manifest + $manifest = @{ + BackupDate = Get-Date -Format "yyyy-MM-dd HH:mm:ss" + SourcePath = $ProjectRoot + BackupPath = $backupPath + Drive = $drive.Name + ExcludedPatterns = $excludePatterns + } | ConvertTo-Json -Depth 3 + + $manifestPath = Join-Path $backupPath "BACKUP_MANIFEST.json" + $manifest | Out-File -FilePath $manifestPath -Encoding UTF8 + + # Calculate backup size + $backupSize = (Get-ChildItem -Path $backupPath -Recurse -Force | Measure-Object -Property Length -Sum).Sum + $backupSizeMB = [math]::Round($backupSize / 1MB, 2) + + Write-Host " โœ… Backup complete! 
($backupSizeMB MB)" -ForegroundColor Green + Write-Host " Location: $backupPath" -ForegroundColor DarkGray + + } catch { + Write-Host " โŒ Backup failed: $($_.Exception.Message)" -ForegroundColor Red + } + } + } +} else { + Write-Host "`n[PHASE 3] โญ๏ธ Skipping Backup Operations" -ForegroundColor DarkGray +} + +# ============================================ +# PHASE 4: GENERATE REPORT +# ============================================ +Write-Host "`n[PHASE 4] ๐Ÿ“Š GENERATING REPORT" -ForegroundColor Yellow +Write-Host "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" -ForegroundColor DarkGray + +$reportPath = Join-Path $ProjectRoot "SAVE_CLEANUP_REPORT.md" +$reportDate = Get-Date -Format "yyyy-MM-dd HH:mm:ss" + +$report = @" +# NetworkBuster Save & Cleanup Report +**Generated:** $reportDate + +## Cleanup Summary +- **Files Removed:** $cleanedFiles +- **Space Freed:** $cleanedMB MB + +## Git Status +$(if (-not $SkipGit) { + if ($gitAvailable -and $isGitRepo) { + "- โœ… Changes committed to git +- Commit: Auto-save $reportDate" + } else { + "- โš ๏ธ Git operations unavailable" + } +} else { + "- โญ๏ธ Skipped" +}) + +## Backup Status +$(if (-not $SkipBackup) { + if ($drives.Count -gt 0) { + "- โœ… Backups created on $($drives.Count) drive(s) +- Backup folder: $backupFolderName" + } else { + "- โš ๏ธ No additional drives available" + } +} else { + "- โญ๏ธ Skipped" +}) + +## Project Statistics +- **Project Path:** $ProjectRoot +- **Total Drives:** $((Get-PSDrive -PSProvider FileSystem | Where-Object { $_.Used -ne $null }).Count) + +## Next Steps +1. Verify backup integrity +2. Test restored files if needed +3. Review git commit history +4. 
Monitor disk space usage + +--- +*Report generated by NetworkBuster Universal Save & Cleanup System* +"@ + +$report | Out-File -FilePath $reportPath -Encoding UTF8 +Write-Host "`n โœ… Report saved to: SAVE_CLEANUP_REPORT.md" -ForegroundColor Green + +# ============================================ +# FINAL SUMMARY +# ============================================ +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Green +Write-Host "โ•‘ โœ… ALL OPERATIONS COMPLETE โ•‘" -ForegroundColor Green +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Green + +Write-Host "`n๐Ÿ“Š Summary:" -ForegroundColor Cyan +Write-Host " โ€ข Cleaned: $cleanedFiles files ($cleanedMB MB)" -ForegroundColor White +Write-Host " โ€ข Git: $(if (-not $SkipGit -and $gitAvailable) { 'Saved โœ“' } else { 'Skipped' })" -ForegroundColor White +Write-Host " โ€ข Backups: $(if (-not $SkipBackup) { "$($drives.Count) drive(s) โœ“" } else { 'Skipped' })" -ForegroundColor White +Write-Host " โ€ข Report: $reportPath" -ForegroundColor White + +Write-Host "`n๐ŸŽ‰ Project saved and cleaned successfully!" -ForegroundColor Green +Write-Host "" diff --git a/scripts/README-nbapp.md b/scripts/README-nbapp.md new file mode 100644 index 0000000..80d874f --- /dev/null +++ b/scripts/README-nbapp.md @@ -0,0 +1,36 @@ +# nbapp Install Helper + +This document explains how to use `install-nbapp-service.ps1` to fetch, build, and optionally install +`github.com/NetworkBuster/nbapp` as a Windows service using the existing NSSM helper. 
+
+Quick usage:
+
+- Clone/build only (no service):
+  ```powershell
+  .\install-nbapp-service.ps1 -Repo 'https://github.com/NetworkBuster/nbapp.git' -InstallDir 'C:\apps\nbapp'
+  ```
+
+- Clone/build and install as a service (UAC required):
+  ```powershell
+  .\install-nbapp-service.ps1 -InstallDir 'C:\apps\nbapp' -InstallService -ServiceName 'nbapp'
+  ```
+
+Notes & prerequisites:
+
+- `git` must be on PATH. The script will clone or update the target directory.
+- If the app uses Node, `npm` should be available to run `npm install`. The script will attempt to find
+  a node runtime in either `tools/node` (repo-local) or system PATH.
+- Installing the service requires elevation (UAC). The script delegates to `install-service-nssm.ps1` which
+  will download/install NSSM if missing and register a Windows service named as provided.
+- The script attempts to choose the correct start command (`node <entry-file>` or `npm start`). For complex start
+  commands you may need to review and edit the `AppArgs` after installation.
+
+Logs and files:
+
+- Application installation dir: whatever you pass in `-InstallDir` (default `S:\apps\nbapp`).
+- Service logs are written to the provided `-LogDir` (default `S:\apps\nbapp\logs`).
+
+Possible follow-ups:
+
+- Automatically enable firewall rules for the installed service, or
+- Run the installer non-interactively and attempt to start the service now (requires UAC).
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 0000000..9563b43
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,41 @@
+# WSL Update Scripts
+
+This folder contains helper scripts to update WSL distributions.
+
+Files:
+- `update-wsl.ps1` — PowerShell host script that enumerates WSL distros and runs `apt update && apt full-upgrade -y && apt autoremove -y` inside each. Run from an elevated PowerShell session.
+- `update-wsl.sh` — Minimal shell script intended to be run *inside* a WSL distro (e.g., `bash update-wsl.sh`).
+
+Usage (PowerShell host):
+- Elevate PowerShell (Run as Administrator)
+- Update all distros: `.\scripts\update-wsl.ps1` (or `.\scripts\update-wsl.ps1 -DryRun` to see commands)
+- Update a single distro: `.\scripts\update-wsl.ps1 -Distro ubuntu`
+- Run the script from a specific folder (e.g., `G:\kodak`):
+  - `& 'G:\kodak\networkbuster.net\scripts\update-wsl.ps1' -WorkingDir 'G:\kodak'`
+- Run updates as root (non-interactive sudo) with `-UseRoot`:
+  - `& 'G:\kodak\networkbuster.net\scripts\update-wsl.ps1' -WorkingDir 'G:\kodak' -UseRoot`
+- Register a scheduled task that runs updates as root daily and writes logs to `G:\kodak\logs`:
+  - `& 'G:\kodak\networkbuster.net\scripts\update-wsl.ps1' -WorkingDir 'G:\kodak' -RegisterScheduledTask -UseRoot -LogDir 'G:\kodak\logs' -ScheduleTime '03:00'`
+- One-off run writing log to a specific folder:
+  - `& 'G:\kodak\networkbuster.net\scripts\update-wsl.ps1' -WorkingDir 'G:\kodak' -UseRoot -LogDir 'G:\kodak\logs'`
+
+Copying artifacts to a drive
+- `scripts/copy-to-drive.ps1` – safely copy LFS artifacts or repo working tree to a destination drive (e.g., `E:` or `G:`):
+  - Copy default LFS output to `E:`: `.\scripts\copy-to-drive.ps1 -DestDrive 'E:'` (run from repo root in PowerShell)
+  - Copy entire repo working tree (no `.git`): `.\scripts\copy-to-drive.ps1 -DestDrive 'E:' -IncludeRepo`
+  - Write logs to a folder: `.\scripts\copy-to-drive.ps1 -DestDrive 'E:' -LogDir 'E:\logs'`
+  - Create a zip after copying: add `-Zip`.
+
+Security note: `-UseRoot` executes the update command as the root user inside WSL (`wsl -d <distro> -u root`). This suppresses sudo prompts but grants the script elevated privileges inside the distro; use with caution.
+ +Usage (inside WSL): +- `chmod +x scripts/update-wsl.sh && ./scripts/update-wsl.sh` + +Scheduling: +- Register a daily scheduled task that runs the script (requires elevated PowerShell): + - `.\scripts\update-wsl.ps1 -WorkingDir 'G:\kodak' -RegisterScheduledTask -ScheduleTime '03:00'` + - This creates/updates a scheduled task named `WSL-Update` to run daily at the specified time. + +Safety notes: +- These scripts use `sudo` inside WSL; you'll be prompted for the distro user's password if required. +- Review and run manually if you need more control over upgrades or reboots. diff --git a/scripts/ai-cleanup-agent.js b/scripts/ai-cleanup-agent.js new file mode 100644 index 0000000..b143357 --- /dev/null +++ b/scripts/ai-cleanup-agent.js @@ -0,0 +1,240 @@ +/** + * AI Repository Cleanup Agent + * Uses the AI providers to analyze and suggest repository cleanup actions + * + * Run: node scripts/ai-cleanup-agent.js [--dry-run] [--execute] + */ + +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const ROOT = path.resolve(__dirname, '..'); + +// Patterns to identify for cleanup +const CLEANUP_PATTERNS = { + backup: { + patterns: [/\.bak$/, /\.backup$/, /\.original$/, /\.old$/, /~$/], + action: 'remove', + reason: 'Backup file - should be tracked in git history instead' + }, + temp: { + patterns: [/\.tmp$/, /\.temp$/, /\.swp$/, /\.swo$/], + action: 'remove', + reason: 'Temporary file - should not be committed' + }, + duplicateConfig: { + patterns: [/\.code-workspace$/], + locations: ['docs/', 'src/'], + action: 'review', + reason: 'Workspace config in non-standard location' + }, + logFiles: { + patterns: [/\.log$/, /debug\.txt$/, /error\.txt$/], + action: 'remove', + reason: 'Log file - should be in .gitignore' + }, + nodeModules: { + patterns: [/node_modules/], + action: 'skip', + reason: 'Dependencies - should be in .gitignore' + }, + buildArtifacts: { + patterns: 
[/dist\//, /build\//, /\.next\//], + action: 'review', + reason: 'Build artifact - consider .gitignore' + } +}; + +// Server variant analysis for consolidation suggestions +const SERVER_VARIANTS = [ + 'server.js', + 'server-audio.js', + 'server-enhanced.js', + 'server-optimized.js', + 'server-universal.js' +]; + +class AICleanupAgent { + constructor(options = {}) { + this.dryRun = options.dryRun ?? true; + this.verbose = options.verbose ?? true; + this.issues = []; + this.actions = []; + } + + log(msg, type = 'info') { + const prefix = { + info: ' ', + warn: 'โš ๏ธ', + error: 'โŒ', + success: 'โœ“', + action: '๐Ÿ”ง' + }[type] || ' '; + console.log(`${prefix} ${msg}`); + } + + async scanDirectory(dir, depth = 0) { + if (depth > 10) return; // Max recursion + + let entries; + try { + entries = fs.readdirSync(dir, { withFileTypes: true }); + } catch { + return; + } + + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + const relativePath = path.relative(ROOT, fullPath); + + // Skip node_modules and .git + if (entry.name === 'node_modules' || entry.name === '.git') continue; + + if (entry.isDirectory()) { + await this.scanDirectory(fullPath, depth + 1); + } else { + await this.analyzeFile(fullPath, relativePath); + } + } + } + + async analyzeFile(fullPath, relativePath) { + const filename = path.basename(fullPath); + + for (const [category, config] of Object.entries(CLEANUP_PATTERNS)) { + for (const pattern of config.patterns) { + if (pattern.test(filename) || pattern.test(relativePath)) { + this.issues.push({ + path: relativePath, + fullPath, + category, + action: config.action, + reason: config.reason + }); + break; + } + } + } + } + + analyzeServerVariants() { + const serverFiles = SERVER_VARIANTS.filter(f => + fs.existsSync(path.join(ROOT, f)) + ); + + if (serverFiles.length > 1) { + this.log(`\nFound ${serverFiles.length} server variants:`, 'warn'); + for (const f of serverFiles) { + const stats = fs.statSync(path.join(ROOT, f)); + 
const size = Math.round(stats.size / 1024); + this.log(` ${f} (${size} KB)`, 'info'); + } + + this.issues.push({ + path: 'Multiple server files', + category: 'consolidation', + action: 'review', + reason: 'Consider consolidating server variants or documenting their purposes', + files: serverFiles + }); + } + } + + async generateReport() { + console.log('\n' + 'โ•'.repeat(60)); + console.log('๐Ÿค– AI Repository Cleanup Report'); + console.log('โ•'.repeat(60) + '\n'); + + // Group issues by category + const byCategory = {}; + for (const issue of this.issues) { + if (!byCategory[issue.category]) byCategory[issue.category] = []; + byCategory[issue.category].push(issue); + } + + for (const [category, issues] of Object.entries(byCategory)) { + console.log(`\n๐Ÿ“ ${category.toUpperCase()} (${issues.length} items)`); + console.log('โ”€'.repeat(40)); + + for (const issue of issues) { + const actionIcon = { + remove: '๐Ÿ—‘๏ธ', + review: '๐Ÿ‘๏ธ', + skip: 'โญ๏ธ' + }[issue.action] || 'โ“'; + + console.log(`${actionIcon} ${issue.path}`); + if (this.verbose) { + console.log(` โ””โ”€ ${issue.reason}`); + } + } + } + + console.log('\n' + 'โ•'.repeat(60)); + console.log(`Total issues found: ${this.issues.length}`); + console.log(` - Remove: ${this.issues.filter(i => i.action === 'remove').length}`); + console.log(` - Review: ${this.issues.filter(i => i.action === 'review').length}`); + console.log('โ•'.repeat(60) + '\n'); + + return this.issues; + } + + async executeCleanup() { + if (this.dryRun) { + console.log('\n๐Ÿ” DRY RUN MODE - No files will be modified\n'); + } + + const toRemove = this.issues.filter(i => i.action === 'remove'); + + for (const issue of toRemove) { + if (this.dryRun) { + this.log(`Would delete: ${issue.path}`, 'action'); + } else { + try { + fs.unlinkSync(issue.fullPath); + this.log(`Deleted: ${issue.path}`, 'success'); + this.actions.push({ action: 'deleted', path: issue.path }); + } catch (err) { + this.log(`Failed to delete ${issue.path}: 
${err.message}`, 'error'); + } + } + } + + return this.actions; + } + + async run() { + console.log('\n๐Ÿค– Starting AI Repository Cleanup Agent...\n'); + console.log(` Root: ${ROOT}`); + console.log(` Mode: ${this.dryRun ? 'Dry Run' : 'Execute'}\n`); + + await this.scanDirectory(ROOT); + this.analyzeServerVariants(); + await this.generateReport(); + + if (!this.dryRun) { + await this.executeCleanup(); + } + + return this.issues; + } +} + +// CLI execution +const args = process.argv.slice(2); +const dryRun = !args.includes('--execute'); +const verbose = !args.includes('--quiet'); + +const agent = new AICleanupAgent({ dryRun, verbose }); +agent.run().then(issues => { + if (dryRun && issues.length > 0) { + console.log('Run with --execute to apply changes\n'); + } +}).catch(err => { + console.error('Cleanup agent failed:', err); + process.exit(1); +}); + +export default AICleanupAgent; diff --git a/scripts/ai-repo-trainer.js b/scripts/ai-repo-trainer.js new file mode 100644 index 0000000..2e3d7f1 --- /dev/null +++ b/scripts/ai-repo-trainer.js @@ -0,0 +1,291 @@ +/** + * AI Repository Training Agent + * Trains the AI model to understand repository patterns and file relationships + * Uses the AI providers to generate embeddings and learn from existing code + * + * Run: node scripts/ai-repo-trainer.js + */ + +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const ROOT = path.resolve(__dirname, '..'); + +// File categories for training +const FILE_CATEGORIES = { + server: { + pattern: /^server.*\.js$/, + description: 'Server implementations and variants', + examples: [] + }, + api: { + pattern: /^api\//, + description: 'API route handlers', + examples: [] + }, + lib: { + pattern: /^lib\//, + description: 'Shared library modules', + examples: [] + }, + scripts: { + pattern: /^scripts\//, + description: 'Automation and utility scripts', + examples: [] + }, + docs: { + 
pattern: /\.md$/, + description: 'Documentation files', + examples: [] + }, + config: { + pattern: /\.(json|yaml|yml|env|config)$/i, + description: 'Configuration files', + examples: [] + }, + powershell: { + pattern: /\.ps1$/, + description: 'PowerShell automation scripts', + examples: [] + }, + docker: { + pattern: /(Dockerfile|docker-compose)/i, + description: 'Docker containerization configs', + examples: [] + } +}; + +// Training data structure +const TRAINING_DATA = { + metadata: { + generatedAt: new Date().toISOString(), + repository: 'networkbuster.net', + version: '1.0.0' + }, + categories: {}, + fileRelationships: [], + patterns: { + naming: [], + structure: [], + dependencies: [] + }, + serverVariants: { + files: [], + purposes: {}, + consolidationOpportunities: [] + } +}; + +class AIRepoTrainer { + constructor() { + this.files = []; + this.categories = { ...FILE_CATEGORIES }; + } + + log(msg) { + console.log(` ${msg}`); + } + + async scanRepository() { + console.log('\n๐Ÿ“‚ Scanning repository structure...\n'); + await this.scanDir(ROOT, ''); + console.log(`\n Found ${this.files.length} files\n`); + } + + async scanDir(dir, relativePath, depth = 0) { + if (depth > 10) return; + + let entries; + try { + entries = fs.readdirSync(dir, { withFileTypes: true }); + } catch { + return; + } + + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + const relPath = path.join(relativePath, entry.name); + + // Skip ignored directories + if (['node_modules', '.git', 'dist', 'build', '.next'].includes(entry.name)) continue; + + if (entry.isDirectory()) { + await this.scanDir(fullPath, relPath, depth + 1); + } else { + this.files.push({ + name: entry.name, + path: relPath, + fullPath, + ext: path.extname(entry.name), + size: fs.statSync(fullPath).size + }); + } + } + } + + categorizeFiles() { + console.log('๐Ÿ“Š Categorizing files...\n'); + + for (const file of this.files) { + for (const [catName, cat] of Object.entries(this.categories)) { + 
if (cat.pattern.test(file.name) || cat.pattern.test(file.path)) { + cat.examples.push(file); + break; + } + } + } + + for (const [name, cat] of Object.entries(this.categories)) { + if (cat.examples.length > 0) { + console.log(` ${name}: ${cat.examples.length} files`); + TRAINING_DATA.categories[name] = { + description: cat.description, + count: cat.examples.length, + files: cat.examples.map(f => f.path) + }; + } + } + } + + analyzeServerVariants() { + console.log('\n๐Ÿ–ฅ๏ธ Analyzing server variants...\n'); + + const serverFiles = this.files.filter(f => /^server.*\.js$/.test(f.name) && f.path === f.name); + TRAINING_DATA.serverVariants.files = serverFiles.map(f => f.name); + + // Analyze each variant + for (const server of serverFiles) { + const content = fs.readFileSync(server.fullPath, 'utf-8'); + + // Extract purpose from comments/imports + const purpose = this.extractServerPurpose(content, server.name); + TRAINING_DATA.serverVariants.purposes[server.name] = purpose; + + console.log(` ${server.name}: ${purpose.summary}`); + } + + // Identify consolidation opportunities + if (serverFiles.length > 2) { + TRAINING_DATA.serverVariants.consolidationOpportunities.push({ + recommendation: 'Consider using a single server.js with feature flags', + variants: serverFiles.map(f => f.name), + approach: 'Use environment variables to enable/disable features' + }); + } + } + + extractServerPurpose(content, filename) { + const result = { + summary: 'Unknown', + features: [], + ports: [], + dependencies: [] + }; + + // Extract from filename + if (filename.includes('audio')) { + result.summary = 'Audio streaming server'; + result.features.push('audio-streaming', 'media-handling'); + } else if (filename.includes('enhanced')) { + result.summary = 'Enhanced server with additional features'; + } else if (filename.includes('optimized')) { + result.summary = 'Performance-optimized server'; + result.features.push('compression', 'caching'); + } else if (filename.includes('universal')) { + 
result.summary = 'Universal server with all features'; + result.features.push('multi-purpose', 'comprehensive'); + } else if (filename === 'server.js') { + result.summary = 'Main production server'; + result.features.push('core', 'production'); + } + + // Extract ports from content + const portMatches = content.match(/PORT\s*[=:]\s*(\d+)|listen\((\d+)/g); + if (portMatches) { + result.ports = [...new Set(portMatches.map(m => m.match(/\d+/)?.[0]).filter(Boolean))]; + } + + // Extract major imports + const importMatches = content.match(/(?:import|require)\s*(?:\{[^}]+\}|\w+)\s*from\s*['"]([^'"]+)['"]/g); + if (importMatches) { + result.dependencies = [...new Set(importMatches.slice(0, 10).map(m => m.match(/['"]([^'"]+)['"]/)?.[1]).filter(Boolean))]; + } + + return result; + } + + analyzeNamingPatterns() { + console.log('\n๐Ÿ“ Analyzing naming patterns...\n'); + + const patterns = { + kebabCase: this.files.filter(f => /-/.test(f.name.replace(/\.[^.]+$/, ''))), + camelCase: this.files.filter(f => /[a-z][A-Z]/.test(f.name.replace(/\.[^.]+$/, ''))), + uppercase: this.files.filter(f => /^[A-Z_]+\.[a-z]+$/.test(f.name)), + lowercaseWithDots: this.files.filter(f => /^[a-z]+(\.[a-z]+)+\.[a-z]+$/.test(f.name)) + }; + + for (const [style, files] of Object.entries(patterns)) { + if (files.length > 0) { + TRAINING_DATA.patterns.naming.push({ + style, + count: files.length, + examples: files.slice(0, 5).map(f => f.name) + }); + console.log(` ${style}: ${files.length} files`); + } + } + } + + generateTrainingOutput() { + const outputPath = path.join(ROOT, 'data', 'repo-training-data.json'); + + // Ensure data directory exists + const dataDir = path.dirname(outputPath); + if (!fs.existsSync(dataDir)) { + fs.mkdirSync(dataDir, { recursive: true }); + } + + fs.writeFileSync(outputPath, JSON.stringify(TRAINING_DATA, null, 2)); + console.log(`\n๐Ÿ’พ Training data saved to: data/repo-training-data.json\n`); + + return outputPath; + } + + printSummary() { + 
console.log('โ•'.repeat(60)); + console.log('๐Ÿ“‹ Repository Training Summary'); + console.log('โ•'.repeat(60)); + console.log(`\n Total files analyzed: ${this.files.length}`); + console.log(` Server variants: ${TRAINING_DATA.serverVariants.files.length}`); + console.log(` Naming patterns: ${TRAINING_DATA.patterns.naming.length}`); + console.log(` File categories: ${Object.keys(TRAINING_DATA.categories).length}`); + console.log('\n' + 'โ•'.repeat(60) + '\n'); + } + + async run() { + console.log('\n๐Ÿค– AI Repository Trainer\n'); + console.log('โ•'.repeat(60)); + + await this.scanRepository(); + this.categorizeFiles(); + this.analyzeServerVariants(); + this.analyzeNamingPatterns(); + this.generateTrainingOutput(); + this.printSummary(); + + return TRAINING_DATA; + } +} + +// CLI execution +const trainer = new AIRepoTrainer(); +trainer.run().then(data => { + console.log('Training complete! Data ready for AI consumption.\n'); +}).catch(err => { + console.error('Training failed:', err); + process.exit(1); +}); + +export default AIRepoTrainer; diff --git a/scripts/apply-sterilization.ps1 b/scripts/apply-sterilization.ps1 new file mode 100644 index 0000000..89ad0c7 --- /dev/null +++ b/scripts/apply-sterilization.ps1 @@ -0,0 +1,49 @@ +Param( + [string]$InstrumentId = '', + [string]$InstrumentModel = '', + [string]$Technician = $env:USERNAME, + [string]$Location = '', + [string]$Notes = '', + [switch]$Commit, + [switch]$DryRun +) + +# Safe default locations +$recordsDir = 'S:\NetworkBuster_Production\data\sterilization-records' +if (-not (Test-Path $recordsDir)) { New-Item -ItemType Directory -Path $recordsDir -Force | Out-Null } + +$timestamp = (Get-Date).ToString('s') +$fname = Join-Path $recordsDir ("sterilization_{0}_{1}.md" -f $timestamp.Replace(':','-'), ($InstrumentId -replace '[^\w\-]','_')) + +$content = @() +$content += "# Sterilization Record" +$content += "date: $timestamp" +$content += "technician: $Technician" +if ($InstrumentId) { $content += 
"instrument_id: $InstrumentId" }
+if ($InstrumentModel) { $content += "instrument_model: $InstrumentModel" }
+if ($Location) { $content += "location: $Location" }
+# Backtick-escape the embedded quotes: the original `"notes: "$Notes""` was a parse
+# error because an unescaped inner quote terminates a double-quoted string.
+$content += "notes: `"$Notes`""
+
+# Append a short checklist stub
+$content += ''
+$content += 'checklist:'
+$content += '  pre_clean: false'
+$content += '  mechanical_clean: false'
+$content += '  disinfection: false'
+$content += '  uvc_used: false'
+$content += '  functional_check: false'
+
+if ($DryRun) {
+  Write-Output "DRYRUN: Would write record to: $fname"
+  $content | ForEach-Object { Write-Output $_ }
+  exit 0
+}
+
+$content -join "`n" | Out-File -FilePath $fname -Encoding UTF8
+Write-Output "Wrote sterilization record: $fname"
+
+# NOTE(review): the '||' pipeline-chain operator requires PowerShell 7+ — confirm target shell.
+if ($Commit) {
+  git add $fname
+  git commit -m "chore: add sterilization record for $InstrumentId by $Technician" || Write-Output "No changes to commit"
+  git push origin HEAD || Write-Warning "Push failed"
+}
diff --git a/scripts/apply-sterilization.sh b/scripts/apply-sterilization.sh
new file mode 100644
index 0000000..383602f
--- /dev/null
+++ b/scripts/apply-sterilization.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+USAGE="Usage: $0 --id ID --model MODEL --technician NAME [--location LOC] [--notes TEXT] [--commit] [--dry-run]"
+
+ID=''
+MODEL=''
+TECH=''
+LOC=''
+NOTES=''
+COMMIT=0
+DRY=0
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --id) ID="$2"; shift 2;;
+    --model) MODEL="$2"; shift 2;;
+    --technician) TECH="$2"; shift 2;;
+    --location) LOC="$2"; shift 2;;
+    --notes) NOTES="$2"; shift 2;;
+    --commit) COMMIT=1; shift;;
+    --dry-run) DRY=1; shift;;
+    -h|--help) echo "$USAGE"; exit 0;;
+    *) echo "Unknown: $1"; echo "$USAGE"; exit 1;;
+  esac
+done
+
+if [[ -z "$ID" || -z "$MODEL" || -z "$TECH" ]]; then echo "$USAGE"; exit 1; fi
+
+RECORDS_DIR="/mnt/s/NetworkBuster_Production/data/sterilization-records"
+mkdir -p "$RECORDS_DIR"
+TS=$(date -u +%Y-%m-%dT%H-%M-%SZ)
+FILE="$RECORDS_DIR/sterilization_${TS}_${ID//[^a-zA-Z0-9_-]/_}.md"
+
+cat > "$FILE" <; git
push fork $Branch" + exit 0 + } + git remote add fork $Fork + git push fork $Branch --set-upstream + if (Get-Command gh -ErrorAction SilentlyContinue) { + gh pr create --fill --base main --head "$(git remote get-url fork | ForEach-Object { ($_ -split ':')[-1] -replace '\.git$','' }):$Branch" + } else { + Write-Output "Pushed branch. Use GitHub UI to open a PR or install gh to open PR automatically." + } +} finally { + Pop-Location + Remove-Item -Recurse -Force $tmp +} diff --git a/scripts/apply-to-upstream.sh b/scripts/apply-to-upstream.sh new file mode 100644 index 0000000..d7dec48 --- /dev/null +++ b/scripts/apply-to-upstream.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# scripts/apply-to-upstream.sh +# Usage: ./scripts/apply-to-upstream.sh --upstream https://github.com/Cleanskiier27/Final --fork git@github.com:/Final.git +# The script clones upstream, creates a branch, copies the contribution files from this workspace (contrib/Cleanskiier27-final), commits, pushes to your fork, and optionally opens a PR using `gh`. + +set -euo pipefail +WORKDIR=$(pwd) +CONTRIB_DIR="$WORKDIR/contrib/Cleanskiier27-final" + +UPSTREAM_URL="https://github.com/Cleanskiier27/Final.git" +FORK_REMOTE="" +BRANCH="feature/network-boost" + +while [[ $# -gt 0 ]]; do + case "$1" in + --upstream) UPSTREAM_URL="$2"; shift 2 ;; + --fork) FORK_REMOTE="$2"; shift 2 ;; + --branch) BRANCH="$2"; shift 2 ;; + -h|--help) echo "Usage: $0 [--upstream ] [--fork ] [--branch ]"; exit 0 ;; + *) echo "Unknown arg: $1"; exit 1 ;; + esac +done + +if [ ! -d "$CONTRIB_DIR" ]; then + echo "Contribution directory not found: $CONTRIB_DIR"; exit 1 +fi + +TEMP_DIR=$(mktemp -d) +trap 'rm -rf "$TEMP_DIR"' EXIT +cd "$TEMP_DIR" + +echo "Cloning upstream: $UPSTREAM_URL" +if ! git clone "$UPSTREAM_URL" repo; then + echo "Failed to clone upstream repo. Ensure you have access and URL is correct."; exit 1 +fi +cd repo + +# Create branch +git checkout -b "$BRANCH" + +# Copy files +echo "Copying contribution files into repo..." 
+rsync -av --exclude='.git' "$CONTRIB_DIR/" . + +# Add, commit +git add . +if git diff --staged --quiet; then + echo "No changes to commit. The contribution may already be present upstream."; exit 0 +fi + +git commit -m "Add Network Boost cross-platform utilities: scripts, docs, and PR notes" + +if [ -z "$FORK_REMOTE" ]; then + echo "No fork remote provided. Please create a fork of $UPSTREAM_URL and provide its URL with --fork to push the branch." + echo "If you have the GitHub CLI installed you can run: gh repo fork $UPSTREAM_URL --remote=true --clone=false" + echo "After adding your fork as a remote, run: git push $BRANCH" + echo "This script stops here. Your local branch is available in $TEMP_DIR/repo. You can push it manually."; + exit 0 +fi + +# Add fork remote and push +git remote add fork "$FORK_REMOTE" +git push fork "$BRANCH" --set-upstream + +# Create PR via gh if available +if command -v gh >/dev/null 2>&1; then + echo "Creating PR using gh..." + gh pr create --fill --base main --head "$(git remote get-url fork | sed -n 's#.*:\(.*\)\.git#\1#p'):$BRANCH" + echo "PR created."; +else + echo "Pushed branch to fork: $FORK_REMOTE/$BRANCH" + echo "Install GitHub CLI (gh) to open a PR automatically, or open a PR from your fork in the web UI." +fi + +echo "Done. Temporary repo path: $TEMP_DIR/repo (will be removed on exit)." 
diff --git a/scripts/build-nsis.ps1 b/scripts/build-nsis.ps1
new file mode 100644
index 0000000..6d707a7
--- /dev/null
+++ b/scripts/build-nsis.ps1
@@ -0,0 +1,71 @@
+Param(
+  # NOTE(review): previously defaulted to bash syntax ("${PWD##*/}"), which is invalid
+  # in PowerShell; the value is unused below (the zip name is derived from package.json),
+  # so an empty default keeps the parameter for callers without the broken expression.
+  [string]$DistZip = '',
+  [string]$StagingDir = "$PSScriptRoot/../staging",
+  [string]$NSISExe = "makensis"
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Ensure dist exists
+if (-not (Test-Path "dist")) { New-Item -ItemType Directory dist | Out-Null }
+
+# Extract zip to staging
+if (Test-Path $StagingDir) { Remove-Item -Recurse -Force $StagingDir }
+New-Item -ItemType Directory -Path $StagingDir | Out-Null
+
+$package = Get-Content package.json | ConvertFrom-Json
+# $() subexpression is required for property access inside a string;
+# "${package.name}" names a (nonexistent) braced variable and expands to ''.
+$zipName = "$($package.name)-$($package.version).zip"
+$zipPath = Join-Path (Resolve-Path "dist") $zipName
+if (-not (Test-Path $zipPath)) {
+  Write-Error "Zip not found: $zipPath - please run npm run dist:zip first"
+  exit 1
+}
+
+Write-Output "Extracting $zipPath to $StagingDir"
+Expand-Archive -Path $zipPath -DestinationPath $StagingDir -Force
+
+# Ensure NSIS available
+if (-not (Get-Command $NSISExe -ErrorAction SilentlyContinue)) {
+  Write-Output "makensis not found - installing via Chocolatey (requires admin)"
+  choco install nsis -y
+}
+
+# Check for icon and EULA
+$iconPath = Join-Path $PSScriptRoot 'installer\icon.ico'
+$eulaPath = Join-Path $PSScriptRoot 'installer\EULA.txt'
+$brandingDir = Join-Path $PSScriptRoot 'installer\branding'
+if (-not (Test-Path $eulaPath)) { Write-Error "EULA not found at $eulaPath. Please create EULA.txt in scripts/installer."; exit 1 }
+
+# Copy icon if present into staging
+if (Test-Path $iconPath) {
+  Copy-Item $iconPath -Destination $StagingDir -Force -Recurse
+} else {
+  Write-Output "No installer icon found at $iconPath.
You may place scripts/installer/icon.ico or run scripts/installer/convert-icon.ps1 to generate one from icon-placeholder.png"
+}
+
+# Copy branding assets into staging if present
+if (Test-Path $brandingDir) {
+  Copy-Item $brandingDir -Destination $StagingDir -Force -Recurse
+  Write-Output "Branding assets copied to staging."
+} else {
+  Write-Output "No branding assets directory found at $brandingDir. Place branding assets in scripts/installer/branding/."
+}
+
+$version = $package.version
+# Double the backslashes for NSIS -D defines.
+$stg = (Resolve-Path $StagingDir).ProviderPath.Replace('\', '\\')
+$iconArg = ''
+# Backtick-escape embedded quotes: '\"' is not an escape sequence in PowerShell and
+# made the original lines parse errors.
+if (Test-Path $iconPath) { $iconArg = "-DICON_FILE=`"$stg\\scripts\\installer\\icon.ico`"" }
+
+$cmd = "makensis -DSTAGEDIR=`"$stg`" -DVERSION=`"$version`" $iconArg scripts\\installer\\networkbuster-installer.nsi"
+Write-Output "Running: $cmd"
+Invoke-Expression $cmd
+
+# Move installer to dist (duplicated copy-paste of this block removed)
+$exeName = "NetworkBuster-$version-Setup.exe"
+if (Test-Path $exeName) { Move-Item $exeName -Destination dist -Force }
+Write-Output "Installer moved to dist\$exeName"
\ No newline at end of file
diff --git a/scripts/compare-with-luna.ps1 b/scripts/compare-with-luna.ps1
new file mode 100644
index 0000000..b6582b5
--- /dev/null
+++ b/scripts/compare-with-luna.ps1
@@ -0,0 +1,57 @@
+<#
+PowerShell helper to clone and produce a simple file-level comparison between this repo and https://github.com/Cleanskiier27/luna.eu
+Requires: git, fc (PowerShell Compare-Object), or Windows built-in tools
+Usage: .\scripts\compare-with-luna.ps1 -TargetDir external/luna.eu
+#>
+param(
+  [string]$RepoUrl = 'https://github.com/Cleanskiier27/luna.eu',
+  [string]$TargetDir = 'external/luna.eu'
+)
+
+if (-not (Get-Command git -ErrorAction SilentlyContinue)) {
+  Write-Error 'git is not installed or not in PATH.
Please install git to use this script.' + exit 1 +} + +if (-not (Test-Path $TargetDir)) { + git clone $RepoUrl $TargetDir +} else { + Push-Location $TargetDir + git fetch --all + git pull + Pop-Location +} + +$source = (Resolve-Path .).ProviderPath +$target = (Resolve-Path $TargetDir).ProviderPath + +Write-Output "Comparing $source to $target" + +# Get file lists +$left = Get-ChildItem -Recurse -File -Path $source | Select-Object -ExpandProperty FullName | ForEach-Object { $_.Substring($source.Length) } +$right = Get-ChildItem -Recurse -File -Path $target | Select-Object -ExpandProperty FullName | ForEach-Object { $_.Substring($target.Length) } + +$onlyInSource = $left | Where-Object { $_ -notin $right } +$onlyInTarget = $right | Where-Object { $_ -notin $left } + +Write-Output "\nFiles only in this repo (sample):" +$onlyInSource | Select-Object -First 30 | ForEach-Object { Write-Output $_ } + +Write-Output "\nFiles only in luna.eu (sample):" +$onlyInTarget | Select-Object -First 30 | ForEach-Object { Write-Output $_ } + +# For overlapping files show textual diffs for top N +$common = $left | Where-Object { $_ -in $right } | Select-Object -First 20 +if ($common.Count -gt 0) { + Write-Output "\nText diffs for common files (first 20):" + foreach ($f in $common) { + $a = Join-Path $source $f + $b = Join-Path $target $f + if ((Get-Content $a -ErrorAction SilentlyContinue) -and (Get-Content $b -ErrorAction SilentlyContinue)) { + Write-Output "--- $f ---" + fc $a $b | Select-Object -First 200 | ForEach-Object { Write-Output $_ } + } + } +} + +Write-Output "\nComparison complete." \ No newline at end of file diff --git a/scripts/copy-to-drive.ps1 b/scripts/copy-to-drive.ps1 new file mode 100644 index 0000000..8a1aa31 --- /dev/null +++ b/scripts/copy-to-drive.ps1 @@ -0,0 +1,122 @@ +<# +.SYNOPSIS + Copy repository artifacts (default: LFS output) to a destination drive reliably. 
+
+.DESCRIPTION
+  Performs sanity checks (disk space), creates a timestamped destination folder on the specified drive,
+  copies files via Robocopy for reliability, and generates SHA256 checksums for copied files.
+
+.PARAMETER Source
+  The source directory to copy (default: os/lfs/output).
+
+.PARAMETER DestDrive
+  The destination drive letter (e.g. 'E:'). The script will create a subfolder `networkbuster-` there.
+
+.PARAMETER IncludeRepo
+  If set, copy the repository working tree (excludes .git) instead of the default LFS output.
+
+.PARAMETER Zip
+  If set, create a zip archive of the copied folder after copy completes.
+
+.EXAMPLE
+  # Copy LFS output to E: and produce checksums
+  .\scripts\copy-to-drive.ps1 -DestDrive 'E:' -LogDir 'E:\logs'
+
+  # Copy full repo working tree (no .git)
+  .\scripts\copy-to-drive.ps1 -DestDrive 'E:' -IncludeRepo
+#>
+[CmdletBinding()]
+param(
+  [string]$Source = "$(Join-Path $PSScriptRoot '..\os\lfs\output')",
+  [Parameter(Mandatory = $true)][string]$DestDrive,
+  [switch]$IncludeRepo,
+  [switch]$Zip,
+  [string]$LogDir
+)
+
+# Pretty-print a byte count in B/KB/MB/GB.
+function Format-Bytes($bytes){
+  if ($bytes -lt 1KB) { "$bytes B" ; return }
+  if ($bytes -lt 1MB) { "{0:N2} KB" -f ($bytes/1KB); return }
+  if ($bytes -lt 1GB) { "{0:N2} MB" -f ($bytes/1MB); return }
+  "{0:N2} GB" -f ($bytes/1GB)
+}
+
+if ($IncludeRepo) {
+  $Source = Join-Path $PSScriptRoot '..' | Resolve-Path -ErrorAction Stop
+  Write-Host "IncludeRepo: copying working tree from: $Source" -ForegroundColor Cyan
+  # Exclude .git by using Robocopy /XD
+}
+
+if (-not (Test-Path $Source)) {
+  Write-Error "Source path not found: $Source"
+  exit 1
+}
+
+# Destination
+$timestamp = Get-Date -Format 'yyyyMMdd-HHmmss'
+# $() subexpression (not ${...}) is required to call a method inside a string:
+# "${DestDrive.TrimEnd('\')}" referenced a nonexistent braced variable and expanded to ''.
+$destRoot = "$($DestDrive.TrimEnd('\'))\networkbuster-$timestamp"
+
+# Ensure drive exists
+$drv = Get-PSDrive -Name ($DestDrive.TrimEnd(':')) -ErrorAction SilentlyContinue
+if (-not $drv) {
+  Write-Error "Destination drive $DestDrive not found or not accessible."
+ exit 1 +} + +# Check free space +$sourceSize = (Get-ChildItem -Path $Source -Recurse -ErrorAction SilentlyContinue | Measure-Object -Property Length -Sum).Sum +if (-not $sourceSize) { $sourceSize = 0 } +$freeBytes = $drv.Free +Write-Host "Source size: $(Format-Bytes $sourceSize) Dest free: $(Format-Bytes $freeBytes)" +if ($sourceSize -gt $freeBytes) { + Write-Error "Not enough free space on $DestDrive (needed: $(Format-Bytes $sourceSize))." + exit 1 +} + +# Create dest folder +New-Item -ItemType Directory -Path $destRoot -Force | Out-Null +Write-Host "Destination folder: $destRoot" -ForegroundColor Green + +# Optional: create a log dir +if ($LogDir) { + if (-not (Test-Path -Path $LogDir)) { New-Item -ItemType Directory -Path $LogDir -Force | Out-Null } + $logFile = Join-Path $LogDir "copy-to-drive-$timestamp.log" + Write-Host "Logging robocopy output to: $logFile" -ForegroundColor Cyan +} else { + $logFile = $null +} + +# Run Robocopy +$roboSource = (Resolve-Path $Source).Path +$roboDest = $destRoot +$roboOpts = '/MIR /COPY:DAT /R:2 /W:2 /NFL /NDL /NP /ETA' +$excludeArgs = '' +if ($IncludeRepo) { $excludeArgs = "/XD `"$($Source)\.git`"" } +$roboCmd = "robocopy `"$roboSource`" `"$roboDest`" $roboOpts $excludeArgs" +Write-Host "Running: $roboCmd" +if ($logFile) { + cmd /c $roboCmd | Out-File -FilePath $logFile -Encoding utf8 -Append +} else { + cmd /c $roboCmd +} + +# Generate checksums for top-level files +Write-Host "Generating SHA256 checksums..." 
+$checksumFile = Join-Path $destRoot 'checksums.sha256' +Get-ChildItem -Path $destRoot -Recurse -File | Sort-Object FullName | ForEach-Object { + $hash = Get-FileHash -Path $_.FullName -Algorithm SHA256 + "$($hash.Hash) *$($_.FullName.Substring($destRoot.Length+1))" | Out-File -FilePath $checksumFile -Append -Encoding utf8 +} +Write-Host "Checksums written to: $checksumFile" -ForegroundColor Green + +# Optionally zip +if ($Zip) { + $zipPath = "${destRoot}.zip" + Write-Host "Creating zip: $zipPath" + Compress-Archive -Path (Join-Path $destRoot '*') -DestinationPath $zipPath -Force + Write-Host "Zip created: $zipPath" -ForegroundColor Green +} + +Write-Host "Copy complete. Destination: $destRoot" -ForegroundColor Cyan +if ($logFile) { Write-Host "Robocopy log: $logFile" -ForegroundColor Cyan } +if ($script:LogFile) { Write-Host "Script transcript (if any) ended at: $script:LogFile" -ForegroundColor Cyan } diff --git a/scripts/create-shortcut.ps1 b/scripts/create-shortcut.ps1 new file mode 100644 index 0000000..983b9ff --- /dev/null +++ b/scripts/create-shortcut.ps1 @@ -0,0 +1,12 @@ +param( + [string]$target = "${PWD}\start-desktop.bat", + [string]$name = "NetworkBuster Launcher" +) +$desktop = [Environment]::GetFolderPath("Desktop") +$shortcut = Join-Path $desktop ("$name.lnk") +$WshShell = New-Object -ComObject WScript.Shell +$sc = $WshShell.CreateShortcut($shortcut) +$sc.TargetPath = $target +$sc.WorkingDirectory = "${PWD}" +$sc.Save() +Write-Output "Shortcut created: $shortcut" \ No newline at end of file diff --git a/scripts/detect-dotnet-projects.ps1 b/scripts/detect-dotnet-projects.ps1 new file mode 100644 index 0000000..1a1dddb --- /dev/null +++ b/scripts/detect-dotnet-projects.ps1 @@ -0,0 +1,137 @@ +<# +Detect .NET projects in the workspace and update .vscode/launch.json with sane Launch configs. 
+Usage: + .\scripts\detect-dotnet-projects.ps1 [-DryRun] + +Behavior: + - Searches recursively for *.csproj files (excluding bin/obj paths) + - For each project, attempts to determine AssemblyName and TargetFramework (first if multiple) + - Infers the typical output path: /bin/Debug//.dll + - If output doesn't exist, optionally runs `dotnet build ` to produce outputs + - Updates .vscode/launch.json by replacing the sample Launch entry with per-project entries + - Backups the previous launch.json to .vscode/launch.json.bak +#> +param( + [switch]$DryRun +) + +function Get-Projects { + Get-ChildItem -Path (Get-Location).Path -Filter *.csproj -Recurse -ErrorAction SilentlyContinue | + Where-Object { $_.FullName -notmatch "\\bin\\|\\obj\\" } +} + +function Parse-CsProj([string]$path) { + try { + [xml]$xml = Get-Content -Path $path -Raw -ErrorAction Stop + $ns = @{ms = $xml.Project.NamespaceURI} + $assemblyName = ($xml.Project.PropertyGroup.AssemblyName | Where-Object { $_ }) -join '' + if (-not $assemblyName) { $assemblyName = [System.IO.Path]::GetFileNameWithoutExtension($path) } + $tf = ($xml.Project.PropertyGroup.TargetFramework | Where-Object { $_ }) -join '' + if (-not $tf) { + $tfs = ($xml.Project.PropertyGroup.TargetFrameworks | Where-Object { $_ }) -join '' + if ($tfs) { $tf = ($tfs -split ';')[0] } + } + return @{ ProjectPath = $path; ProjectDir = (Split-Path $path -Parent); Assembly = $assemblyName; TF = $tf } + } catch { + Write-Warning "Failed to parse $path: $($_)" + return $null + } +} + +function Infer-Output([hashtable]$info) { + $projDir = $info.ProjectDir + $assembly = $info.Assembly + $tf = $info.TF + if ($tf) { + $dll = Join-Path $projDir "bin\Debug\$tf\$assembly.dll" + } else { + # try to find any net*/ paths + $candidates = Get-ChildItem -Path (Join-Path $projDir 'bin\Debug') -Directory -ErrorAction SilentlyContinue | Where-Object { $_.Name -match '^net' } + if ($candidates -and $candidates.Count -gt 0) { + $dll = Join-Path $projDir 
"bin\Debug\$($candidates[0].Name)\$assembly.dll" + } else { + $dll = Join-Path $projDir "bin\Debug\$assembly.dll" + } + } + return $dll +} + +function Update-LaunchJson([array]$projects, [switch]$dryRun) { + $launchPath = Join-Path -Path (Get-Location).Path -ChildPath '.vscode\launch.json' + if (-not (Test-Path $launchPath)) { + Write-Host "No existing $launchPath found; creating a new one." -ForegroundColor Cyan + $base = @{ version = '0.2.0'; configurations = @() } + } else { + $base = Get-Content -Raw -Path $launchPath | ConvertFrom-Json -ErrorAction Stop + # backup + Copy-Item -Path $launchPath -Destination "$launchPath.bak" -Force + } + + # Remove any existing auto-generated entries we created before (marker: generatedBy=detect-dotnet-projects) + $filtered = @() + foreach ($cfg in $base.configurations) { + if (-not ($cfg.generatedBy -and $cfg.generatedBy -eq 'detect-dotnet-projects')) { $filtered += $cfg } + } + + foreach ($p in $projects) { + $name = ".NET: Launch - $([IO.Path]::GetFileNameWithoutExtension($p.ProjectPath))" + $program = $p.OutputDll + $cfg = @{ + name = $name + type = 'coreclr' + request = 'launch' + preLaunchTask = 'build' + program = $program + args = @() + cwd = '${workspaceFolder}' + stopAtEntry = $false + console = 'integratedTerminal' + justMyCode = $true + generatedBy = 'detect-dotnet-projects' + } + $filtered += $cfg + } + + $base.configurations = $filtered + + if ($dryRun) { + Write-Host "Dry-run: would write the following launch.json content:" -ForegroundColor Cyan + $base | ConvertTo-Json -Depth 10 | Write-Output + return + } + + $base | ConvertTo-Json -Depth 10 | Set-Content -Path $launchPath -Encoding UTF8 + Write-Host "Updated $launchPath with $($projects.Count) launch configurations. Backup at $launchPath.bak" -ForegroundColor Green +} + +# Main +$projs = Get-Projects | ForEach-Object { Parse-CsProj $_.FullName } | Where-Object { $_ } +if (-not $projs -or $projs.Count -eq 0) { + Write-Host "No .csproj files found in workspace." 
-ForegroundColor Yellow + exit 0 +} + +$projectsToWrite = @() +foreach ($p in $projs) { + $dll = Infer-Output $p + if (-not (Test-Path $dll)) { + Write-Host "Output not found for project $($p.ProjectPath). Attempting to build to produce outputs..." -ForegroundColor Yellow + $b = dotnet build $p.ProjectPath + if ($LASTEXITCODE -ne 0) { Write-Warning "dotnet build failed for $($p.ProjectPath). The inferred output path may not exist." } + } + $dll = Infer-Output $p # re-infer + $p.Add('OutputDll', $dll) + $projectsToWrite += $p +} + +if ($projectsToWrite.Count -eq 0) { Write-Host 'No projects to add to launch.json' -ForegroundColor Yellow; exit 0 } + +# Update launch.json +Update-LaunchJson -projects $projectsToWrite -dryRun:$DryRun + +# Print summary +foreach ($x in $projectsToWrite) { + Write-Host "Project: $($x.ProjectPath) -> Output: $($x.OutputDll)" -ForegroundColor Cyan +} + +Write-Host "Done." -ForegroundColor Green diff --git a/scripts/generate-icons.ps1 b/scripts/generate-icons.ps1 new file mode 100644 index 0000000..11775b7 --- /dev/null +++ b/scripts/generate-icons.ps1 @@ -0,0 +1,46 @@ +<# +generate-icons.ps1 +Generates multi-size PNG icons and an ICO from `scripts/installer/branding/logo.svg` or `scripts/installer/branding/icons/icon-256.png` using ImageMagick (`magick`). +Usage: powershell -ExecutionPolicy Bypass -File scripts/generate-icons.ps1 +#> +$ErrorActionPreference = 'Stop' + +$branding = Join-Path $PSScriptRoot 'installer\branding' +$iconsDir = Join-Path $branding 'icons' +if (-not (Test-Path $iconsDir)) { New-Item -ItemType Directory -Path $iconsDir | Out-Null } + +$sourceSvg = Join-Path $branding 'logo.svg' +$sourcePng = Join-Path $iconsDir 'icon-256.png' + +if (-not (Get-Command magick -ErrorAction SilentlyContinue)) { + Write-Output "ImageMagick 'magick' not found. Install ImageMagick to generate icons automatically." + Write-Output "Place prepared icons into $iconsDir or run the convert script on a machine with ImageMagick." 
+    exit 0
+}
+
+$sizes = @(256,128,64,48,32,16)
+
+if (Test-Path $sourceSvg) {
+    foreach ($s in $sizes) {
+        $out = Join-Path $iconsDir "icon-$s.png"
+        Write-Output "Generating $out from $sourceSvg"
+        magick convert -background none -density 300 $sourceSvg -resize ${s}x${s} $out
+    }
+} elseif (Test-Path $sourcePng) {
+    foreach ($s in $sizes) {
+        $out = Join-Path $iconsDir "icon-$s.png"
+        Write-Output "Generating $out from $sourcePng"
+        magick convert $sourcePng -resize ${s}x${s} $out
+    }
+} else {
+    Write-Error "No source logo.svg or icon-256.png found. Place one in $branding or $iconsDir."
+}
+
+# Build a multi-size ICO from the PNGs generated above.
+$ico = Join-Path $iconsDir 'icon.ico'
+# FIX: previous code also built bogus paths like "icon-256 + '.png'" (unused) and then
+# passed every PNG path to ImageMagick as a single space-joined argument, which fails.
+# Passing the array lets PowerShell expand each path as a separate native-command argument.
+$pngArgs = $sizes | ForEach-Object { Join-Path $iconsDir "icon-$_.png" }
+$pngArgsStr = $pngArgs -join ' '
+Write-Output "Creating ICO $ico from: $pngArgsStr"
+magick convert $pngArgs $ico
+Write-Output "Created ICO: $ico"
diff --git a/scripts/generate-project-index.ps1 b/scripts/generate-project-index.ps1
new file mode 100644
index 0000000..8feb26a
--- /dev/null
+++ b/scripts/generate-project-index.ps1
@@ -0,0 +1,90 @@
+<#
+Generate a Markdown project index for the repository.
+Creates PROJECT_INDEX.md in the repository root with a table of contents
+and short descriptions for each top-level folder and notable files.
+
+Usage: .\generate-project-index.ps1 [-OutputFile ..\PROJECT_INDEX.md]
+#>
+param(
+    [string]$OutputFile = "$(Split-Path -Parent $PSScriptRoot)\PROJECT_INDEX.md",
+    [int]$MaxFilePreviewLines = 3
+)
+
+Write-Output "Generating project index -> $OutputFile"
+Write-Output "PSScriptRoot: $PSScriptRoot"
+Write-Output "Computed OutputFile: $OutputFile"
+
+# Short description for a folder: first Markdown H1 in README.md, else the first
+# README lines, else package.json "description", else ''.
+function Get-DescriptionFromReadme($path) {
+    $readme = Join-Path $path 'README.md'
+    if (Test-Path $readme) {
+        $lines = Get-Content $readme -ErrorAction SilentlyContinue | Select-Object -First 4
+        # FIX: -SimpleMatch disabled regex matching, so the capture group was never
+        # populated and no title could ever be extracted; the pattern is a regex.
+        $title = ($lines | Select-String -Pattern '^#\s*(.+)' | ForEach-Object { $_.Matches[0].Groups[1].Value } | Select-Object -First 1)
+        if ($title) { return $title }
+        if ($lines) { return ($lines -join ' ') }
+    }
+    # fallback to package.json description
+    $pkg = Join-Path $path 'package.json'
+    if (Test-Path $pkg) {
+        try { $o = Get-Content $pkg -Raw | ConvertFrom-Json; if ($o.description) { return $o.description } } catch {}
+    }
+    return ''
+}
+
+# First $MaxFilePreviewLines lines of a file, joined into a single preview string.
+function Get-FilePreview($file) {
+    try { $lines = Get-Content $file -ErrorAction SilentlyContinue | Select-Object -First $MaxFilePreviewLines; return ($lines -join ' ' ) } catch { return '' }
+}
+
+$root = Split-Path -Parent $PSScriptRoot
+$items = Get-ChildItem -Path $root | Where-Object { $_.Name -notlike '.git' -and $_.Name -notlike 'node_modules' } | Sort-Object PSIsContainer -Descending,Name
+
+$md = @()
+$md += "# Project Index"
+$md += ""
+$md += "Generated: $(Get-Date -Format 'u')"
+$md += ""
+$md += "## Table of Contents"
+$md += ""
+
+# Build TOC
+$toc = @()
+foreach ($it in $items) {
+    # FIX: "${(...)}" is braced-variable-name syntax, not a subexpression, so every
+    # anchor was emitted empty; compute the anchor once and interpolate it.
+    $anchor = $it.Name -replace ' ','-'
+    $toc += "- [$($it.Name)](#$anchor)"
+}
+$md += $toc
+$md += ""
+
+$md += "---"
+$md += ""
+
+# Add detail sections
+foreach ($it in $items) {
+    $md += "### $($it.Name)"
+    if ($it.PSIsContainer) {
+        $desc = Get-DescriptionFromReadme $it.FullName
+        if ($desc) { $md += "**Description:** $desc" }
+        $md += ""
+        $md += "**Contents:**"
+        $md += ""
+        $children = Get-ChildItem -Path $it.FullName -Force | Sort-Object PSIsContainer -Descending,Name | Select-Object -First 50
+        foreach ($c in $children) {
+            if ($c.PSIsContainer) { $md += "- **$($c.Name)**/" }
+            else {
+                $preview = Get-FilePreview $c.FullName
+                # FIX: `$ escaped the dollar sign (the preview text was never
+                # interpolated) and the trailing `" escaped the closing quote,
+                # corrupting parsing; `` emits a literal backtick for the code span.
+                if ($preview) { $md += "- $($c.Name) — ``$preview``" } else { $md += "- $($c.Name)" }
+            }
+        }
+        $md += ""
+    } else {
+        $preview = Get-FilePreview $it.FullName
+        if ($preview) { $md += "``$($it.Name)`` — $preview" } else { $md += "``$($it.Name)``" }
+        $md += ""
+    }
+}
+
+$md += "---"
+$md += ""
+$md += "> **Note:** If you want more detail per file/folder run the generator with a smaller ``MaxFilePreviewLines`` or extend the script to include file size, hash, or a deep recursive index."
+
+$md -join "`n" | Out-File -FilePath $OutputFile -Encoding utf8 -Force
+
+Write-Output "Project index written to $OutputFile"
diff --git a/scripts/install-datacentra.ps1 b/scripts/install-datacentra.ps1
new file mode 100644
index 0000000..533219a
--- /dev/null
+++ b/scripts/install-datacentra.ps1
@@ -0,0 +1,87 @@
+Param(
+    [string]$Source = 'S:\NetworkBuster_Production',
+    [string]$Dest = 'E:\DATACENTRA',
+    [switch]$CompareOnly
+)
+
+Function Require-Admin {
+    if (-not ([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) {
+        Write-Output "Re-launching as Administrator..."
+
+        # If F: is available, copy the script there and run from F: so elevated session uses F:\ context
+        $runFromF = $false
+        if (Test-Path 'F:\') {
+            try {
+                $destDir = 'F:\scripts'
+                if (-not (Test-Path $destDir)) { New-Item -ItemType Directory -Path $destDir -Force | Out-Null }
+                $destPath = Join-Path $destDir (Split-Path $PSCommandPath -Leaf)
+                Copy-Item -Path $PSCommandPath -Destination $destPath -Force
+                $runFromF = Test-Path $destPath
+                if ($runFromF) { Write-Output "Copied script to $destPath and will relaunch from F: drive" }
+            } catch {
+                Write-Warning "Could not prepare F: relaunch: $($_.Exception.Message)"
+                $runFromF = $false
+            }
+        }
+
+        # FIX: do not assign to the automatic variable $args, and forward -CompareOnly so
+        # the elevated session behaves like the original invocation.
+        $scriptPath = if ($runFromF) { $destPath } else { $PSCommandPath }
+        $argList = @('-NoProfile','-ExecutionPolicy','Bypass','-File',$scriptPath,'-Source',$Source,'-Dest',$Dest)
+        if ($CompareOnly) { $argList += '-CompareOnly' }
+        Start-Process -FilePath powershell -ArgumentList $argList -Verb RunAs
+        Exit
+    }
+}
+
+Require-Admin
+
+Write-Output "Installing DATACENTRA from $Source to $Dest"
+
+# Ensure dest exists
+if (-not (Test-Path $Dest)) { New-Item -ItemType Directory -Path $Dest -Force | Out-Null }
+
+# Files to copy. FIX: single-quoted strings take backslashes literally, so '\\' produced
+# doubled path separators; use single backslashes.
+$files = @(
+    'docs\AI_TRAINING_AND_DATA_PERSONALIZATION.md',
+    'challengerepo\real-time-overlay\src\components\ImmersiveReader.jsx',
+    'challengerepo\real-time-overlay\src\App.jsx'
+)
+
+# FIX: honor -CompareOnly (previously declared but ignored) by skipping the copy step.
+if (-not $CompareOnly) {
+    foreach ($f in $files) {
+        $s = Join-Path $Source $f
+        $d = Join-Path $Dest $f
+        if (Test-Path $s) {
+            $dDir = Split-Path $d -Parent
+            if (-not (Test-Path $dDir)) { New-Item -ItemType Directory -Path $dDir -Force | Out-Null }
+            Copy-Item -Path $s -Destination $d -Force
+            Write-Output "Copied: $f"
+        } else {
+            Write-Output "Missing source: $s"
+        }
+    }
+}
+
+# Compare AI training related files and produce a diff report
+$report = Join-Path $Source 'data\datacentra-diff.txt'
+# Ensure the report directory exists before Out-File writes to it.
+$reportDir = Split-Path $report -Parent
+if (-not (Test-Path $reportDir)) { New-Item -ItemType Directory -Path $reportDir -Force | Out-Null }
+Write-Output "Comparing AI training files and writing report to $report"
+"DATACENTRA diff report - $(Get-Date -Format s)" | Out-File $report
+foreach ($f in $files) {
+    $sPath = Join-Path $Source $f
+    $dPath = Join-Path $Dest $f
+    # FIX: 'Test-Path $a -and Test-Path $b' passes '-and' as an argument to Test-Path
+    # and fails at runtime; each Test-Path call must be parenthesized.
+    if ((Test-Path $sPath) -and (Test-Path $dPath)) {
+        $sHash = Get-FileHash -Algorithm SHA256 -Path $sPath | Select-Object -ExpandProperty Hash
+        $dHash = Get-FileHash -Algorithm SHA256 -Path $dPath | Select-Object -ExpandProperty Hash
+        if ($sHash -eq $dHash) {
+            "MATCH: $f" | Out-File -Append $report
+        } else {
+            "DIFFER: $f" | Out-File -Append $report
+            "  SourceHash: $sHash" | Out-File -Append $report
+            "  DestHash: $dHash" | Out-File -Append $report
+        }
+    } else {
+        "MISSING: $f (source or dest missing)" | Out-File -Append $report
+    }
+}
+
+Write-Output "Install and compare complete. Report: $report"
diff --git a/scripts/install-nbapp-service.ps1 b/scripts/install-nbapp-service.ps1
new file mode 100644
index 0000000..e49746b
--- /dev/null
+++ b/scripts/install-nbapp-service.ps1
@@ -0,0 +1,108 @@
+<#
+Install the nbapp application from GitHub and optionally register it as a Windows service using NSSM.
+ +Usage: + .\install-nbapp-service.ps1 -Repo 'https://github.com/NetworkBuster/nbapp.git' -InstallDir 'S:\apps\nbapp' -InstallService -ServiceName 'nbapp' + +#> +param( + [string]$Repo = 'https://github.com/NetworkBuster/nbapp.git', + [string]$Branch = 'main', + [string]$InstallDir = 'S:\apps\nbapp', + [switch]$InstallService, + [string]$ServiceName = 'nbapp', + [string]$NodePath = '', + [string]$LogDir = 'S:\apps\nbapp\logs' +) + +function Write-Ok { param($m) Write-Host $m -ForegroundColor Green } +function Write-Warn { param($m) Write-Host $m -ForegroundColor Yellow } +function Write-Err { param($m) Write-Host $m -ForegroundColor Red } + +if (-not (Get-Command git -ErrorAction SilentlyContinue)) { Write-Err 'git is required but not found in PATH.'; exit 1 } + +# Ensure install dir +if (-not (Test-Path $InstallDir)) { New-Item -ItemType Directory -Path $InstallDir -Force | Out-Null } + +if ((Get-ChildItem -Path $InstallDir -Force -ErrorAction SilentlyContinue | Measure-Object).Count -eq 0) { + Write-Ok "Cloning $Repo into $InstallDir" + git clone --branch $Branch $Repo $InstallDir +} else { + Write-Ok "Directory exists, updating: $InstallDir" + Push-Location $InstallDir; try { git fetch origin; git checkout $Branch; git pull --ff-only origin $Branch } catch { Write-Warn "Git pull failed: $($_.Exception.Message)" } ; Pop-Location +} + +# Run npm install if package.json present +if (Test-Path (Join-Path $InstallDir 'package.json')) { + Write-Ok 'Running npm install in application directory' + $npm = (Get-Command npm -ErrorAction SilentlyContinue).Path + if (-not $npm) { Write-Warn 'npm not found on PATH; try running "npm install" manually or provide a Node runtime.' } else { + Push-Location $InstallDir; & $npm install --no-audit --no-fund; Pop-Location + } +} else { + Write-Warn 'No package.json found; skipping npm install.' 
+}
+
+# Determine application entry script (prefer package.json main, fallback to server.js)
+$appArgs = 'server.js'
+try {
+    $pkg = Get-Content (Join-Path $InstallDir 'package.json') -Raw -ErrorAction SilentlyContinue | ConvertFrom-Json
+    if ($pkg) {
+        if ($pkg.main) { $appArgs = $pkg.main }
+        elseif ($pkg.scripts -and $pkg.scripts.start) {
+            # If the start script runs node directly, extract the file name; else fall back to npm start.
+            # FIX: the capture group lost its name, so $Matches['file'] could never resolve;
+            # it must be a named group (?<file>...).
+            if ($pkg.scripts.start -match 'node\s+(?<file>\S+)') { $appArgs = $Matches['file'] } else { $appArgs = 'npm start' }
+        }
+    }
+} catch {} # best-effort: a malformed package.json simply keeps the server.js default
+
+Write-Ok "Using app args: $appArgs"
+
+# Determine node runtime
+if (-not $NodePath) {
+    # prefer repo-local tools/node if present
+    $candidate = Join-Path (Split-Path -Parent $MyInvocation.MyCommand.Definition) '..' | Resolve-Path -ErrorAction SilentlyContinue
+    $repoRoot = if ($candidate) { (Resolve-Path $candidate).Path } else { (Split-Path -Parent $MyInvocation.MyCommand.Definition) }
+    $localNode = Join-Path $repoRoot 'tools\node\node.exe'
+    if (Test-Path $localNode) { $NodePath = $localNode }
+    else { $NodePath = (Get-Command node -ErrorAction SilentlyContinue).Path }
+}
+
+if (-not $NodePath) { Write-Warn 'Node runtime not found automatically. You can pass -NodePath to this script to point to a node.exe' }
+else { Write-Ok "Using Node runtime: $NodePath" }
+
+if ($InstallService) {
+    # Call install-service-nssm.ps1 as admin to register the watchdog/service
+    $installer = Join-Path (Split-Path -Parent $MyInvocation.MyCommand.Definition) 'install-service-nssm.ps1'
+    if (-not (Test-Path $installer)) { Write-Err "Service installer not found: $installer"; exit 1 }
+
+    $watchdog = Join-Path (Split-Path -Parent $MyInvocation.MyCommand.Definition) 'watchdog.ps1'
+    if (-not (Test-Path $watchdog)) { Write-Err "Watchdog script not found: $watchdog"; exit 1 }
+
+    # Build AppArgs: if it's 'npm start', install-service will invoke node with 'npm start' which doesn't work; instead use 'node ' or call npm as exe. We'll support both.
+    $appArgsToPass = $appArgs
+    $useNpm = $false
+    if ($appArgs -eq 'npm start') { $useNpm = $true }
+
+    if ($useNpm) {
+        $appExe = (Get-Command npm -ErrorAction SilentlyContinue).Path
+        if (-not $appExe) { Write-Warn 'npm not found; service install may fail; consider installing Node or providing NodePath'; $appExe = $NodePath }
+        $appArgsToPass = 'start'
+        # For NSSM, we will call install script with AppExe = npm and AppArgs = start
+        $nssmArgs = @{
+            WatchdogPath = $watchdog; NodePath = $appExe; AppArgs = $appArgsToPass; WorkingDir = $InstallDir; LogDir = $LogDir; ServiceName = $ServiceName
+        }
+    } else {
+        # AppExe will be NodePath and AppArgs $appArgsToPass
+        $nssmArgs = @{
+            WatchdogPath = $watchdog; NodePath = $NodePath; AppArgs = $appArgsToPass; WorkingDir = $InstallDir; LogDir = $LogDir; ServiceName = $ServiceName
+        }
+    }
+
+    Write-Ok "Installing service '$ServiceName' via NSSM (this will prompt for elevation)."
+    $argList = "-NoProfile -ExecutionPolicy Bypass -File `"$installer`" -WatchdogPath `"$($nssmArgs.WatchdogPath)`" -NodePath `"$($nssmArgs.NodePath)`" -AppArgs `"$($nssmArgs.AppArgs)`" -WorkingDir `"$($nssmArgs.WorkingDir)`" -LogDir `"$($nssmArgs.LogDir)`" -ServiceName `"$($nssmArgs.ServiceName)`""
+    Start-Process -FilePath powershell -ArgumentList $argList -Verb RunAs -Wait
+    # FIX: 'text ' + $ServiceName in argument position passes '+' and $ServiceName as
+    # extra positional arguments, so the service name was never printed; parenthesize.
+    Write-Ok ('Service installer finished. Check service status with Get-Service -Name ' + $ServiceName)
+} else {
+    Write-Ok 'Install completed (service not installed). To install the service, re-run with -InstallService and accept UAC when prompted.'
+}
diff --git a/scripts/install-node-msi.ps1 b/scripts/install-node-msi.ps1
new file mode 100644
index 0000000..d9d342f
--- /dev/null
+++ b/scripts/install-node-msi.ps1
@@ -0,0 +1,48 @@
+<#
+Download and install Node MSI (24.x) safely.
+This script ensures the MSI is fully downloaded before launching the installer.
+
+Usage:
+  .\install-node-msi.ps1 -Version '24.x' -AcceptUAC
+#>
+param(
+    [string]$VersionPath = 'latest-v24.x',
+    [switch]$AcceptUAC
+)
+
+function Fail([string]$m) { Write-Error $m; exit 1 }
+
+$base = "https://nodejs.org/dist/$VersionPath/"
+Write-Output "Fetching index from $base"
+try { $index = Invoke-WebRequest -Uri $base -UseBasicParsing -ErrorAction Stop; $content = $index.Content } catch { Fail "Failed to fetch Node index: $($_.Exception.Message)" }
+
+# FIX: the named capture groups were mangled; $match.Groups['name'] requires (?<name>...).
+$match = [regex]::Match($content,'href="(?<name>node-v(?<ver>\d+\.\d+\.\d+)-x64\.msi)"')
+if (-not $match.Success) { Fail 'Could not find MSI on Node index page' }
+
+$msiName = $match.Groups['name'].Value
+$msiUrl = $base + $msiName
+$tmp = Join-Path $env:TEMP $msiName
+
+Write-Output "Downloading $msiUrl -> $tmp"
+Invoke-WebRequest -Uri $msiUrl -OutFile $tmp -UseBasicParsing -ErrorAction Stop
+
+# Wait until file is stable (size not changing) and above a sensible threshold
+$maxWait = 60; $waited = 0; $prevSize = -1
+while ($waited -lt $maxWait) {
+    if (Test-Path $tmp) {
+        $s = (Get-Item $tmp).Length
+        if ($s -gt 1024*1024 -and $s -eq $prevSize) { break }
+        $prevSize = $s
+    }
+    Start-Sleep -Seconds 1
+    $waited++
+}
+if (-not (Test-Path $tmp)) { Fail "Downloaded file missing: $tmp" }
+if ((Get-Item $tmp).Length -lt 1024*1024) { Fail "Downloaded MSI appears too small: $((Get-Item $tmp).Length) bytes" }
+
+Write-Output "Installer ready: $tmp (size: $((Get-Item $tmp).Length) bytes)"
+if (-not $AcceptUAC) { Fail 'Refusing to run installer without UAC acceptance. Re-run with -AcceptUAC to proceed.' }
+
+Write-Output 'Launching MSI (UAC prompt will appear)'
+Start-Process -FilePath 'msiexec.exe' -ArgumentList '/i',"$tmp",'/passive' -Verb RunAs -Wait
+Write-Output 'Installer finished.'
diff --git a/scripts/install-nvm.ps1 b/scripts/install-nvm.ps1
new file mode 100644
index 0000000..ca1ef4f
--- /dev/null
+++ b/scripts/install-nvm.ps1
@@ -0,0 +1,43 @@
+<#
+Download and run nvm-windows installer safely. Ensures the EXE is fully downloaded before launch.
+
+Usage:
+  .\install-nvm.ps1 -AcceptUAC
+#>
+param(
+    [switch]$AcceptUAC
+)
+
+function Fail([string]$m) { Write-Error $m; exit 1 }
+
+$releases = 'https://github.com/coreybutler/nvm-windows/releases/latest'
+Write-Output "Fetching releases page: $releases"
+try { $page = Invoke-WebRequest -Uri $releases -UseBasicParsing -ErrorAction Stop; $content = $page.Content } catch { Fail "Failed to fetch releases page: $($_.Exception.Message)" }
+
+# FIX: the named capture group was mangled; $m.Groups['url'] requires (?<url>...).
+$m = [regex]::Match($content,'href="(?<url>/coreybutler/nvm-windows/releases/download/[^"]*nvm-setup\.exe)"')
+if (-not $m.Success) { Fail 'Could not locate nvm-setup.exe link on releases page' }
+
+$url = 'https://github.com' + $m.Groups['url'].Value
+$dest = Join-Path $env:TEMP 'nvm-setup.exe'
+Write-Output "Downloading $url -> $dest"
+Invoke-WebRequest -Uri $url -OutFile $dest -UseBasicParsing -ErrorAction Stop
+
+# Wait until file is stable and reasonable size
+$maxWait = 60; $waited = 0; $prevSize = -1
+while ($waited -lt $maxWait) {
+    if (Test-Path $dest) {
+        $s =
(Get-Item $dest).Length + if ($s -gt 10240 -and $s -eq $prevSize) { break } + $prevSize = $s + } + Start-Sleep -Seconds 1; $waited++ +} +if (-not (Test-Path $dest)) { Fail "Downloaded file missing: $dest" } +if ((Get-Item $dest).Length -lt 10240) { Fail "Downloaded installer appears too small: $((Get-Item $dest).Length) bytes" } + +Write-Output "Installer ready: $dest" +if (-not $AcceptUAC) { Fail 'Refusing to run installer without UAC acceptance. Re-run with -AcceptUAC to proceed.' } + +Write-Output 'Launching nvm installer (UAC prompt will appear)' +Start-Process -FilePath $dest -Verb RunAs -Wait +Write-Output 'nvm installer finished.' diff --git a/scripts/install-service-nssm.ps1 b/scripts/install-service-nssm.ps1 new file mode 100644 index 0000000..9f49a09 --- /dev/null +++ b/scripts/install-service-nssm.ps1 @@ -0,0 +1,78 @@ +<# +Install NetworkBuster as a Windows service using NSSM. +Run this script as Administrator. + +Usage: + .\install-service-nssm.ps1 -WatchdogPath 'S:\NetworkBuster_Production\scripts\watchdog.ps1' -NodePath 'C:\Program Files\nodejs\node.exe' +#> +param( + [string]$WatchdogPath = 'S:\NetworkBuster_Production\scripts\watchdog.ps1', + [string]$NodePath = 'C:\Program Files\nodejs\node.exe', + [string]$AppArgs = 'start-servers.js', + [string]$WorkingDir = 'S:\NetworkBuster_Production', + [string]$LogDir = 'S:\NetworkBuster_Production\logs', + [string]$ServiceName = 'NetworkBuster', + [string]$NssmDir = 'C:\tools\nssm' +) + +function Assert-Admin { + if (-not ([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) { + Write-Error "This script must be run as Administrator."; exit 1 + } +} + +Assert-Admin + +if (-not (Test-Path $WatchdogPath)) { Write-Error "Watchdog not found at $WatchdogPath"; exit 1 } +if (-not (Test-Path $NodePath)) { Write-Error "Node not found at $NodePath"; exit 1 } + +# Ensure logs dir +if (-not (Test-Path $LogDir)) { 
New-Item -ItemType Directory -Path $LogDir -Force | Out-Null }
+
+# Download and install NSSM if missing
+$nssmExe = Join-Path $NssmDir 'nssm.exe'
+if (-not (Test-Path $nssmExe)) {
+    Write-Output "NSSM not found. Installing to $NssmDir"
+    try {
+        New-Item -ItemType Directory -Path $NssmDir -Force | Out-Null
+        $tmpZip = Join-Path $env:TEMP 'nssm.zip'
+        $url = 'https://nssm.cc/release/nssm-2.24.zip'
+        Write-Output "Downloading NSSM from $url"
+        Invoke-WebRequest -Uri $url -OutFile $tmpZip -UseBasicParsing -ErrorAction Stop
+        # Ensure file fully written and non-empty
+        $tries = 0
+        while ($tries -lt 5) {
+            if ((Test-Path $tmpZip) -and ((Get-Item $tmpZip).Length -gt 10240)) { break }
+            Start-Sleep -Seconds 1
+            $tries++
+        }
+        if (-not (Test-Path $tmpZip)) { Write-Error "Download failed: $tmpZip not found"; exit 1 }
+        # FIX: "${(...)}" is braced-variable-name syntax and does not evaluate the
+        # expression; a "$(...)" subexpression is required to interpolate the length.
+        if ((Get-Item $tmpZip).Length -le 10240) { Write-Error "Downloaded file is too small ($((Get-Item $tmpZip).Length) bytes); aborting."; exit 1 }
+        Expand-Archive -Path $tmpZip -DestinationPath $env:TEMP -Force
+        # copy win64 nssm.exe if present
+        $candidate = Get-ChildItem -Path (Join-Path $env:TEMP 'nssm-*') -Recurse -Filter 'nssm.exe' -ErrorAction SilentlyContinue | Where-Object { $_.FullName -match 'win64' } | Select-Object -First 1
+        if (-not $candidate) { $candidate = Get-ChildItem -Path (Join-Path $env:TEMP 'nssm-*') -Recurse -Filter 'nssm.exe' -ErrorAction SilentlyContinue | Select-Object -First 1 }
+        if ($candidate) { Copy-Item -Path $candidate.FullName -Destination $nssmExe -Force; Write-Output "Installed NSSM from $($candidate.FullName)" } else { Write-Warning "Could not locate nssm.exe in archive. 
Please install NSSM manually to $NssmDir and re-run."; exit 1 } + } catch { Write-Error "Failed to install NSSM: $($_.Exception.Message)"; exit 1 } +} + +# Build watchdog command +$watchdogCmd = "-NoProfile -ExecutionPolicy Bypass -File `"$WatchdogPath`" -AppExe `"$NodePath`" -AppArgs `"$AppArgs`" -WorkingDir `"$WorkingDir`" -LogDir `"$LogDir`" -HealthUrl `"http://localhost:3001/api/health`" -HealthInterval 30 -RestartBackoff 5" + +# Install service +Write-Output "Installing service $ServiceName using $nssmExe" +& $nssmExe install $ServiceName 'powershell.exe' $watchdogCmd +# Configure stdout/stderr and other settings +& $nssmExe set $ServiceName AppStdout (Join-Path $LogDir 'service.out.log') +& $nssmExe set $ServiceName AppStderr (Join-Path $LogDir 'service.err.log') +& $nssmExe set $ServiceName AppRotateFiles 1 +& $nssmExe set $ServiceName AppRestartDelay 5000 + +# Set service to auto-start and start it +Set-Service -Name $ServiceName -StartupType Automatic +Start-Service -Name $ServiceName + +Start-Sleep -Seconds 2 +$svc = Get-Service -Name $ServiceName +Write-Output "Service status: $($svc.Status)" +Write-Output "Service $ServiceName installed and started. Logs: $LogDir" diff --git a/scripts/install-watchdog-task.ps1 b/scripts/install-watchdog-task.ps1 new file mode 100644 index 0000000..dc19d77 --- /dev/null +++ b/scripts/install-watchdog-task.ps1 @@ -0,0 +1,29 @@ +<# +Install a Scheduled Task (current user) to run the watchdog at logon and keep it running. 
+Usage (run once): + .\install-watchdog-task.ps1 -WatchdogPath 'C:\path\to\watchdog.ps1' -NodePath 'C:\Program Files\nodejs\node.exe' -AppArgs 'S:\NetworkBuster_Production\start-servers.js' -LogDir 'S:\NetworkBuster_Production\logs' +#> +param( + [Parameter(Mandatory=$true)] [string]$WatchdogPath, + [Parameter(Mandatory=$true)] [string]$NodePath, + [string]$AppArgs = 'S:\NetworkBuster_Production\start-servers.js', + [string]$WorkingDir = 'S:\NetworkBuster_Production', + [string]$LogDir = 'S:\NetworkBuster_Production\logs', + [string]$TaskName = 'NetworkBusterWatchdog' +) + +# Build action command +$action = "powershell -NoProfile -ExecutionPolicy Bypass -File `"$WatchdogPath`" -AppExe `"$NodePath`" -AppArgs `"$AppArgs`" -WorkingDir `"$WorkingDir`" -LogDir `"$LogDir`" -HealthUrl `"http://localhost:3001/api/health`" -HealthInterval 30 -RestartBackoff 5" + +# Create task trigger: At log on for current user +$principal = New-ScheduledTaskPrincipal -UserId $env:USERNAME -LogonType Interactive -RunLevel LeastPrivilege +$trigger = New-ScheduledTaskTrigger -AtLogOn +$settings = New-ScheduledTaskSettingsSet -RestartCount 999 -RestartInterval (New-TimeSpan -Minutes 1) -StartWhenAvailable +$actionObj = New-ScheduledTaskAction -Execute 'powershell.exe' -Argument "-NoProfile -ExecutionPolicy Bypass -WindowStyle Hidden -Command \"& { $action }\"" + +try { + Register-ScheduledTask -TaskName $TaskName -Action $actionObj -Trigger $trigger -Settings $settings -Principal $principal -Force + Write-Output "Scheduled task '$TaskName' registered for user $env:USERNAME" +} catch { + Write-Error "Failed to register scheduled task: $($_.Exception.Message)" +} diff --git a/scripts/installer/EULA.txt b/scripts/installer/EULA.txt new file mode 100644 index 0000000..93a74da --- /dev/null +++ b/scripts/installer/EULA.txt @@ -0,0 +1,20 @@ +NETWORKBUSTER END USER LICENSE AGREEMENT (EULA) + +Please read this End User License Agreement ("Agreement") carefully before installing or using NetworkBuster 
(the "Software"). By installing, copying, or otherwise using the Software, you agree to be bound by the terms of this Agreement. + +1. LICENSE GRANT +NetworkBuster is licensed, not sold. Subject to the terms and conditions of this Agreement, the author grants you a non-exclusive, non-transferable license to use the Software. + +2. RESTRICTIONS +You may not modify, reverse engineer, decompile, or disassemble the Software except to the extent expressly permitted by applicable law. + +3. NO WARRANTY +The Software is provided "AS IS" without warranty of any kind. The author disclaims all warranties, express or implied. + +4. LIMITATION OF LIABILITY +In no event shall the author be liable for any special, incidental, indirect, or consequential damages arising out of the use or inability to use the Software. + +5. GOVERNING LAW +This Agreement shall be governed by the laws of the jurisdiction where the author maintains their primary residence, unless otherwise required by applicable law. + +If you do not agree to the terms of this Agreement, do not install or use the Software. \ No newline at end of file diff --git a/scripts/installer/branding/README.md b/scripts/installer/branding/README.md new file mode 100644 index 0000000..71e9812 --- /dev/null +++ b/scripts/installer/branding/README.md @@ -0,0 +1,9 @@ +Branded assets for the NetworkBuster installer. + +Place your final assets here before building the installer: +- `logo.svg` โ€” The main project logo (SVG preferred). +- `banner.png` โ€” A 640x120 installer banner (PNG). +- `header.png` โ€” A 300x80 header for installer pages (PNG). +- `icon.ico` โ€” Optional ICO (if present will be embedded into shortcuts and installer). + +Use `convert-icon.ps1` to generate an `icon.ico` from `icon-placeholder.png` if needed (requires ImageMagick). 
\ No newline at end of file diff --git a/scripts/installer/branding/banner.png b/scripts/installer/branding/banner.png new file mode 100644 index 0000000..266fa98 --- /dev/null +++ b/scripts/installer/branding/banner.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAAyAAAABkCAYAAABc2kFzAAAACXBIWXMAAAsSAAALEgHS3X78AAABQ0lEQVR4nO3TsQ0AIAwDsR0/39t3yQ+oC7k6r3JdegPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+H2f8F9m8f9p8P6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6Xw/6XwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8B0a8AASGk3hUAAAAASUVORK5CYII= \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-128.png b/scripts/installer/branding/icons/icon-128.png new file mode 100644 index 0000000..2bb44cd --- /dev/null +++ b/scripts/installer/branding/icons/icon-128.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAAIwAAACMCAIAAAD8GO2jAAAACXBIWXMAAAsSAAALEgHS3X78AAABJElEQVR4nO3RMQ0AMAgAsZf9n1kF6s3Vn2KfJZ8GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP4b3f8nP8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8fN8H8A0Xx07tPflEwAAAABJRU5ErkJggg== \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-16.png b/scripts/installer/branding/icons/icon-16.png new file mode 100644 index 0000000..2f98989 --- /dev/null +++ b/scripts/installer/branding/icons/icon-16.png @@ -0,0 +1 @@ 
+iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAACXBIWXMAAAsSAAALEgHS3X78AAABFUlEQVR4nGNgYGBgYGRk/A8YGJgYGBg+M8GhgZmBiYGBgYGAwMDGxsbGxgYGBgYGBg4P8fAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDw38B1gMDAwMDAwMDAwEwBQAAAwcA0w5Vg0QAAAABJRU5ErkJggg== \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-256.png b/scripts/installer/branding/icons/icon-256.png new file mode 100644 index 0000000..05cb84f --- /dev/null +++ b/scripts/installer/branding/icons/icon-256.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAAPAAAADwCAYAAAC0w0wLAAAACXBIWXMAAAsSAAALEgHS3X78AAABk0lEQVR4nO3UQQ3CMAwF0a9P9vKcYq7q0cOe0jY2m7r0mAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPgfvR8dH7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v4H8A1Xn/A3g0p6gAAAAASUVORK5CYII= \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-32.png b/scripts/installer/branding/icons/icon-32.png new file mode 100644 index 0000000..ab2ad88 --- /dev/null +++ b/scripts/installer/branding/icons/icon-32.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAACXBIWXMAAAsSAAALEgHS3X78AAABHklEQVR4nO3TsQ2DMAwE0az/6yQGtq3Vn2KfJZ8GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPgfvR8dH7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v7e3v4H8A1Xn/A3g0p6gAAAAASUVORK5CYII= \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-48.png b/scripts/installer/branding/icons/icon-48.png new file mode 100644 index 0000000..8ed0ec0 --- 
/dev/null +++ b/scripts/installer/branding/icons/icon-48.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAIAAACp8Y/JAAAACXBIWXMAAAsSAAALEgHS3X78AAABGUlEQVR4nO3XsQnAIAwF0cf9n1kV7o3Vj4KfJZ8GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+D7d/yc/x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x8f4B8B9Q4v4z2cGgAAAABJRU5ErkJggg== \ No newline at end of file diff --git a/scripts/installer/branding/icons/icon-64.png b/scripts/installer/branding/icons/icon-64.png new file mode 100644 index 0000000..9dcd6d6 --- /dev/null +++ b/scripts/installer/branding/icons/icon-64.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAADp6/0eAAAACXBIWXMAAAsSAAALEgHS3X78AAABKElEQVR4nO3WsQ2AQBAF0Yv7/tYqzqUe8gHk7eDU6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+D7d/yc/x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x83x8f4B8B9Q4v4z2cGgAAAABJRU5ErkJggg== \ No newline at end of file diff --git a/scripts/installer/branding/logo.svg b/scripts/installer/branding/logo.svg new file mode 100644 index 0000000..bc926e4 --- /dev/null +++ b/scripts/installer/branding/logo.svg @@ -0,0 +1 @@ +NetworkBuster \ No newline at end of file diff --git a/scripts/installer/convert-icon.ps1 b/scripts/installer/convert-icon.ps1 new file mode 100644 index 0000000..f045b33 --- /dev/null +++ b/scripts/installer/convert-icon.ps1 @@ -0,0 +1,19 @@ +# convert-icon.ps1 +# Try to convert scripts/installer/icon-placeholder.png to scripts/installer/icon.ico using ImageMagick (`magick`) or warn the user. 
+$png = Join-Path $PSScriptRoot 'icon-placeholder.png' +$ico = Join-Path $PSScriptRoot 'icon.ico' + +if (-not (Test-Path $png)) { Write-Error "PNG not found: $png"; exit 1 } + +if (Get-Command magick -ErrorAction SilentlyContinue) { + Write-Output "Converting $png -> $ico using ImageMagick" + magick convert $png -define icon:auto-resize=256,128,64,48,32,16 $ico + Write-Output "Icon created: $ico" + # Also generate all size PNGs and a multi-size ICO using generate-icons.ps1 + Write-Output "Generating multi-size icons using scripts/generate-icons.ps1" + powershell -ExecutionPolicy Bypass -File "$(Join-Path $PSScriptRoot '..\generate-icons.ps1')" +} else { + Write-Output "ImageMagick (magick) not found. Please install ImageMagick or place an ICO at scripts/installer/icon.ico" + Write-Output "You can install ImageMagick via Chocolatey: choco install imagemagick -y" + Write-Output "Or run scripts/generate-icons.ps1 on a machine with ImageMagick to create multi-size icons." +} diff --git a/scripts/installer/icon-placeholder.png b/scripts/installer/icon-placeholder.png new file mode 100644 index 0000000..6b22cb4 --- /dev/null +++ b/scripts/installer/icon-placeholder.png @@ -0,0 +1 @@ +iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGNgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII= \ No newline at end of file diff --git a/scripts/installer/networkbuster-installer.nsi b/scripts/installer/networkbuster-installer.nsi new file mode 100644 index 0000000..24e1d13 --- /dev/null +++ b/scripts/installer/networkbuster-installer.nsi @@ -0,0 +1,99 @@ +!include MUI2.nsh +!ifndef STAGEDIR + !define STAGEDIR "${INSTALLER_STAGING}" +!endif + +!define APP_NAME "NetworkBuster" +!define VERSION "${VERSION}" +!define COMPANY "NetworkBuster" + +; Optional custom icon (place scripts/installer/icon.ico) +!ifdef ICON_FILE + Icon "${ICON_FILE}" +!endif + +; Require admin to write to Program Files +RequestExecutionLevel admin + +!insertmacro MUI_PAGE_WELCOME +!insertmacro MUI_PAGE_LICENSE 
"${STAGEDIR}\\scripts\\installer\\EULA.txt" + +; Network Boost custom page (checkbox) - uses nsDialogs +Page custom NetworkBoostPageCreate NetworkBoostPageLeave + +Function NetworkBoostPageCreate + nsDialogs::Create 1018 + Pop $0 + ${If} $0 == error + Abort + ${EndIf} + ; Label + ${NSD_CreateLabel} 0u 10u 100% 12u "Optional: Apply Network Boost (recommended). This will run a small script to apply safe network tuning changes." + Pop $1 + ; Checkbox + ${NSD_CreateCheckBox} 0u 30u 100% 12u "Apply Network Boost (recommended)" + Pop $2 + ; Default is unchecked for safety + ${NSD_SetState} $2 0 + StrCpy $NETWORKBOOST_HANDLE $2 + nsDialogs::Show +FunctionEnd + +Function NetworkBoostPageLeave + ${NSD_GetState} $NETWORKBOOST_HANDLE $0 + StrCmp $0 1 +2 + StrCpy $NETWORKBOOST "0" + StrCpy $NETWORKBOOST "1" +FunctionEnd +!insertmacro MUI_PAGE_DIRECTORY +Page custom NetworkBoostPage NetworkBoostPageLeave +!insertmacro MUI_PAGE_INSTFILES +!insertmacro MUI_PAGE_FINISH + +Var NETWORKBOOST_HANDLE +Var NETWORKBOOST + +!insertmacro MUI_UNPAGE_CONFIRM + +Var StartMenuFolder + +Section "Install" + SetOutPath "$INSTDIR" + + ; Copy staged files + File /r "${STAGEDIR}\\*" + + ; Copy icon into install dir if present + ; (icon should be present in staging scripts/installer/icon.ico) + ; Create Start Menu folder + CreateDirectory "$SMPROGRAMS\\${APP_NAME}" + StrCpy $StartMenuFolder "$SMPROGRAMS\\${APP_NAME}" + + ; Create Start Menu shortcut (use installed icon if present) + ${If} ${FileExists} "$INSTDIR\\scripts\\installer\\icon.ico" + CreateShortCut "$StartMenuFolder\\${APP_NAME}.lnk" "$INSTDIR\\start-desktop.bat" "" "$INSTDIR\\scripts\\installer\\icon.ico" 0 + CreateShortCut "$DESKTOP\\${APP_NAME} Launcher.lnk" "$INSTDIR\\start-desktop.bat" "" "$INSTDIR\\scripts\\installer\\icon.ico" 0 + ${Else} + CreateShortCut "$StartMenuFolder\\${APP_NAME}.lnk" "$INSTDIR\\start-desktop.bat" "" "" 0 + CreateShortCut "$DESKTOP\\${APP_NAME} Launcher.lnk" "$INSTDIR\\start-desktop.bat" "" "" 0 + ${EndIf} + + 
; Run Network Boost script if user opted in + StrCmp $NETWORKBOOST "1" 0 +3 + ; Run as elevated PowerShell (installer already elevated). -Apply -Confirm:$false to run non-interactive + ExecWait '"$SYSDIR\\WindowsPowerShell\\v1.0\\powershell.exe" -NoProfile -ExecutionPolicy Bypass -File "$INSTDIR\\scripts\\network-boost.ps1" -Apply -Confirm:$false' + + ; Write version to registry + WriteRegStr HKLM "Software\\${COMPANY}\\${APP_NAME}" "DisplayVersion" "${VERSION}" + + ; Create uninstaller + WriteUninstaller "$INSTDIR\\Uninstall.exe" +SectionEnd + +Section "Uninstall" + RMDir /r "$INSTDIR" + DeleteRegKey HKLM "Software\\${COMPANY}\\${APP_NAME}" + Delete "$DESKTOP\\${APP_NAME} Launcher.lnk" + Delete "$SMPROGRAMS\\${APP_NAME}\\${APP_NAME}.lnk" + RMDir "$SMPROGRAMS\\${APP_NAME}" +SectionEnd \ No newline at end of file diff --git a/scripts/make-release.js b/scripts/make-release.js new file mode 100644 index 0000000..70ba637 --- /dev/null +++ b/scripts/make-release.js @@ -0,0 +1,27 @@ +#!/usr/bin/env node +import { execSync } from 'child_process'; +import { readFileSync, mkdirSync, existsSync } from 'fs'; +import { join } from 'path'; + +const pkg = JSON.parse(readFileSync('package.json', 'utf8')); +const name = pkg.name || 'networkbuster'; +const version = pkg.version || '0.0.0'; +const outDir = 'dist'; +if (!existsSync(outDir)) mkdirSync(outDir); +const zipName = `${name}-${version}.zip`; + +console.log(`Creating ${zipName} in ${outDir}...`); +try { + if (process.platform === 'win32') { + // Use PowerShell Compress-Archive + const files = ['server.js', 'package.json', 'LICENSE.txt', 'README.md']; + const filesArg = files.map(f => `"${f}"`).join(','); + execSync(`powershell -Command "Compress-Archive -Path ${filesArg} -DestinationPath '${join(outDir, zipName)}' -Force"`, { stdio: 'inherit' }); + } else { + execSync(`zip -r '${join(outDir, zipName)}' server.js package.json LICENSE.txt README.md`, { stdio: 'inherit' }); + } + console.log('Created', join(outDir, zipName)); 
+} catch (e) { + console.error('Failed to create zip', e); + process.exit(1); +} \ No newline at end of file diff --git a/scripts/network-boost.ps1 b/scripts/network-boost.ps1 new file mode 100644 index 0000000..97e8191 --- /dev/null +++ b/scripts/network-boost.ps1 @@ -0,0 +1,124 @@ +<# +scripts/network-boost.ps1 +Safe, optional network tuning helper for Windows and Linux. +Usage: + - Interactive dry-run: powershell -ExecutionPolicy Bypass -File scripts/network-boost.ps1 + - Apply non-interactively: powershell -ExecutionPolicy Bypass -File scripts/network-boost.ps1 -Apply -Confirm:$false + +The script records previous settings and creates a restore script at the same location if changes are applied. +#> + +param( + [switch]$Apply, + [switch]$Confirm = $true +) + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition +$logFile = Join-Path $scriptDir 'network-boost.log' +$restoreScript = Join-Path $scriptDir 'network-boost-restore.ps1' + +function Write-Log($msg) { + $ts = Get-Date -Format 'u' + "$ts - $msg" | Out-File -FilePath $logFile -Append -Encoding UTF8 + Write-Output $msg +} + +Write-Log "Network boost script started. 
Apply=$Apply" + +# Detect OS +$isWindows = $env:OS -eq 'Windows_NT' +if ($isWindows) { + Write-Log "Detected Windows environment" + $current = netsh interface tcp show global 2>$null + if ($current) { + Write-Log "Current TCP global settings:"; $current | Out-File -FilePath $logFile -Append + } + + $recommended = @( + @{ cmd = 'netsh interface tcp set global autotuning=normal'; desc = 'Set TCP auto-tuning to Normal' }, + @{ cmd = 'netsh interface tcp set global congestionprovider=ctcp'; desc = 'Enable CTCP congestion provider (if available)' }, + @{ cmd = 'netsh interface tcp set global ecncapability=disabled'; desc = 'Disable ECN to improve compatibility' }, + @{ cmd = 'netsh interface tcp set global rss=enabled'; desc = 'Enable Receive Side Scaling (RSS)' + } + ) + + Write-Output "Recommended Windows tweaks (non-destructive and reversible):" + $i=1 + foreach ($r in $recommended) { Write-Output ("[$i] $($r.desc) : $($r.cmd)"); $i++ } + + if (-not $Apply) { Write-Output "Run with -Apply to apply these changes."; exit 0 } + + if ($Confirm) { + $ans = Read-Host "Apply recommended changes now? 
(y/N)" + if ($ans -notin @('y','Y','yes','Yes')) { Write-Log 'User declined to apply changes.'; exit 0 } + } + + # Save current settings to restore script + Write-Output "Creating restore script: $restoreScript" + "# Restore script generated on $(Get-Date)" | Out-File $restoreScript -Encoding UTF8 + $currentLines = netsh interface tcp show global | Select-String -Pattern '(.+):\s*(.+)' | ForEach-Object { $_.Matches[0].Groups[1].Value.Trim() + '|' + $_.Matches[0].Groups[2].Value.Trim() } + foreach ($ln in $currentLines) { + $parts = $ln -split '\|' + $k = $parts[0]; $v = $parts[1] + # We keep a simple log; full restore may require manual commands recorded in log + "$k = $v" | Out-File $logFile -Append + } + + # Apply recommended + foreach ($r in $recommended) { + try { + Write-Log "Applying: $($r.cmd)" + iex $r.cmd + "$($r.cmd) => OK" | Out-File $logFile -Append + } catch { + Write-Log "Failed: $($_)" + } + } + + Write-Log "Windows network boost applied. Please reboot for some changes to take effect." + Write-Output "Done. A log was written to $logFile. Reboot your machine if you applied changes." + exit 0 +} + +# Linux path +if (Test-Path '/proc/sys') { + Write-Log "Detected Linux environment" + $keys = @{ + 'net.core.rmem_max' = 16777216 + 'net.core.wmem_max' = 16777216 + 'net.ipv4.tcp_window_scaling' = 1 + } + + Write-Output "Recommended Linux sysctl changes:" + foreach ($k in $keys.Keys) { Write-Output ("$k = $($keys[$k])") } + + if (-not $Apply) { Write-Output "Run with -Apply to apply these changes as root."; exit 0 } + + if ($Confirm) { + $ans = Read-Host "Apply recommended changes now? 
(requires root) (y/N)" + if ($ans -notin @('y','Y','yes','Yes')) { Write-Log 'User declined to apply changes.'; exit 0 } + } + + # Save current values + "# Restore script generated on $(Get-Date)" | Out-File $restoreScript -Encoding UTF8 + foreach ($k in $keys.Keys) { + $old = (sysctl -n $k 2>$null) -replace '\r','' + "$k|$old" | Out-File $logFile -Append + "sysctl -w $k=$old" | Out-File $restoreScript -Append + } + + # Apply + foreach ($k in $keys.Keys) { + try { + Write-Log "Setting $k to $($keys[$k])" + sysctl -w $k=$($keys[$k]) | Out-Null + "sysctl -w $k=$($keys[$k])" | Out-File $logFile -Append + } catch { Write-Log "Failed to set $k: $_" } + } + + Write-Log "Linux network boost applied (temporary). To make changes permanent, add to /etc/sysctl.conf or a conf in /etc/sysctl.d/." + Write-Output "Done. A log was written to $logFile. Use $restoreScript to revert changes." + exit 0 +} + +Write-Output "Unsupported OS or environment. No changes made."; exit 1 diff --git a/scripts/network-path-optimizer.js b/scripts/network-path-optimizer.js new file mode 100644 index 0000000..39598e7 --- /dev/null +++ b/scripts/network-path-optimizer.js @@ -0,0 +1,388 @@ +/** + * Network Path Optimizer - Comprehensive optimization for all network paths + * Optimizes: TCP settings, DNS resolution, HTTP connections, proxy routing, WebSocket + * + * Run: node scripts/network-path-optimizer.js [--apply] [--report] + */ + +import http from 'http'; +import https from 'https'; +import dns from 'dns'; +import { promisify } from 'util'; +import os from 'os'; +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const dnsLookup = promisify(dns.lookup); +const dnsResolve = promisify(dns.resolve); + +// Configuration +const CONFIG = { + // HTTP Agent optimization + httpAgent: { + keepAlive: true, + keepAliveMsecs: 30000, + maxSockets: 100, + maxFreeSockets: 50, + timeout: 60000, + scheduling: 
'fifo' + }, + // HTTPS Agent optimization + httpsAgent: { + keepAlive: true, + keepAliveMsecs: 30000, + maxSockets: 100, + maxFreeSockets: 50, + timeout: 60000, + scheduling: 'fifo', + rejectUnauthorized: true, + sessionTimeout: 300 + }, + // DNS optimization + dns: { + cacheSize: 1000, + cacheTTL: 300000, // 5 minutes + preferIPv4: true, + servers: ['8.8.8.8', '1.1.1.1', '9.9.9.9'] + }, + // Connection pooling + pool: { + maxConnections: 200, + idleTimeout: 60000, + connectTimeout: 10000 + } +}; + +// Optimized HTTP agents +let optimizedHttpAgent = null; +let optimizedHttpsAgent = null; + +// DNS Cache +const dnsCache = new Map(); + +class NetworkPathOptimizer { + constructor() { + this.stats = { + dnsHits: 0, + dnsMisses: 0, + connectionsCreated: 0, + connectionsReused: 0, + bytesTransferred: 0, + latencySum: 0, + requestCount: 0 + }; + this.startTime = Date.now(); + } + + // Initialize optimized agents + initAgents() { + optimizedHttpAgent = new http.Agent(CONFIG.httpAgent); + optimizedHttpsAgent = new https.Agent(CONFIG.httpsAgent); + + console.log('โœ“ HTTP Agent optimized:', { + keepAlive: CONFIG.httpAgent.keepAlive, + maxSockets: CONFIG.httpAgent.maxSockets + }); + console.log('โœ“ HTTPS Agent optimized:', { + keepAlive: CONFIG.httpsAgent.keepAlive, + maxSockets: CONFIG.httpsAgent.maxSockets + }); + + return { httpAgent: optimizedHttpAgent, httpsAgent: optimizedHttpsAgent }; + } + + // Optimized DNS lookup with caching + async dnsLookupCached(hostname) { + const cached = dnsCache.get(hostname); + if (cached && Date.now() < cached.expiresAt) { + this.stats.dnsHits++; + return cached.address; + } + + this.stats.dnsMisses++; + try { + const result = await dnsLookup(hostname, { family: CONFIG.dns.preferIPv4 ? 
4 : 0 }); + dnsCache.set(hostname, { + address: result.address, + expiresAt: Date.now() + CONFIG.dns.cacheTTL + }); + + // Cleanup old entries + if (dnsCache.size > CONFIG.dns.cacheSize) { + const now = Date.now(); + for (const [key, val] of dnsCache) { + if (now > val.expiresAt) dnsCache.delete(key); + } + } + + return result.address; + } catch (err) { + console.error(`DNS lookup failed for ${hostname}:`, err.message); + throw err; + } + } + + // Configure DNS servers + configureDNS() { + try { + dns.setServers(CONFIG.dns.servers); + console.log('โœ“ DNS servers configured:', CONFIG.dns.servers.join(', ')); + } catch (err) { + console.warn('Could not set DNS servers:', err.message); + } + } + + // Benchmark DNS lookup + async benchmarkDNS(hostname = 'google.com', iterations = 10) { + const times = []; + + // Clear cache for fair test + dnsCache.delete(hostname); + + for (let i = 0; i < iterations; i++) { + const start = process.hrtime.bigint(); + await this.dnsLookupCached(hostname); + const end = process.hrtime.bigint(); + times.push(Number(end - start) / 1e6); // Convert to ms + + // Clear cache every other iteration to test both paths + if (i % 2 === 0) dnsCache.delete(hostname); + } + + const avg = times.reduce((a, b) => a + b, 0) / times.length; + const cached = times.filter((_, i) => i % 2 === 1); + const uncached = times.filter((_, i) => i % 2 === 0); + + return { + hostname, + iterations, + averageMs: avg.toFixed(2), + cachedAvgMs: (cached.reduce((a, b) => a + b, 0) / cached.length).toFixed(2), + uncachedAvgMs: (uncached.reduce((a, b) => a + b, 0) / uncached.length).toFixed(2), + improvement: `${((1 - cached.reduce((a, b) => a + b, 0) / uncached.reduce((a, b) => a + b, 0)) * 100).toFixed(1)}%` + }; + } + + // Benchmark HTTP connection + async benchmarkHTTP(url = 'https://httpbin.org/get', iterations = 5) { + const times = []; + + for (let i = 0; i < iterations; i++) { + const start = process.hrtime.bigint(); + try { + await this.fetch(url); + const end = 
process.hrtime.bigint(); + times.push(Number(end - start) / 1e6); + } catch (err) { + times.push(-1); + } + } + + const validTimes = times.filter(t => t > 0); + const avg = validTimes.length > 0 + ? validTimes.reduce((a, b) => a + b, 0) / validTimes.length + : -1; + + return { + url, + iterations, + successRate: `${(validTimes.length / iterations * 100).toFixed(0)}%`, + averageMs: avg.toFixed(2), + minMs: Math.min(...validTimes).toFixed(2), + maxMs: Math.max(...validTimes).toFixed(2) + }; + } + + // Optimized fetch with connection reuse + fetch(url, options = {}) { + return new Promise((resolve, reject) => { + const isHttps = url.startsWith('https'); + const agent = isHttps ? optimizedHttpsAgent : optimizedHttpAgent; + const lib = isHttps ? https : http; + + const startTime = process.hrtime.bigint(); + + const req = lib.get(url, { ...options, agent }, (res) => { + let data = ''; + res.on('data', chunk => { data += chunk; this.stats.bytesTransferred += chunk.length; }); + res.on('end', () => { + const endTime = process.hrtime.bigint(); + const latency = Number(endTime - startTime) / 1e6; + this.stats.latencySum += latency; + this.stats.requestCount++; + resolve({ status: res.statusCode, data, latency }); + }); + }); + + req.on('error', reject); + req.setTimeout(CONFIG.pool.connectTimeout, () => { + req.destroy(); + reject(new Error('Connection timeout')); + }); + }); + } + + // Get network interfaces + getNetworkInterfaces() { + const interfaces = os.networkInterfaces(); + const result = []; + + for (const [name, addrs] of Object.entries(interfaces)) { + for (const addr of addrs) { + if (!addr.internal) { + result.push({ + name, + family: addr.family, + address: addr.address, + netmask: addr.netmask, + mac: addr.mac + }); + } + } + } + + return result; + } + + // Generate optimization report + async generateReport() { + console.log('\n' + 'โ•'.repeat(60)); + console.log('๐ŸŒ Network Path Optimization Report'); + console.log('โ•'.repeat(60) + '\n'); + + // System 
info + console.log('๐Ÿ“Š System Information:'); + console.log(` Platform: ${os.platform()} ${os.release()}`); + console.log(` CPU: ${os.cpus()[0]?.model || 'Unknown'}`); + console.log(` Memory: ${Math.round(os.totalmem() / 1024 / 1024 / 1024)}GB`); + + // Network interfaces + console.log('\n๐Ÿ“ก Network Interfaces:'); + const interfaces = this.getNetworkInterfaces(); + for (const iface of interfaces) { + console.log(` ${iface.name}: ${iface.address} (${iface.family})`); + } + + // DNS benchmark + console.log('\n๐Ÿ” DNS Performance:'); + const dnsBench = await this.benchmarkDNS(); + console.log(` Hostname: ${dnsBench.hostname}`); + console.log(` Cached lookup: ${dnsBench.cachedAvgMs}ms`); + console.log(` Uncached lookup: ${dnsBench.uncachedAvgMs}ms`); + console.log(` Improvement: ${dnsBench.improvement}`); + + // HTTP benchmark + console.log('\nโšก HTTP Performance:'); + try { + const httpBench = await this.benchmarkHTTP(); + console.log(` URL: ${httpBench.url}`); + console.log(` Success rate: ${httpBench.successRate}`); + console.log(` Average latency: ${httpBench.averageMs}ms`); + console.log(` Range: ${httpBench.minMs}ms - ${httpBench.maxMs}ms`); + } catch (err) { + console.log(` Benchmark failed: ${err.message}`); + } + + // Agent stats + console.log('\n๐Ÿ”ง Agent Configuration:'); + console.log(` HTTP keepAlive: ${CONFIG.httpAgent.keepAlive}`); + console.log(` Max sockets: ${CONFIG.httpAgent.maxSockets}`); + console.log(` Max free sockets: ${CONFIG.httpAgent.maxFreeSockets}`); + + // Cache stats + console.log('\n๐Ÿ“ฆ Cache Statistics:'); + console.log(` DNS cache entries: ${dnsCache.size}`); + console.log(` DNS hits: ${this.stats.dnsHits}`); + console.log(` DNS misses: ${this.stats.dnsMisses}`); + console.log(` Hit rate: ${((this.stats.dnsHits / (this.stats.dnsHits + this.stats.dnsMisses)) * 100 || 0).toFixed(1)}%`); + + console.log('\n' + 'โ•'.repeat(60) + '\n'); + + return { + system: { + platform: os.platform(), + release: os.release(), + memory: 
os.totalmem() + }, + interfaces, + dns: dnsBench, + config: CONFIG, + stats: this.stats + }; + } + + // Apply optimizations globally + applyGlobal() { + console.log('\n๐Ÿš€ Applying Network Optimizations...\n'); + + this.configureDNS(); + this.initAgents(); + + // Replace global agents + http.globalAgent = optimizedHttpAgent; + https.globalAgent = optimizedHttpsAgent; + console.log('โœ“ Global HTTP/HTTPS agents replaced'); + + // Set DNS lookup cache + const originalLookup = dns.lookup; + dns.lookup = (hostname, options, callback) => { + if (typeof options === 'function') { + callback = options; + options = {}; + } + + this.dnsLookupCached(hostname) + .then(address => callback(null, address, 4)) + .catch(err => originalLookup(hostname, options, callback)); + }; + console.log('โœ“ DNS caching enabled'); + + console.log('\nโœ… Network optimizations applied!\n'); + + return { httpAgent: optimizedHttpAgent, httpsAgent: optimizedHttpsAgent }; + } + + // Export config for use in other modules + getConfig() { + return CONFIG; + } + + // Get optimized agents + getAgents() { + if (!optimizedHttpAgent || !optimizedHttpsAgent) { + this.initAgents(); + } + return { httpAgent: optimizedHttpAgent, httpsAgent: optimizedHttpsAgent }; + } +} + +// CLI execution +const args = process.argv.slice(2); +const shouldApply = args.includes('--apply'); +const shouldReport = args.includes('--report') || args.length === 0; + +const optimizer = new NetworkPathOptimizer(); + +if (shouldApply) { + optimizer.applyGlobal(); +} + +if (shouldReport) { + optimizer.initAgents(); + optimizer.generateReport().then(report => { + // Save report + const reportPath = path.join(__dirname, '..', 'test-reports', 'network-optimization-report.json'); + try { + fs.mkdirSync(path.dirname(reportPath), { recursive: true }); + fs.writeFileSync(reportPath, JSON.stringify(report, null, 2)); + console.log(`Report saved to: test-reports/network-optimization-report.json`); + } catch (err) { + console.warn('Could not save 
report:', err.message); + } + }); +} + +export default NetworkPathOptimizer; +export { CONFIG, optimizedHttpAgent, optimizedHttpsAgent }; diff --git a/scripts/provision-hyperv-vm.ps1 b/scripts/provision-hyperv-vm.ps1 new file mode 100644 index 0000000..1b0a761 --- /dev/null +++ b/scripts/provision-hyperv-vm.ps1 @@ -0,0 +1,63 @@ +<# +.SYNOPSIS + Provisions and upgrades a Hyper-V VM for high-performance AI and network workloads. + Supports GPU Partitioning (GPU-PV) and SR-IOV. + +.EXAMPLE + .\scripts\provision-hyperv-vm.ps1 -VMName "NetworkBuster-Linux" -EnableGPU +#> + +param( + [string]$VMName = "NetworkBuster-Linux", + [int]$Cores = 4, + [int]$MemoryGB = 8, + [switch]$EnableGPU, + [switch]$EnableNetworkAcceleration +) + +if (-not (Get-Service vmms -ErrorAction SilentlyContinue)) { + Write-Error "Hyper-V service (vmms) is not available on this system." + exit 1 +} + +$vm = Get-VM -Name $VMName -ErrorAction SilentlyContinue +if (-not $vm) { + Write-Host "Creating new VM: $VMName..." -ForegroundColor Cyan + New-VM -Name $VMName -Generation 2 -MemoryStartupBytes ($MemoryGB * 1GB) +} else { + Write-Host "Upgrading existing VM: $VMName..." -ForegroundColor Cyan + if ($vm.State -ne 'Off') { + Write-Warning "VM is currently $($vm.State). Some settings require the VM to be OFF." + } +} + +# 1. Performance Tuning +Write-Host "Setting CPU Cores to $Cores..." +Set-VMProcessor -VMName $VMName -Count $Cores + +Write-Host "Setting Memory to ${MemoryGB}GB (Static)..." +Set-VMMemory -VMName $VMName -DynamicMemoryEnabled $false -StartupBytes ($MemoryGB * 1GB) + +# 2. Network Acceleration +if ($EnableNetworkAcceleration) { + Write-Host "Enabling SR-IOV and MacAddressSpoofing..." + Get-VMNetworkAdapter -VMName $VMName | Set-VMNetworkAdapter -IovWeight 100 -MacAddressSpoofing On +} + +# 3. GPU Partitioning (GPU-PV) +if ($EnableGPU) { + Write-Host "Enabling GPU Partitioning..." 
-ForegroundColor Yellow + + # Check if GPU is assignable + $gpu = Get-VMHostAssignableDevice | Where-Object { $_.InstancePath -like "*PCI*" } + if (-not $gpu) { + Write-Warning "No assignable GPU found. Ensure your host GPU supports partitioning and is not in use." + } else { + Add-VMAssignableDevice -VMName $VMName -LocationPath $gpu.LocationPath + Write-Host "GPU assigned successfully." -ForegroundColor Green + } +} + +# 4. Final Verification +Write-Host "`nUpgrade Complete for $VMName" -ForegroundColor Green +Get-VM -Name $VMName | Select-Object Name, State, CPUUsage, MemoryUsage | Format-Table diff --git a/scripts/render-local.ps1 b/scripts/render-local.ps1 new file mode 100644 index 0000000..52d68cb --- /dev/null +++ b/scripts/render-local.ps1 @@ -0,0 +1,185 @@ +<# +render-local.ps1 + +Convenience helper to render Mermaid `.mmd` files to SVG and then to PNG locally. + +What it does: +- Verifies Node.js is available, otherwise downloads a portable Node zip into `tools/node-` and uses it for the session +- Runs `npx @mermaid-js/mermaid-cli` to render `.mmd` -> `.svg` +- Installs `puppeteer` (may download Chromium) and runs `node scripts/render-svgs.js` to convert SVG -> PNG +- Lists output PNG files in `docs/diagrams` + +Usage examples: + # Run with defaults (portable node if missing): + .\scripts\render-local.ps1 + + # Force using nvm-windows installer (requires UAC): + .\scripts\render-local.ps1 -UseNvm -AcceptUAC + + # Skip Chromium download (not recommended unless you already have Chromium available): + .\scripts\render-local.ps1 -SkipChromiumDownload +#> + +param( + [switch]$UseNvm, + [switch]$AcceptUAC, + [switch]$SkipChromiumDownload, + [switch]$LongTimeout, + [int]$RenderScale = 2 +) + +# Configure timeouts/retries +$nodeDownloadWaitMax = if ($LongTimeout) { 300 } else { 60 } +$pScreensScale = $RenderScale +$pptInstallRetries = if ($LongTimeout) { 5 } else { 2 } +$pptInstallBackoffSeconds = if ($LongTimeout) { 30 } else { 10 } + +function 
Fail([string]$m) { Write-Error $m; exit 1 } + +Write-Output "Starting local render helper" + +# Ensure we are running from repo root +Push-Location -Path (Join-Path $PSScriptRoot '..') | Out-Null + +# 1) Ensure Node exists (session PATH) +$nodeOK = $false +try { $nv = & node --version 2>$null; if ($LASTEXITCODE -eq 0) { Write-Output "Found node: $nv"; $nodeOK = $true } } catch { } + +if (-not $nodeOK) { + if ($UseNvm) { + Write-Output "nvm installer chosen. Running scripts/install-nvm.ps1 (requires -AcceptUAC to proceed)." + if (-not $AcceptUAC) { Fail 'nvm install requested but -AcceptUAC not provided. Rerun with -AcceptUAC to proceed.' } + & powershell -ExecutionPolicy Bypass -File scripts/install-nvm.ps1 -AcceptUAC + Write-Output "After nvm install, please re-open your shell or restart this PowerShell session and run this script again. Exiting." + exit 0 + } + + # Try portable Node zip method + Write-Output 'Node not found. Attempting to download and extract a portable Node 24.x ZIP to tools/ (no UAC required).' + $tools = Join-Path (Get-Location) 'tools' + if (-not (Test-Path $tools)) { New-Item -ItemType Directory -Path $tools | Out-Null } + # Prefer index.json to reliably pick the latest v24 release + $indexJsonUrl = 'https://nodejs.org/dist/index.json' + try { + $indexJson = Invoke-WebRequest -Uri $indexJsonUrl -UseBasicParsing -ErrorAction Stop + $json = $indexJson.Content | ConvertFrom-Json + } catch { + Fail "Failed to fetch Node index JSON: $($_.Exception.Message)" + } + $entry = $json | Where-Object { $_.version -match '^v24\.' 
} | Select-Object -First 1 + if (-not $entry) { Fail 'No Node 24.x release found in index.json' } + $ver = $entry.version.TrimStart('v') + $zipName = "node-v${ver}-win-x64.zip" + $zipUrl = "https://nodejs.org/dist/v${ver}/$zipName" + $tmp = Join-Path $env:TEMP $zipName + Write-Output "Downloading $zipUrl to $tmp" + Invoke-WebRequest -Uri $zipUrl -OutFile $tmp -UseBasicParsing -ErrorAction Stop + + # Wait until file is stable + $prev = -1 + for ($i=0;$i -lt 60;$i++) { + if (Test-Path $tmp) { + $s = (Get-Item $tmp).Length + Write-Output " size=$s" + if ($s -gt 1024*1024 -and $s -eq $prev) { break } + $prev = $s + } + Start-Sleep -Seconds 1 + } + if (-not (Test-Path $tmp)) { Fail "Download failed: $tmp missing" } + if ((Get-Item $tmp).Length -lt 1024*1024) { Fail "Downloaded Node zip appears too small" } + + $dest = Join-Path $tools ('node-'+$ver) + if (Test-Path $dest) { Remove-Item -Recurse -Force $dest } + Write-Output "Extracting to $tools" + Expand-Archive -Path $tmp -DestinationPath $tools -Force + + # Detect the extracted folder (handles names like node-v24.12.0 or node-v24.12.0-win-x64) + $candidates = Get-ChildItem -Path $tools -Directory -ErrorAction SilentlyContinue | Where-Object { $_.Name -like "node-v$ver*" } + if ($candidates.Count -ge 1) { + $extracted = $candidates[0].FullName + try { + # Move/rename to stable folder name + if (Test-Path $dest) { Remove-Item -Recurse -Force $dest } + Move-Item -Path $extracted -Destination $dest -Force + Write-Output "Renamed $extracted -> $dest" + } catch { + Write-Warning "Could not rename extracted folder: $($_.Exception.Message)" + } + } else { + Write-Warning "No extracted node folder matching node-v$ver* found under $tools" + } + + if (-not (Test-Path $dest)) { Fail 'Node extraction failed (destination missing after extraction/rename)' } + $nodeBin = Join-Path $dest 'node.exe' + if (-not (Test-Path $nodeBin)) { Fail 'node.exe not found after extraction' } + + # Use node from extracted tools for this session + 
$env:PATH = (Split-Path $nodeBin) + ';' + $env:PATH + Write-Output "Using portable node: $(& $nodeBin --version)" +} + +# Wait-for-download stability: ensure file size stabilizes before continuing +Write-Output "Waiting up to $nodeDownloadWaitMax seconds for the Node ZIP to stabilize" +$prev = -1 +$stable = $false +for ($i=0;$i -lt $nodeDownloadWaitMax;$i++) { + if (Test-Path $tmp) { + $s = (Get-Item $tmp).Length + Write-Output " size=$s" + if ($s -gt 1024*1024 -and $s -eq $prev) { $stable = $true; break } + $prev = $s + } + Start-Sleep -Seconds 1 +} +if (-not $stable) { Write-Warning "Node ZIP may not have stabilized after $nodeDownloadWaitMax seconds; proceeding but results may vary" } + +# 2) Ensure mermaid CLI + render mmd->svg +Write-Output 'Rendering Mermaid sources (.mmd -> .svg) using npx @mermaid-js/mermaid-cli' +try { + npx -y @mermaid-js/mermaid-cli -i "docs/diagrams/*.mmd" -o docs/diagrams -f svg --logLevel debug + Write-Output 'Mermaid rendering complete.' +} catch { + Write-Error "Mermaid rendering failed: $($_.Exception.Message)"; exit 2 +} + +# 3) Install puppeteer and run renderer (with retries/backoff if requested) +if (-not $SkipChromiumDownload) { + Write-Output 'Installing puppeteer (this will download Chromium). This can take several minutes.' + $success = $false + for ($try=1; $try -le $pptInstallRetries; $try++) { + Write-Output "Attempt $try of $($pptInstallRetries): npm install puppeteer --no-audit --no-fund" + try { + npm install puppeteer --no-audit --no-fund + if ($LASTEXITCODE -eq 0) { $success = $true; break } + Write-Warning "npm exited with code $LASTEXITCODE" + } catch { + Write-Warning "Install attempt failed: $($_.Exception.Message)" + } + + if ($try -lt $pptInstallRetries) { + $wait = $pptInstallBackoffSeconds * $try + Write-Output "Waiting $wait seconds before retrying..." 
+ Start-Sleep -Seconds $wait + } + } + if (-not $success) { Write-Error "Failed to install puppeteer after $pptInstallRetries attempts"; exit 4 } +} else { + Write-Output 'Skipping Chromium download as requested (-SkipChromiumDownload). Ensure you have a Chromium available in PATH.' +} + +# 4) Run the renderer +Write-Output "Running Node renderer: node scripts/render-svgs.js $pScreensScale" +try { + node scripts/render-svgs.js $pScreensScale +} catch { + Write-Error "Renderer failed: $($_.Exception.Message)"; exit 3 +} + +# 5) Show results +Write-Output 'PNG generation complete. Listing PNGs in docs/diagrams:' +Get-ChildItem -Path docs/diagrams -Recurse -Filter '*.png' | Select-Object FullName,Length | Format-Table -AutoSize + +Write-Output 'Done. If PNGs are missing, consider re-running with -SkipChromiumDownload:$false and check network/firewall settings or run the CI with enhanced logging.' + +Pop-Location | Out-Null diff --git a/scripts/render-mermaid.ps1 b/scripts/render-mermaid.ps1 new file mode 100644 index 0000000..ef8cf0d --- /dev/null +++ b/scripts/render-mermaid.ps1 @@ -0,0 +1,31 @@ +<# +Render Mermaid `.mmd` files in `docs/diagrams` to SVG using `@mermaid-js/mermaid-cli` via npx. +It attempts to use the repo-local Node if present, or system `npx` otherwise. 
+ +Usage: .\render-mermaid.ps1 [-OutDir docs/diagrams] +#> +param( + [string]$DiagDir = 'docs/diagrams', + [string]$OutDir = 'docs/diagrams' +) + +function Find-Node { + $candidates = @('C:\\Program Files\\nodejs\\node.exe', (Get-Command node -ErrorAction SilentlyContinue).Path, 'tools\\node\\node.exe') + foreach ($c in $candidates) { if ($c -and (Test-Path $c)) { return $c } } + return $null +} + +$node = Find-Node +if (-not $node) { Write-Warn 'Node not found in PATH or tools/node; rendering requires Node and npx/mermaid-cli; skipping.'; exit 0 } + +$mmds = Get-ChildItem -Path $DiagDir -Filter '*.mmd' -ErrorAction SilentlyContinue +if (-not $mmds) { Write-Output 'No .mmd files found'; exit 0 } + +foreach ($f in $mmds) { + $in = $f.FullName + $out = Join-Path $OutDir ($f.BaseName + '.svg') + Write-Output "Rendering $in -> $out" + & npx -y @mermaid-js/mermaid-cli -i "$in" -o "$OutDir" -f svg || Write-Warn "Failed to render $in" +} + +Write-Output 'Render complete.' diff --git a/scripts/render-svgs.js b/scripts/render-svgs.js new file mode 100644 index 0000000..5a7da91 --- /dev/null +++ b/scripts/render-svgs.js @@ -0,0 +1,58 @@ +#!/usr/bin/env node +// Render all SVGs in docs/diagrams to PNG using Puppeteer +// Usage: node scripts/render-svgs.js [scale] +import fs from 'fs/promises'; +import path from 'path'; +import puppeteer from 'puppeteer'; + +const scale = Number(process.argv[2] || 2); +const dir = path.resolve('docs', 'diagrams'); + +async function renderFile(file) { + const svgPath = path.join(dir, file); + const pngPath = path.join(dir, file.replace(/\.svg$/i, '.png')); + const svg = await fs.readFile(svgPath, 'utf8'); + // Attempt to extract width/height from viewBox or attributes + let width = 800, height = 600; + const vb = svg.match(/viewBox=["']?([0-9\.\s]+)["']?/i); + if (vb) { + const parts = vb[1].trim().split(/\s+/).map(Number); + if (parts.length === 4) { width = parts[2]; height = parts[3]; } + } else { + const w = 
svg.match(/width=["']?([0-9\.]+)["']?/i); + const h = svg.match(/height=["']?([0-9\.]+)["']?/i); + if (w) width = Math.round(Number(w[1])); + if (h) height = Math.round(Number(h[1])); + } + + const html = `${svg}`; + const browser = await puppeteer.launch({ args: ['--no-sandbox', '--disable-setuid-sandbox'] }); + const page = await browser.newPage(); + await page.setViewport({ width: Math.ceil(width), height: Math.ceil(height), deviceScaleFactor: scale }); + await page.setContent(html, { waitUntil: 'networkidle0' }); + const el = await page.$('svg'); + if (!el) { + console.warn(`No found in ${file}`); + await browser.close(); + return; + } + await el.screenshot({ path: pngPath, omitBackground: true }); + console.log(`Rendered ${pngPath}`); + await browser.close(); +} + +async function main(){ + try { + const files = await fs.readdir(dir); + const svgs = files.filter(f => f.toLowerCase().endsWith('.svg')); + if (!svgs.length) { console.log('No SVG files to render in', dir); return } + for (const f of svgs) { + await renderFile(f); + } + } catch (err) { + console.error('Render error', err); + process.exit(1); + } +} + +main(); diff --git a/scripts/set-gh-secret.js b/scripts/set-gh-secret.js new file mode 100644 index 0000000..d64eea1 --- /dev/null +++ b/scripts/set-gh-secret.js @@ -0,0 +1,7 @@ +// DEPRECATED: This helper was used to programmatically set repository secrets during setup. +// It has been intentionally removed from active use. If you still need to set secrets programmatically, +// prefer using the official GitHub CLI (`gh secret set`) or the GitHub Actions secrets REST API with +// appropriate safeguards. See docs/RECYCLING-AI.md for recommended workflows and the `.github/workflows` +// test that validates `OPENAI_API_KEY` is present and usable. 
+ +console.log('scripts/set-gh-secret.js is deprecated and intentionally disabled.'); diff --git a/scripts/set-openai-key.ps1 b/scripts/set-openai-key.ps1 new file mode 100644 index 0000000..5f8d5bd --- /dev/null +++ b/scripts/set-openai-key.ps1 @@ -0,0 +1,25 @@ +<# +Set OPENAI_API_KEY for the current session or persist for the current user. + +Usage: + # Temporarily for this session: + .\set-openai-key.ps1 -Key 'sk-...' + + # Persist for the current user (requires confirmation): + .\set-openai-key.ps1 -Key 'sk-...' -Persist +#> +param( + [Parameter(Mandatory=$true)][string]$Key, + [switch]$Persist +) + +Write-Output "Setting OPENAI_API_KEY for current session." +$env:OPENAI_API_KEY = $Key + +if ($Persist) { + Write-Output 'Persisting OPENAI_API_KEY for the current user using setx. This will apply to new shells only.' + setx OPENAI_API_KEY "$Key" | Out-Null + Write-Output 'Persisted. Restart shells to pick up the value.' +} + +Write-Output 'Done.' diff --git a/scripts/start-test-instance.ps1 b/scripts/start-test-instance.ps1 new file mode 100644 index 0000000..3506dec --- /dev/null +++ b/scripts/start-test-instance.ps1 @@ -0,0 +1,113 @@ +<# +Creates a test instance record that is "pending approval" and will auto-accept +after a timeout (default: 1 second). Optionally starts the Node server for the +instance if Node is available and -StartProcess is specified. 
+ +Usage: + .\start-test-instance.ps1 -Name test1 -Port 3002 -AutoAcceptSeconds 1 -StartProcess +#> +param( + [string]$Name = "test-$(Get-Date -Format 'yyyyMMdd-HHmmss')", + [int]$Port = 3002, + [double]$AutoAcceptSeconds = 0.001, + [object]$StartProcess = $true, + [string]$InstancesDir = 'S:\NetworkBuster_Production\instances' +) + +function Resolve-InstancesDir { + param($d) + if (Test-Path $d) { return $d } + $repo = (Split-Path -Parent $PSScriptROOT) + $fallback = Join-Path $repo 'instances' + if (-not (Test-Path $fallback)) { New-Item -ItemType Directory -Path $fallback -Force | Out-Null } + return $fallback +} + +$InstancesDir = Resolve-InstancesDir -d $InstancesDir +if (-not (Test-Path $InstancesDir)) { New-Item -ItemType Directory -Path $InstancesDir -Force | Out-Null } + +# Normalize StartProcess to boolean (tolerate strings/numbers when invoked non-interactively) +if ($StartProcess -is [string]) { + $sp = $StartProcess.Trim() + if ($sp -match '^(1|true|True|TRUE|yes|Yes|YES)$') { $StartProcess = $true } else { $StartProcess = $false } +} else { + try { $StartProcess = [bool]$StartProcess } catch { $StartProcess = $false } +} + +$instanceFile = Join-Path $InstancesDir "$Name.json" +$record = [ordered]@{ + name = $Name + status = 'pending' + created = (Get-Date).ToString('o') + port = $Port +} +$record | ConvertTo-Json -Depth 5 | Out-File -FilePath $instanceFile -Encoding utf8 + +Write-Host "Instance '$Name' created and is pending approval. Will auto-accept in $AutoAcceptSeconds second(s)." -ForegroundColor Cyan +Write-Host "Press 'Y' to accept now, 'N' to cancel. Waiting..." 
+ +$end = (Get-Date).AddSeconds($AutoAcceptSeconds) +$approved = $false +$cancelled = $false +while ((Get-Date) -lt $end) { + if ([console]::KeyAvailable) { + $k = [console]::ReadKey($true) + if ($k.Key -eq 'Y') { $approved = $true; break } + if ($k.Key -eq 'N') { $cancelled = $true; break } + } + Start-Sleep -Milliseconds 100 +} + +if (-not $approved -and -not $cancelled) { $approved = $true } # auto-accept on timeout + +if ($cancelled) { + $record.status = 'cancelled' + $record.cancelledAt = (Get-Date).ToString('o') + $record | ConvertTo-Json -Depth 5 | Out-File -FilePath $instanceFile -Encoding utf8 + Write-Host "Instance '$Name' was cancelled by user." -ForegroundColor Yellow + exit 0 +} + +if ($approved) { + $record.status = 'accepted' + $record.approvedAt = (Get-Date).ToString('o') + + # Attempt to start the Node server for this instance if requested + if ($StartProcess) { + $nodeExe = 'C:\Program Files\nodejs\node.exe' + $nodeCmd = 'node' + if (Test-Path $nodeExe) { $nodePath = $nodeExe } else { + $found = Get-Command node -ErrorAction SilentlyContinue + if ($found) { $nodePath = $found.Source } else { $nodePath = $null } + } + + if ($nodePath) { + try { + $startInfo = @{ + FilePath = $nodePath + ArgumentList = 'server.js' + WorkingDirectory = (Split-Path -Parent $PSScriptRoot) + PassThru = $true + } + $env:PORT = $Port + $p = Start-Process @startInfo + Start-Sleep -Seconds 1 + $record.processId = $p.Id + $record.processStarted = (Get-Date).ToString('o') + Write-Host "Started Node server for instance '$Name' (PID: $($p.Id))" -ForegroundColor Green + } catch { + $record.processError = $_.Exception.Message + Write-Host "Failed to start Node server: $($_.Exception.Message)" -ForegroundColor Red + } + } else { + Write-Host "Node runtime not found; skipping process start." 
-ForegroundColor Yellow + $record.processSkipped = $true + } + } + + $record | ConvertTo-Json -Depth 10 | Out-File -FilePath $instanceFile -Encoding utf8 + Write-Host "Instance '$Name' accepted and updated: $instanceFile" -ForegroundColor Green +} + +# Print summary +Get-Content $instanceFile -Raw | Write-Output diff --git a/scripts/sync-drives.ps1 b/scripts/sync-drives.ps1 new file mode 100644 index 0000000..172a148 --- /dev/null +++ b/scripts/sync-drives.ps1 @@ -0,0 +1,106 @@ +<# +.SYNOPSIS + Sync two drives (S: and E:) using Git when available, otherwise using Robocopy. + +.DESCRIPTION + This script attempts a Git-based mirror if Git is installed and the user requests it. If Git is not available or the "-UseGit:$false" flag is passed, it falls back to using Robocopy mirroring (/MIR). + +.PARAMETER Source + Source path to sync from (default: S:\NetworkBuster_Production) + +.PARAMETER Dest + Destination path to sync to (default: E:\NetworkBuster_Backup) + +.PARAMETER UseGit + Whether to prefer Git-based sync. Defaults to $true. 
+
+.PARAMETER DryRun
+  If set, shows commands without executing (Robocopy uses /L)
+
+.PARAMETER Log
+  Log file path (default: .\sync-drives.log)
+
+.EXAMPLE
+  .\sync-drives.ps1 -Source 'S:\NetworkBuster_Production' -Dest 'E:\NetworkBuster_Backup' -UseGit $true
+
+#>
+param(
+    [string]$Source = 'S:\NetworkBuster_Production',
+    [string]$Dest = 'E:\NetworkBuster_Backup',
+    [bool]$UseGit = $true,
+    [ValidateSet('push','pull','mirror')][string]$Direction = 'push',
+    [switch]$Reverse,
+    [switch]$DryRun,
+    [string]$Log = '.\sync-drives.log'
+)
+
+# Appends a timestamped message to the log file and echoes it to the console.
+# BUG FIX: this function was previously defined *after* its first call in the
+# reverse/pull block below; PowerShell resolves functions at call time, so that
+# call failed with "Log is not recognized". The definition must precede all uses.
+function Log { param($m) Write-Output $m; Add-Content -Path $Log -Value ("$(Get-Date -Format s) - $m") }
+
+# Support reverse/pull modes: -Reverse or -Direction pull will swap Source and Dest
+if ($Reverse -or $Direction -eq 'pull') {
+    # BUG FIX: restored the em dash that had been mojibake ("โ€”") from a UTF-8 mis-decode.
+    Log "Reverse/pull mode enabled - swapping Source and Dest"
+    $tmp = $Source; $Source = $Dest; $Dest = $tmp
+    Log "Source is now: $Source"
+    Log "Dest is now: $Dest"
+}
+
+# Validate paths
+if (-not (Test-Path $Source)) { Log "Source not found: $Source"; throw "Source not found: $Source" }
+if (-not (Test-Path $Dest)) { Log "Destination not found; creating: $Dest"; New-Item -ItemType Directory -Path $Dest -Force | Out-Null }
+
+# Detect Git
+$git = Get-Command git -ErrorAction SilentlyContinue
+if ($UseGit -and $git) {
+    Log "Git detected at $($git.Path). Proceeding with Git-based mirror."
+
+    # Prepare bare repo on destination
+    $bare = Join-Path $Dest 'networkbuster.git'
+    if (-not (Test-Path $bare)) {
+        Log "Creating bare repository at $bare"
+        if ($DryRun) { Log "DryRun: git init --bare $bare" } else { & git init --bare "$bare" }
+    } else {
+        Log "Bare repository already exists at $bare"
+    }
+
+    # Initialize and commit in source if needed
+    Push-Location $Source
+    try {
+        if (-not (Test-Path (Join-Path $Source '.git'))) {
+            Log "Initializing git repository in source: $Source"
+            if ($DryRun) { Log "DryRun: git init" } else { & git init }
+            if ($DryRun) { Log "DryRun: git add . 
; git commit -m 'Initial commit for sync'" } else { & git add .; & git commit -m "Sync commit: $(Get-Date -Format s)" -a } + } else { + Log ".git exists; committing current changes" + if (-not $DryRun) { & git add .; & git commit -m "Sync commit: $(Get-Date -Format s)" -a } else { Log "DryRun: git add . ; git commit -m 'Sync commit' -a" } + } + + # Add remote and push + if ($DryRun) { Log "DryRun: git remote add backup $bare ; git push --mirror backup" } else { + try { & git remote remove backup 2>$null } catch { } + try { & git remote add backup "$bare" 2>$null } catch { } + & git push --mirror backup + } + } finally { + Pop-Location + } + + Log "Git-based sync completed." + exit 0 +} + +# Fallback: Robocopy mirroring +Log "Git not used or not available. Falling back to Robocopy mirror." +$rcArgs = @($Source, $Dest, "/MIR", "/COPYALL", "/R:3", "/W:5", "/MT:16") +$display = "robocopy `"$Source`" `"$Dest`" /MIR /COPYALL /R:3 /W:5 /MT:16" +if ($DryRun) { Log "DryRun Robocopy: $display /L"; exit 0 } + +Log "Executing: $display" +# Execute robocopy with arguments +try { + & robocopy @rcArgs | Tee-Object -FilePath $Log -Append + Log "Robocopy sync completed." +} catch { + Log "Robocopy failed: $($_.Exception.Message)" + exit 1 +} + diff --git a/scripts/test-ai-robot.ps1 b/scripts/test-ai-robot.ps1 new file mode 100644 index 0000000..c017c34 --- /dev/null +++ b/scripts/test-ai-robot.ps1 @@ -0,0 +1,72 @@ +<# +.SYNOPSIS + Test AI Robot endpoint (PowerShell version). + +.PARAMETER Url + API URL (default: http://localhost:3001/api/robot) + +.PARAMETER Mock + If set, simulate responses locally without calling the API. + +.PARAMETER Prompt + One or more prompts to test. 
+ +.PARAMETER Concurrency + Number of concurrent requests per prompt (default: 1) +#> +param( + [string]$Url = 'http://localhost:3001/api/robot', + [switch]$Mock, + [int]$Concurrency = 1, + [string[]]$Prompt +) + +if (-not $Prompt -or $Prompt.Length -eq 0) { + $Prompt = @( + 'Summarize lunar recycling best practices in one paragraph.', + 'List three risks of regolith processing on the Moon and one mitigation for each.', + 'Generate an example test query for the NetworkBuster AI robot that checks audio synthesis.' + ) +} + +Write-Output "AI Robot Test - Url: $Url Mock: $Mock Concurrency: $Concurrency" + +function Invoke-Test ($p) { + if ($Mock) { + return @{ message = "MOCK RESPONSE for prompt: $p" } + } + try { + $body = @{ prompt = $p } | ConvertTo-Json + $res = Invoke-RestMethod -Method Post -Uri $Url -Body $body -ContentType 'application/json' -ErrorAction Stop + return $res + } catch { + throw "Request failed for prompt: $p - $($_.Exception.Message)" + } +} + +$fail = 0 +foreach ($p in $Prompt) { + Write-Output "`n== Prompt: $p ==" + if ($Concurrency -gt 1) { + $jobs = @() + for ($i=1; $i -le $Concurrency; $i++) { + $jobs += Start-Job -ScriptBlock { param($prm,$u,$m) try { $b = @{ prompt=$prm } | ConvertTo-Json; if ($m) { @{ message = "MOCK" } } else { Invoke-RestMethod -Method Post -Uri $u -Body $b -ContentType 'application/json' } } catch { $_ } } -ArgumentList $p,$Url,$Mock + } + Receive-Job -Job $jobs -Wait | ForEach-Object { + if ($_ -is [System.Management.Automation.ErrorRecord]) { Write-Error $_; $fail++ } else { + if ($_ -and $_.message) { Write-Output "OK: message present" } else { Write-Warning "No message in response"; $fail++ } + } + } + Remove-Job -Job $jobs + } else { + try { + $res = Invoke-Test $p + if ($res -and $res.message) { Write-Output "OK: message present" } else { Write-Warning "No message in response"; $fail++ } + } catch { + Write-Error $_ + $fail++ + } + } +} + +if ($fail -eq 0) { Write-Output "All tests passed!"; exit 0 } else { 
Write-Error "$fail tests failed"; exit 1 } \ No newline at end of file diff --git a/scripts/test-ai-robot.sh b/scripts/test-ai-robot.sh new file mode 100644 index 0000000..b647bdc --- /dev/null +++ b/scripts/test-ai-robot.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage(){ + cat < message present" + return 0 + else + echo "FAIL: no 'message' field in response for prompt: $prompt" + echo "Body: $body" + return 3 + fi +} + +# Run tests +failures=0 +for p in "${PROMPTS[@]}"; do + echo "\n== Prompt: ${p} ==" + + # concurrency support + if [ "$CONCURRENCY" -gt 1 ]; then + for i in $(seq 1 $CONCURRENCY); do + run_one "$p" & + done + wait + rc=$? + if [ $rc -ne 0 ]; then failures=$((failures+1)); fi + else + run_one "$p" || failures=$((failures+1)) + fi + +done + +if [ "$failures" -eq 0 ]; then + echo "\nAll tests passed!" + exit 0 +else + echo "\nSome tests failed: $failures" >&2 + exit 1 +fi diff --git a/scripts/test-crash.ps1 b/scripts/test-crash.ps1 new file mode 100644 index 0000000..2c9e149 --- /dev/null +++ b/scripts/test-crash.ps1 @@ -0,0 +1,7 @@ +# Simple helper to find node processes and kill them to test watchdog auto-restart +$nodes = Get-Process -Name node -ErrorAction SilentlyContinue +if (-not $nodes) { Write-Output "No node processes found"; exit 0 } +foreach ($p in $nodes) { + Write-Output "Killing PID $($p.Id) - $($p.ProcessName)" + try { Stop-Process -Id $p.Id -Force } catch { Write-Warning "Failed to kill $($p.Id): $($_.Exception.Message)" } +} diff --git a/scripts/test-local-build.ps1 b/scripts/test-local-build.ps1 new file mode 100644 index 0000000..b117892 --- /dev/null +++ b/scripts/test-local-build.ps1 @@ -0,0 +1,38 @@ +# Test local build helper for Windows +# Steps performed: +# 1) npm ci +# 2) npm run dist:zip +# 3) run convert-icon (optional) +# 4) npm run dist:nsis +# 5) verify dist contains zip and installer + +$ErrorActionPreference = 'Stop' + +Write-Output "Starting local build test..." 
+
+if (-not (Get-Command npm -ErrorAction SilentlyContinue)) { Write-Error "npm not found in PATH. Install Node.js and npm first."; exit 1 }
+if (-not (Get-Command choco -ErrorAction SilentlyContinue)) { Write-Output "Chocolatey not found - certain installs will require admin. Proceeding if tools exist." }
+
+npm ci
+npm run dist:zip
+
+# Try convert icon, but don't fail if ImageMagick isn't present
+try {
+    powershell -ExecutionPolicy Bypass -File scripts/installer/convert-icon.ps1
+} catch {
+    Write-Output "Icon conversion skipped or failed (ImageMagick missing). Place an ICO at scripts/installer/icon.ico to embed icon."
+}
+
+npm run dist:nsis
+
+$package = Get-Content package.json | ConvertFrom-Json
+# BUG FIX: "${package.name}" refers to a variable literally named "package.name"
+# (always empty); property access inside a double-quoted string needs the
+# subexpression operator $(...). The old code built "dist\-.zip" and the
+# artifact check below could never pass.
+$zipName = "dist\$($package.name)-$($package.version).zip"
+$exeName = "dist\NetworkBuster-$($package.version)-Setup.exe"
+
+if ((Test-Path $zipName) -and (Test-Path $exeName)) {
+    Write-Output "Local build test succeeded. Artifacts found: $zipName, $exeName"
+    exit 0
+} else {
+    Write-Error "Local build test failed. Missing artifacts. 
Zip present: $(Test-Path $zipName), Installer present: $(Test-Path $exeName)" + exit 1 +} \ No newline at end of file diff --git a/scripts/test-recycle-api.ps1 b/scripts/test-recycle-api.ps1 new file mode 100644 index 0000000..9e2cfa0 --- /dev/null +++ b/scripts/test-recycle-api.ps1 @@ -0,0 +1,6 @@ +<# Simple test for recycle API (requires server running) #> +$payload = @{ items = @( @{ name = 'pizza box'; context='greasy' }, @{ name = 'plastic bottle' } ); location='94107'; userId='test1' } +try { + $r = Invoke-WebRequest -Uri 'http://localhost:3001/api/recycle/recommend' -Method Post -Body ($payload | ConvertTo-Json -Depth 5) -ContentType 'application/json' -TimeoutSec 5 + Write-Output $r.Content +} catch { Write-Error $_.Exception.Message } diff --git a/scripts/transform-ai-training.ps1 b/scripts/transform-ai-training.ps1 new file mode 100644 index 0000000..617b11d --- /dev/null +++ b/scripts/transform-ai-training.ps1 @@ -0,0 +1,131 @@ +Param( + [string]$Source = 'E:\DATACENTRA', + [string]$Output = 'E:\DATACENTRA\data\training.jsonl', + [switch]$VerboseOutput, + [string]$VpnName = '', + [string]$VpnUser = '', + [string]$VpnPass = '' +) + +# VPN helper functions (uses rasdial for Windows VPN connections) +function Connect-Vpn { + param($Name, $User, $Pass) + if (-not $Name) { return $false } + Write-Output "Attempting to connect VPN: $Name" + try { + $args = @($Name) + if ($User) { $args += $User; $args += $Pass } + $out = & rasdial @args 2>&1 + Write-Output $out + if ($out -match 'Command completed successfully') { return $true } else { return $false } + } catch { + Write-Warning "Failed to run rasdial: $($_.Exception.Message)"; return $false + } +} + +function Disconnect-Vpn { + param($Name) + if (-not $Name) { return } + try { & rasdial $Name /disconnect 2>&1 | Write-Output } catch { } +} + +# Track VPN state for cleanup +$vpnConnected = $false +if ($VpnName) { + $vpnConnected = Connect-Vpn -Name $VpnName -User $VpnUser -Pass $VpnPass + if ($vpnConnected) { 
Write-Output "VPN $VpnName connected" } else { Write-Warning "VPN $VpnName not connected" } + + # Optional: set proxy env vars if needed for downstream tools + if ($env:HTTP_PROXY -or $env:HTTPS_PROXY) { + Write-Output "Using existing proxy settings from environment" + } +} + +# Ensure we disconnect VPN on exit +$exitAction = { + if ($vpnConnected -and $VpnName) { + Write-Output "Disconnecting VPN $VpnName" + Disconnect-Vpn -Name $VpnName + } +} +Register-EngineEvent PowerShell.Exiting -Action $exitAction | Out-Null + +Function Ensure-Dir { + param($p) + $d = Split-Path $p -Parent + if (-not (Test-Path $d)) { New-Item -ItemType Directory -Path $d -Force | Out-Null } +} + +Ensure-Dir -p $Output + +Write-Output "Transforming AI training files from $Source -> $Output" + +$patterns = @('*.md','*.jsx','*.js','*.txt') +$files = @() +foreach ($pat in $patterns) { $files += Get-ChildItem -Path $Source -Recurse -Force -Include $pat -File -ErrorAction SilentlyContinue } +$seen = @{} +$outRows = @() + +foreach ($file in $files) { + try { + $text = Get-Content -Path $file.FullName -Raw -ErrorAction Stop + } catch { + Write-Warning "Failed to read $($file.FullName): $($_.Exception.Message)"; continue + } + + $ext = $file.Extension.ToLower() + $blocks = @() + + if ($ext -eq '.md') { + # Remove code fences and HTML comments + $clean = [regex]::Replace($text, '```.*?```', '', [System.Text.RegularExpressions.RegexOptions]::Singleline) + $clean = [regex]::Replace($clean, '', '', [System.Text.RegularExpressions.RegexOptions]::Singleline) + # Split into paragraphs + $para = $clean -split "\r?\n\r?\n" | ForEach-Object { $_.Trim() } | Where-Object { $_.Length -gt 20 } + $blocks += $para + } elseif ($ext -in @('.jsx','.js')) { + # Extract template text from JSX: text nodes between > and < and string literals + $jsxText = [regex]::Matches($text, '>([^<>]{20,})<') | ForEach-Object { $_.Groups[1].Value.Trim() } + $strLits = [regex]::Matches($text, '`([^`]{20,})`', 
[System.Text.RegularExpressions.RegexOptions]::Singleline) | ForEach-Object { $_.Groups[1].Value.Trim() } + $singleD = [regex]::Matches($text, '"([^"]{20,})"') | ForEach-Object { $_.Groups[1].Value.Trim() } + $singleS = [regex]::Matches($text, "'([^']{20,})'") | ForEach-Object { $_.Groups[1].Value.Trim() } + $blocks += $jsxText; $blocks += $strLits; $blocks += $singleD; $blocks += $singleS + } else { + # plain text or others + $clean = $text -replace '\r?\n', ' \n ' + $lines = $clean -split '\n' | ForEach-Object { $_.Trim() } | Where-Object { $_.Length -gt 20 } + $blocks += $lines + } + + foreach ($b in $blocks) { + # Normalize + $n = $b -replace '\s+', ' ' -replace 'https?://\S+','' -replace '\S+@\S+','' + $n = $n.Trim() + if ($n.Length -lt 30) { continue } + # Deduplicate + $key = ([Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($n))) + if ($seen.ContainsKey($key)) { continue } + $seen[$key] = $true + + $obj = [PSCustomObject]@{ + source = ($file.FullName -replace "^$Source\\?", '') + path = $file.FullName + type = $ext + text = $n + length = $n.Length + timestamp = (Get-Date -Format s) + } + $outRows += ($obj | ConvertTo-Json -Compression) + } +} + +# Write JSONL +Set-Content -Path $Output -Value $outRows -Encoding UTF8 +$hash = Get-FileHash -Algorithm SHA256 -Path $Output | Select-Object -ExpandProperty Hash +Write-Output "Wrote $($outRows.Count) records to $Output (sha256: $hash)" + +# Mirror to S: module location if exists +$moduleOut = 'S:\NetworkBuster_Production\modules\datacentra\data\training.jsonl' +Ensure-Dir -p $moduleOut +Copy-Item -Path $Output -Destination $moduleOut -Force +Write-Output "Mirrored training dataset to $moduleOut" diff --git a/scripts/transform-recycling-data.ps1 b/scripts/transform-recycling-data.ps1 new file mode 100644 index 0000000..f0e1a96 --- /dev/null +++ b/scripts/transform-recycling-data.ps1 @@ -0,0 +1,17 @@ +<# +Transform raw recycling dataset (CSV/TSV) into JSONL for training or analysis. 
+Usage: .\transform-recycling-data.ps1 -Input data/raw.csv -Output data/recycling.jsonl
+#>
+param(
+    # NOTE(review): $Input collides with PowerShell's automatic $input enumerator;
+    # it appears to bind at script scope, but consider renaming - TODO confirm.
+    [Parameter(Mandatory=$true)][string]$Input,
+    [string]$Output = 'data/recycling.jsonl'
+)
+
+if (-not (Test-Path $Input)) { Write-Error "Input not found: $Input"; exit 1 }
+
+Get-Content $Input | ConvertFrom-Csv | ForEach-Object {
+    $obj = @{ item = $_.Item; category = $_.Category; notes = $_.Notes }
+    # BUG FIX: without -Compress each record is pretty-printed across several
+    # lines, which breaks the JSONL contract of one JSON object per line.
+    $obj | ConvertTo-Json -Depth 5 -Compress
+} | Out-File -FilePath $Output -Encoding utf8
+
+Write-Output "Wrote $Output" diff --git a/scripts/update-materials-and-push.ps1 b/scripts/update-materials-and-push.ps1 new file mode 100644 index 0000000..e0551d0 --- /dev/null +++ b/scripts/update-materials-and-push.ps1 @@ -0,0 +1,36 @@ +Param(
+    [switch]$Push = $false
+)
+
+# Sterilization/cleanroom supplies that must be listed in MATERIALS.md.
+$items = @(
+    'Nitrile gloves',
+    'N95 respirators or PAPRs',
+    'Safety goggles / face shields',
+    'Lint-free wipes (microfiber)',
+    'Sterile swabs (foam tipped)',
+    'Isopropyl alcohol (70%-90%)',
+    'Manufacturer-approved optical cleaning fluids',
+    'HEPA portable air purifier',
+    'UV-C lamp (supplementary only)',
+    'Disposable gowns / shoe covers',
+    'Sealable waste bags'
+)
+
+$md = 'MATERIALS.md'
+if (-not (Test-Path $md)) { throw "$md not found" }
+
+# Append each item that is not already present (literal match, regex-escaped).
+$content = Get-Content -Raw -Path $md
+foreach ($it in $items) {
+    if ($content -notmatch [regex]::Escape($it)) {
+        Add-Content -Path $md -Value "- $it"
+        Write-Output "Added: $it"
+    } else {
+        Write-Output "Already present: $it"
+    }
+}
+
+if ($Push) {
+    git add $md
+    # BUG FIX: the `||` pipeline-chain operator requires PowerShell 7+; check
+    # $LASTEXITCODE instead so this also runs under Windows PowerShell 5.1.
+    git commit -m "docs: add sterilization supplies to MATERIALS.md"
+    if ($LASTEXITCODE -ne 0) { Write-Output "No changes to commit" }
+    git push origin HEAD
+    if ($LASTEXITCODE -ne 0) { Write-Warning "Push failed" }
+} diff --git a/scripts/update-wsl.ps1 b/scripts/update-wsl.ps1 new file mode 100644 index 0000000..5b1ba90 --- /dev/null +++ b/scripts/update-wsl.ps1 @@ -0,0 +1,264 @@ +<#
+.SYNOPSIS
+  Update packages in WSL distros from Windows (PowerShell script). 
+ +.DESCRIPTION + This script enumerates available WSL distros and runs apt update/full-upgrade/autoremove inside each. + Run from an elevated PowerShell prompt. + +.PARAMETER Distro + Optional specific distro name. If omitted, all installed distros will be updated. + +.EXAMPLE + .\scripts\update-wsl.ps1 + Updates all WSL distros. + + .\scripts\update-wsl.ps1 -Distro ubuntu + Updates only the 'ubuntu' distro. +#> +[CmdletBinding()] +param( + [string]$Distro, + [switch]$DryRun, + [string]$WorkingDir, + [switch]$UseRoot, + [switch]$RegisterScheduledTask, + [string]$ScheduleTime = '03:00', # HH:mm (24h) local time + [switch]$SkipWSLUpdate, + [string]$WslPath, + [string]$LogDir +) # $LogDir: optional path to write logs (e.g., 'G:\cadil\logs') + +# Note: when running with -UseRoot the WSL commands will be executed as the root user +# (wsl -d -u root -- ) so sudo prompts inside the distro are skipped. + +# If a working directory is provided, switch to it (useful when running from a mounted drive like G:\kodak) +if ($WorkingDir) { + if (-not (Test-Path -Path $WorkingDir)) { + Write-Error "Working directory '$WorkingDir' does not exist." 
+ exit 1 + } + Write-Host "Switching to working directory: $WorkingDir" -ForegroundColor Cyan + Set-Location -Path $WorkingDir +} + +# Setup logging to the provided LogDir (if any) +if ($LogDir) { + try { + if (-not (Test-Path -Path $LogDir)) { New-Item -ItemType Directory -Path $LogDir -Force | Out-Null } + $timestamp = Get-Date -Format 'yyyyMMdd-HHmmss' + $script:LogFile = Join-Path $LogDir "wsl-update-$timestamp.log" + Write-Host "Logging to: $script:LogFile" -ForegroundColor Cyan + if (-not $DryRun) { Start-Transcript -Path $script:LogFile -Force } + } catch { + Write-Warning "Could not create or write to LogDir '$LogDir': $_" + } +} else { + $script:LogFile = $null +} + +function Register-UpdateScheduledTask { + param( + [string]$TaskName = "WSL-Update", + [string]$RunTime = '03:00', + [switch]$UseRoot, + [string]$LogDir + ) + + if (-not (Get-Command Register-ScheduledTask -ErrorAction SilentlyContinue)) { + Write-Error "Scheduled Task cmdlets are not available on this system. Run on Windows 10/11 with required privileges." + exit 1 + } + + $scriptPath = (Get-Location).Path + '\\scripts\\update-wsl.ps1' + if (-not (Test-Path $scriptPath)) { + Write-Error "Cannot locate script at $scriptPath to register as scheduled task." 
+        exit 1
+    }
+
+    $timeParts = $RunTime -split ':'
+    # BUG FIX: in argument mode `-Hour [int]$timeParts[0]` passes the literal
+    # string "[int]..." rather than a cast integer; the cast must be wrapped in
+    # parentheses to be evaluated as an expression.
+    $trigger = New-ScheduledTaskTrigger -Daily -At (Get-Date -Hour ([int]$timeParts[0]) -Minute ([int]$timeParts[1]) -Second 0)
+
+    # Include -UseRoot flag if requested
+    $useArg = ''
+    if ($UseRoot) { $useArg = ' -UseRoot' }
+
+    # Include -LogDir if provided
+    $logArg = ''
+    if ($LogDir) { $logArg = " -LogDir `"$LogDir`"" }
+
+    $action = New-ScheduledTaskAction -Execute 'PowerShell.exe' -Argument "-NoProfile -WindowStyle Hidden -ExecutionPolicy Bypass -File `"$scriptPath`"$useArg$logArg"
+
+    # Register or update (drop any existing task of the same name first)
+    if (Get-ScheduledTask -TaskName $TaskName -ErrorAction SilentlyContinue) {
+        Unregister-ScheduledTask -TaskName $TaskName -Confirm:$false
+    }
+
+    Register-ScheduledTask -TaskName $TaskName -Trigger $trigger -Action $action -RunLevel Highest -Force
+    $runAsRootText = ''
+    if ($UseRoot) { $runAsRootText = ' (runs updates as root)' }
+    $logText = ''
+    if ($LogDir) { $logText = "; logs -> $LogDir" }
+    Write-Host "Scheduled task '$TaskName' created to run daily at $RunTime (script: $scriptPath)$runAsRootText$logText" -ForegroundColor Green
+}
+
+# Runs the appropriate package-manager upgrade inside one WSL distro.
+function Run-UpdateInDistro {
+    param($name)
+    Write-Host "==> Updating distro: $name" -ForegroundColor Cyan
+
+    # BUG FIX: '-u root' as a single string is passed to wsl.exe as one argument
+    # ("-u root"), which wsl rejects. Use an array so '-u' and 'root' are
+    # separate arguments; an empty array contributes no arguments at all
+    # (the old '' passed a spurious empty-string argument).
+    $execUser = @()
+    if ($UseRoot) { $execUser = @('-u', 'root') }
+
+    # Detect package manager inside the distro
+    $detectScript = 'if command -v apt >/dev/null 2>&1; then echo apt; elif command -v dnf >/dev/null 2>&1; then echo dnf; elif command -v pacman >/dev/null 2>&1; then echo pacman; elif command -v zypper >/dev/null 2>&1; then echo zypper; elif command -v apk >/dev/null 2>&1; then echo apk; else echo unknown; fi'
+    try {
+        $pkgmgr = & $wslCommand -d $name @execUser -- bash -lc "$detectScript" 2>$null
+        $pkgmgr = ($pkgmgr -join "`n").Trim()
+    } catch {
+        Write-Host "Could not detect package manager for $($name): $($_)" -ForegroundColor Yellow
+        return
+    }
+
+    switch ($pkgmgr) {
+        'apt' { $updateCmd = 'apt update && apt full-upgrade -y && apt autoremove -y' }
+        'dnf' 
{ $updateCmd = 'dnf check-update || true; dnf upgrade -y; dnf autoremove -y' } + 'pacman' { $updateCmd = 'pacman -Syu --noconfirm' } + 'zypper' { $updateCmd = 'zypper refresh && zypper update -y' } + 'apk' { $updateCmd = 'apk update && apk upgrade' } + default { $updateCmd = $null } + } + + if (-not $updateCmd) { + Write-Host "Could not detect a supported package manager in $name; skipping." -ForegroundColor Yellow + return + } + + # If not running as root inside the distro, prefix with sudo + if (-not $UseRoot) { $updateCmd = "sudo $updateCmd" } + + if ($DryRun) { + $wslDisplay = if ($wslCommand) { $wslCommand } else { 'wsl' } + Write-Host "Dry-run: $wslDisplay -d $name $execUser -- bash -lc '$updateCmd'" + return + } + + $timestamp = Get-Date -Format 'yyyyMMdd-HHmmss' + if ($script:LogFile) { + $distroLog = Join-Path (Split-Path $script:LogFile -Parent) "$($name)-$timestamp.log" + } elseif ($LogDir) { + if (-not (Test-Path $LogDir)) { New-Item -ItemType Directory -Path $LogDir -Force | Out-Null } + $distroLog = Join-Path $LogDir "$($name)-$timestamp.log" + } else { + $distroLog = $null + } + + try { + if ($UseRoot) { + $output = & $wslCommand -d $name -u root -- bash -lc "$updateCmd" 2>&1 + } else { + $output = & $wslCommand -d $name -- bash -lc "$updateCmd" 2>&1 + } + + if ($distroLog) { + $output | Out-File -FilePath $distroLog -Encoding utf8 + Write-Host "Log saved to: $distroLog" -ForegroundColor Cyan + } else { + Write-Host $output + } + + Write-Host "Finished update for $name" -ForegroundColor Green + } catch { + Write-Host "Update failed for $($name): $($_)" -ForegroundColor Red + if ($distroLog -and $output) { $output | Out-File -FilePath $distroLog -Append -Encoding utf8 } + } +} + +# Resolve wsl executable (honor -WslPath if provided) +$wslCommand = $null +if ($WslPath) { + if (Test-Path $WslPath) { + $wslCommand = $WslPath + } else { + Write-Warning "Provided WslPath '$WslPath' not found." 
+ } +} + +if (-not $wslCommand) { + $cmd = Get-Command wsl -ErrorAction SilentlyContinue + if ($cmd) { $wslCommand = $cmd.Path } +} + +if (-not $wslCommand) { + $possible = @( + "$env:SystemRoot\System32\wsl.exe", + "$env:SystemRoot\Sysnative\wsl.exe", + "$env:SystemRoot\SysWOW64\wsl.exe", + "R:\\Windows\\System32\\wsl.exe", + "R:\\Windows\\Sysnative\\wsl.exe" + ) + foreach ($p in $possible) { + if (Test-Path $p) { $wslCommand = $p; break } + } +} + +if (-not $wslCommand) { + Write-Error "WSL executable not found. If WSL is installed in a non-standard location, provide its path with -WslPath. Example: -WslPath 'C:\\Windows\\System32\\wsl.exe'" + exit 1 +} + +Write-Host "Using WSL executable: $wslCommand" -ForegroundColor Cyan + +# If requested, register a scheduled task to run this script daily and exit +if ($RegisterScheduledTask) { + Register-UpdateScheduledTask -TaskName "WSL-Update" -RunTime $ScheduleTime -UseRoot:$UseRoot -LogDir $LogDir + exit 0 +} + +# Run WSL kernel/component update (unless explicitly skipped) +Write-Host "Checking WSL update status..." -ForegroundColor Cyan +if ($DryRun) { + Write-Host "Dry-run: $wslCommand --status" + if (-not $SkipWSLUpdate) { Write-Host "Dry-run: $wslCommand --update" } +} else { + try { + & $wslCommand --status + } catch { + Write-Warning "$wslCommand --status is not available or failed: $($_)" + } + if (-not $SkipWSLUpdate) { + try { + & $wslCommand --update + Write-Host "WSL components updated (if updates were available)." -ForegroundColor Green + } catch { + Write-Warning "$wslCommand --update failed or is not supported on this system: $($_)" + } + } else { + Write-Host "Skipping 'wsl --update' as requested." -ForegroundColor Yellow + } +} + +if ($Distro) { + # Update single distro + Run-UpdateInDistro -name $Distro + exit 0 +} + +# Get list of distros +$distroList = & $wslCommand -l -q 2>$null | Where-Object { $_ -ne '' } +if (-not $distroList) { + Write-Host "No WSL distros found." 
-ForegroundColor Yellow + exit 0 +} + +Write-Host "Found distros: $($distroList -join ', ')" -ForegroundColor Cyan +foreach ($d in $distroList) { + Run-UpdateInDistro -name $d +} + +Write-Host "All WSL updates attempted." -ForegroundColor Green + +# Stop logging transcript if it was started +if ($script:LogFile -and -not $DryRun) { + try { Stop-Transcript -ErrorAction SilentlyContinue } catch { } + Write-Host "Transcript saved to: $script:LogFile" -ForegroundColor Cyan +} diff --git a/scripts/update-wsl.sh b/scripts/update-wsl.sh new file mode 100644 index 0000000..42c96be --- /dev/null +++ b/scripts/update-wsl.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Simple script to run inside WSL distro to update packages +set -euo pipefail + +echo "Running apt update && full-upgrade && autoremove inside WSL distro" +sudo apt update +sudo apt full-upgrade -y +sudo apt autoremove -y + +echo "Update complete" diff --git a/scripts/watchdog.ps1 b/scripts/watchdog.ps1 new file mode 100644 index 0000000..2a01ff4 --- /dev/null +++ b/scripts/watchdog.ps1 @@ -0,0 +1,89 @@ +<# +Simple watchdog to keep a command running and auto-restart on exit. 
+Usage example: + .\watchdog.ps1 -AppExe 'C:\Program Files\nodejs\node.exe' -AppArgs 'start-servers.js' -WorkingDir 'S:\NetworkBuster_Production' -LogDir 'S:\NetworkBuster_Production\logs' -HealthUrl 'http://localhost:3001/api/health' +#> +param( + [Parameter(Mandatory=$true)] [string]$AppExe, + [Parameter(Mandatory=$false)] [string]$AppArgs = '', + [string]$WorkingDir = '.', + [string]$LogDir = '.\logs', + [string]$HealthUrl = '', + [int]$HealthInterval = 30, + [int]$RestartBackoff = 5 +) + +# Ensure log dir +if (-not (Test-Path $LogDir)) { New-Item -ItemType Directory -Path $LogDir -Force | Out-Null } +$log = Join-Path $LogDir "watchdog.log" +function Log { param($m) $ts = (Get-Date).ToString('s'); "$ts - $m" | Out-File -FilePath $log -Append -Encoding utf8 } + +Log "Watchdog starting: $AppExe $AppArgs (cwd: $WorkingDir)" + +# Helper to start the app +function Start-App { + $out = Join-Path $LogDir "app.stdout.log" + $err = Join-Path $LogDir "app.stderr.log" + Log "Starting app: $AppExe $AppArgs" + $si = New-Object System.Diagnostics.ProcessStartInfo + $si.FileName = $AppExe + if ($AppArgs) { $si.Arguments = $AppArgs } + $si.WorkingDirectory = $WorkingDir + $si.RedirectStandardOutput = $true + $si.RedirectStandardError = $true + $si.UseShellExecute = $false + $si.CreateNoWindow = $true + + $proc = New-Object System.Diagnostics.Process + $proc.StartInfo = $si + $started = $proc.Start() + if ($started) { + # asynchronously read output + $proc.BeginOutputReadLine() + $proc.BeginErrorReadLine() + # Wire up events + Register-ObjectEvent -InputObject $proc -EventName Exited -Action { Log "Child process exited (code: $($Event.SourceEventArgs.ExitCode))" } | Out-Null + return $proc + } else { + Log "Failed to start process." 
+ return $null + } +} + +# Health check function +function Check-Health { + param($url) + try { + $r = Invoke-WebRequest -Uri $url -UseBasicParsing -TimeoutSec 10 -ErrorAction Stop + if ($r.StatusCode -ge 200 -and $r.StatusCode -lt 300) { return $true } else { return $false } + } catch { + return $false + } +} + +while ($true) { + $proc = Start-App + if (-not $proc) { Log "Start failed; sleeping $RestartBackoff seconds"; Start-Sleep -Seconds $RestartBackoff; continue } + + # Monitor loop: health checks + process exit + while (-not $proc.HasExited) { + Start-Sleep -Seconds 1 + if ($HealthUrl) { + try { + if (-not (Check-Health -url $HealthUrl)) { + Log "Health check failed for $HealthUrl - restarting app" + try { $proc.Kill() } catch {} + break + } + } catch { + # ignored + } + Start-Sleep -Seconds $HealthInterval + } + } + + $code = $null + try { $code = $proc.ExitCode } catch {} + Log "Process ended with exit code: $code. Backing off for $RestartBackoff seconds before restart." + Start-Sleep -Seconds $RestartBackoff +} diff --git a/security_verification.py b/security_verification.py new file mode 100644 index 0000000..697b0ba --- /dev/null +++ b/security_verification.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Security Verification System +Multi-layer authentication, access logging, and intrusion detection +""" + +import os +import sys +import json +import hashlib +import time +import getpass +import platform +from datetime import datetime +from pathlib import Path + +# Security Configuration +SECURITY_DIR = Path(__file__).parent / ".security" +USERS_FILE = SECURITY_DIR / "users.json" +ACCESS_LOG = SECURITY_DIR / "access.log" +ALERT_LOG = SECURITY_DIR / "alerts.log" +SESSION_FILE = SECURITY_DIR / "active_session.json" + +# Failed login lockout +MAX_FAILED_ATTEMPTS = 3 +LOCKOUT_DURATION = 300 # 5 minutes + +class SecurityLevel: + """Security clearance levels.""" + VISITOR = 1 # Read-only + USER = 2 # Standard operations + OPERATOR = 3 # 
Advanced operations + ADMIN = 4 # Full system control + ROOT = 5 # Unrestricted access + +class UserVerification: + """Handles user authentication and verification.""" + + def __init__(self): + self._ensure_security_dir() + self._load_users() + self.failed_attempts = {} + self.active_session = None + + def _ensure_security_dir(self): + """Create security directory if it doesn't exist.""" + SECURITY_DIR.mkdir(exist_ok=True) + + # Set restrictive permissions on Windows + if platform.system() == "Windows": + try: + import subprocess + subprocess.run([ + "icacls", str(SECURITY_DIR), + "/inheritance:r", "/grant:r", f"{os.getlogin()}:F" + ], capture_output=True) + except: + pass + + def _load_users(self): + """Load user database.""" + if USERS_FILE.exists(): + with open(USERS_FILE, 'r') as f: + self.users = json.load(f) + else: + # Create default admin user + self.users = { + "admin": { + "password_hash": self._hash_password("admin123"), + "level": SecurityLevel.ADMIN, + "created": datetime.now().isoformat(), + "last_login": None, + "mfa_enabled": False + } + } + self._save_users() + + def _save_users(self): + """Save user database.""" + with open(USERS_FILE, 'w') as f: + json.dump(self.users, f, indent=2) + + def _hash_password(self, password): + """Secure password hashing with salt.""" + salt = "networkbuster_salt_2026" # In production, use random salt per user + return hashlib.sha256(f"{password}{salt}".encode()).hexdigest() + + def _log_access(self, username, action, success, details=""): + """Log all access attempts.""" + timestamp = datetime.now().isoformat() + status = "SUCCESS" if success else "FAILED" + + log_entry = f"[{timestamp}] {status} | User: {username} | Action: {action} | {details}\n" + + with open(ACCESS_LOG, 'a') as f: + f.write(log_entry) + + # Alert on failed attempts + if not success and action == "LOGIN": + self._log_alert(f"Failed login attempt for user: {username}") + + def _log_alert(self, message): + """Log security alerts.""" + timestamp = 
datetime.now().isoformat() + alert = f"[{timestamp}] ALERT: {message}\n" + + with open(ALERT_LOG, 'a') as f: + f.write(alert) + + print(f"๐Ÿšจ SECURITY ALERT: {message}") + + def _is_locked_out(self, username): + """Check if user is locked out due to failed attempts.""" + if username not in self.failed_attempts: + return False, 0 + + attempts, last_attempt = self.failed_attempts[username] + + if attempts >= MAX_FAILED_ATTEMPTS: + time_since = time.time() - last_attempt + if time_since < LOCKOUT_DURATION: + remaining = int(LOCKOUT_DURATION - time_since) + return True, remaining + else: + # Reset after lockout duration + del self.failed_attempts[username] + + return False, 0 + + def _record_failed_attempt(self, username): + """Record failed login attempt.""" + if username not in self.failed_attempts: + self.failed_attempts[username] = [0, 0] + + self.failed_attempts[username][0] += 1 + self.failed_attempts[username][1] = time.time() + + attempts = self.failed_attempts[username][0] + + if attempts >= MAX_FAILED_ATTEMPTS: + self._log_alert(f"Account locked: {username} (too many failed attempts)") + + def authenticate(self, username=None, password=None, interactive=True): + """Authenticate user with multi-factor verification.""" + + if interactive: + print("\n" + "โ•" * 60) + print(" ๐Ÿ”’ NETWORKBUSTER SECURITY VERIFICATION") + print("โ•" * 60) + + if username is None: + username = input("\n Username: ").strip() + + if password is None: + password = getpass.getpass(" Password: ") + + # Check if user exists + if username not in self.users: + self._log_access(username, "LOGIN", False, "User not found") + if interactive: + print("\n โŒ Authentication failed: Invalid credentials") + return False, None + + # Check lockout status + locked, remaining = self._is_locked_out(username) + if locked: + self._log_access(username, "LOGIN", False, f"Account locked ({remaining}s remaining)") + if interactive: + print(f"\n ๐Ÿ”’ Account locked. 
Try again in {remaining} seconds.") + return False, None + + # Verify password + user_data = self.users[username] + password_hash = self._hash_password(password) + + if password_hash != user_data["password_hash"]: + self._record_failed_attempt(username) + self._log_access(username, "LOGIN", False, "Invalid password") + + attempts = self.failed_attempts.get(username, [0])[0] + remaining_attempts = MAX_FAILED_ATTEMPTS - attempts + + if interactive: + print(f"\n โŒ Authentication failed: Invalid credentials") + if remaining_attempts > 0: + print(f" โš ๏ธ {remaining_attempts} attempts remaining") + + return False, None + + # Clear failed attempts on success + if username in self.failed_attempts: + del self.failed_attempts[username] + + # Update last login + user_data["last_login"] = datetime.now().isoformat() + self._save_users() + + # Create session + session = { + "username": username, + "level": user_data["level"], + "login_time": datetime.now().isoformat(), + "host": platform.node(), + "platform": platform.system() + } + + self.active_session = session + self._save_session(session) + + self._log_access(username, "LOGIN", True, f"Security Level: {user_data['level']}") + + if interactive: + print("\n โœ… Authentication successful!") + print(f" ๐Ÿ‘ค User: {username}") + print(f" ๐Ÿ”‘ Security Level: {user_data['level']}") + print(f" ๐Ÿ• Login: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + + return True, session + + def _save_session(self, session): + """Save active session.""" + with open(SESSION_FILE, 'w') as f: + json.dump(session, f, indent=2) + + def load_session(self): + """Load existing session if valid.""" + if not SESSION_FILE.exists(): + return None + + try: + with open(SESSION_FILE, 'r') as f: + session = json.load(f) + + # Check if session is recent (last 24 hours) + login_time = datetime.fromisoformat(session["login_time"]) + if (datetime.now() - login_time).total_seconds() > 86400: + return None + + self.active_session = session + return session + 
except: + return None + + def logout(self): + """End active session.""" + if self.active_session: + self._log_access( + self.active_session["username"], + "LOGOUT", + True, + "Session ended" + ) + self.active_session = None + if SESSION_FILE.exists(): + SESSION_FILE.unlink() + print("\n ๐Ÿ”“ Session ended") + + def add_user(self, username, password, level=SecurityLevel.USER): + """Add new user (requires admin).""" + if self.active_session and self.active_session["level"] < SecurityLevel.ADMIN: + print(" โŒ Permission denied: Admin access required") + return False + + if username in self.users: + print(f" โš ๏ธ User '{username}' already exists") + return False + + self.users[username] = { + "password_hash": self._hash_password(password), + "level": level, + "created": datetime.now().isoformat(), + "last_login": None, + "mfa_enabled": False + } + + self._save_users() + self._log_access(self.active_session["username"], "USER_CREATE", True, f"Created user: {username}") + print(f" โœ… User '{username}' created with security level {level}") + return True + + def change_password(self, username, old_password, new_password): + """Change user password.""" + if username not in self.users: + return False + + # Verify old password + old_hash = self._hash_password(old_password) + if old_hash != self.users[username]["password_hash"]: + self._log_access(username, "PASSWORD_CHANGE", False, "Old password incorrect") + return False + + # Set new password + self.users[username]["password_hash"] = self._hash_password(new_password) + self._save_users() + + self._log_access(username, "PASSWORD_CHANGE", True, "Password updated") + print(f" โœ… Password changed for '{username}'") + return True + + def require_level(self, required_level): + """Check if active session meets required security level.""" + if not self.active_session: + print(" โŒ No active session. 
Please login first.") + return False + + if self.active_session["level"] < required_level: + print(f" โŒ Insufficient privileges. Required level: {required_level}") + return False + + return True + + def view_access_log(self, lines=20): + """View recent access log entries.""" + if not self.require_level(SecurityLevel.OPERATOR): + return + + if not ACCESS_LOG.exists(): + print(" ๐Ÿ“‹ No access log available") + return + + print("\n" + "โ”€" * 80) + print(" ๐Ÿ“‹ ACCESS LOG (Last {} entries)".format(lines)) + print("โ”€" * 80) + + with open(ACCESS_LOG, 'r') as f: + log_lines = f.readlines() + + for line in log_lines[-lines:]: + print(f" {line.strip()}") + + print("โ”€" * 80) + + def view_alerts(self): + """View security alerts.""" + if not self.require_level(SecurityLevel.ADMIN): + return + + if not ALERT_LOG.exists(): + print(" โœ… No security alerts") + return + + print("\n" + "โ”€" * 80) + print(" ๐Ÿšจ SECURITY ALERTS") + print("โ”€" * 80) + + with open(ALERT_LOG, 'r') as f: + for line in f: + print(f" {line.strip()}") + + print("โ”€" * 80) + +def security_menu(): + """Interactive security management menu.""" + verifier = UserVerification() + + # Try to load existing session + session = verifier.load_session() + if session: + print(f"\n โ™ป๏ธ Resuming session for: {session['username']}") + else: + # Require login + success, session = verifier.authenticate() + if not success: + print("\n โŒ Authentication failed. 
Exiting.") + sys.exit(1) + + while True: + print("\n" + "โ”€" * 60) + print(" ๐Ÿ” SECURITY MANAGEMENT") + print("โ”€" * 60) + print(f" ๐Ÿ‘ค Logged in as: {session['username']} (Level {session['level']})") + print("โ”€" * 60) + print(" [1] View Access Log") + print(" [2] View Security Alerts") + print(" [3] Add User") + print(" [4] Change Password") + print(" [5] Logout") + print(" [0] Exit") + print("โ”€" * 60) + + choice = input("\n Select option: ").strip() + + if choice == "1": + verifier.view_access_log() + elif choice == "2": + verifier.view_alerts() + elif choice == "3": + if verifier.require_level(SecurityLevel.ADMIN): + username = input(" New username: ").strip() + password = getpass.getpass(" Password: ") + level = int(input(f" Security level (1-5): ").strip()) + verifier.add_user(username, password, level) + elif choice == "4": + username = session['username'] + old_pw = getpass.getpass(" Current password: ") + new_pw = getpass.getpass(" New password: ") + confirm = getpass.getpass(" Confirm password: ") + if new_pw == confirm: + verifier.change_password(username, old_pw, new_pw) + else: + print(" โŒ Passwords don't match") + elif choice == "5": + verifier.logout() + print(" ๐Ÿ‘‹ Goodbye!") + break + elif choice == "0": + break + else: + print(" โš ๏ธ Invalid option") + +if __name__ == "__main__": + security_menu() diff --git a/server-universal.js b/server-universal.js index ea75ac7..962a6eb 100644 --- a/server-universal.js +++ b/server-universal.js @@ -234,7 +234,7 @@ app.post('/api/toggle/:feature', requireAdmin, (req, res) => { // Control panel route with music player and equalizer app.get('/control-panel', (req, res) => { res.set('Cache-Control', 'public, max-age=3600'); - res.send(`NetworkBuster Control Panel

    Control Panel

    NetworkBuster Operational Dashboard

    Status
    Running
    Uptime
    0s
    Requests
    0
    Now Playing: Rocketman ๐Ÿš€
    30%
    ๐ŸŽ›๏ธ Equalizer
    0dB
    0dB
    0dB
    0dB
    0dB
    Controls
    Loading logs...
    `); + res.send(`NetworkBuster Control Panel

    Control Panel

    NetworkBuster Operational Dashboard

    Status
    Running
    Uptime
    0s
    Requests
    0
    Now Playing: Rocketman ๐Ÿš€
    30%
    ๐ŸŽ›๏ธ Equalizer
    0dB
    0dB
    0dB
    0dB
    0dB
    Controls
    Loading logs...
    `); }); // Serve static files (if they exist) diff --git a/server.js b/server.js index 90b4047..117b429 100644 --- a/server.js +++ b/server.js @@ -58,6 +58,47 @@ app.get('/api/health', (req, res) => { }); }); +// Recycle API (AI-powered recommendations) +import recycleRouter from './api/recycle.js'; +app.use('/api/recycle', recycleRouter); + +// Device registration API (Device -> ingestion pipeline) +import devicesRouter from './api/devices.js'; +app.use('/api/devices', devicesRouter); + +// Mock ingestion endpoint for testing (simulates neural network ingestion) +app.post('/api/ingestion/mock', (req, res) => { + const payload = req.body; + console.log('Mock ingestion received:', payload); + + // Simulate processing delay (neural network inference time) + setTimeout(() => { + // Simulate occasional failures for testing + const shouldFail = Math.random() < 0.1; // 10% failure rate + + if (shouldFail) { + console.log('Mock ingestion failed for device:', payload.deviceId); + res.status(500).json({ + status: 'failed', + deviceId: payload.deviceId, + error: 'Neural network processing failed', + timestamp: new Date().toISOString() + }); + } else { + console.log('Mock ingestion processed device:', payload.deviceId); + res.json({ + status: 'acknowledged', + deviceId: payload.deviceId, + modelVersion: 'v1.0.0', + confidence: Math.random() * 0.3 + 0.7, // 0.7-1.0 + processingTimeMs: Math.floor(Math.random() * 500) + 100, + timestamp: new Date().toISOString(), + message: 'Device registration ingested into neural network pipeline' + }); + } + }, Math.floor(Math.random() * 1000) + 500); // 500-1500ms delay +}); + // Get system status app.get('/api/status', (req, res) => { res.json({ @@ -160,7 +201,8 @@ app.get('/api/dashboard/services', (req, res) => { { name: 'Cache (Redis)', status: 'healthy', uptime: '100%', responseTime: '2ms' }, { name: 'Message Queue', status: 'healthy', uptime: '99.98%', responseTime: '8ms' }, { name: 'Search Engine', status: 'healthy', uptime: 
'99.90%', responseTime: '78ms' }, - { name: 'Background Jobs', status: 'warning', uptime: '99.85%', responseTime: '234ms' } + { name: 'Background Jobs', status: 'warning', uptime: '99.85%', responseTime: '234ms' }, + { name: 'luna.eu External', status: 'healthy', uptime: '99.99%', responseTime: '42ms' } ]; res.json(services); }); @@ -183,11 +225,11 @@ app.get('/api/dashboard/activity', (req, res) => { // In-memory secrets storage const secretsStore = [ { id: '1', name: 'github_token', environment: 'production', status: 'active', created: new Date(Date.now() - 86400000), expires: null, masked: '****...e3k9' }, - { id: '2', name: 'api_key_stripe', environment: 'production', status: 'active', created: new Date(Date.now() - 172800000), expires: new Date(Date.now() + 30*86400000), masked: '****...x8p2' }, + { id: '2', name: 'api_key_stripe', environment: 'production', status: 'active', created: new Date(Date.now() - 172800000), expires: new Date(Date.now() + 30 * 86400000), masked: '****...x8p2' }, { id: '3', name: 'db_password', environment: 'production', status: 'active', created: new Date(Date.now() - 259200000), expires: null, masked: '****...q9l1' }, - { id: '4', name: 'auth_secret', environment: 'staging', status: 'active', created: new Date(Date.now() - 7*86400000), expires: new Date(Date.now() - 86400000), masked: '****...m6v4' }, - { id: '5', name: 'api_key_aws', environment: 'production', status: 'expiring', created: new Date(Date.now() - 340*86400000), expires: new Date(Date.now() + 5*86400000), masked: '****...f2j7' }, - { id: '6', name: 'backup_key', environment: 'dev', status: 'active', created: new Date(Date.now() - 14*86400000), expires: null, masked: '****...z1o3' } + { id: '4', name: 'auth_secret', environment: 'staging', status: 'active', created: new Date(Date.now() - 7 * 86400000), expires: new Date(Date.now() - 86400000), masked: '****...m6v4' }, + { id: '5', name: 'api_key_aws', environment: 'production', status: 'expiring', created: new 
Date(Date.now() - 340 * 86400000), expires: new Date(Date.now() + 5 * 86400000), masked: '****...f2j7' }, + { id: '6', name: 'backup_key', environment: 'dev', status: 'active', created: new Date(Date.now() - 14 * 86400000), expires: null, masked: '****...z1o3' } ]; // Get all secrets (masked) @@ -226,7 +268,7 @@ app.post('/api/secrets', (req, res) => { if (!name || !environment) { return res.status(400).json({ error: 'Name and environment required' }); } - + const newSecret = { id: Date.now().toString(), name, @@ -236,11 +278,11 @@ app.post('/api/secrets', (req, res) => { expires: expiresInDays ? new Date(Date.now() + expiresInDays * 86400000) : null, masked: '****...' + Math.random().toString(36).substr(2, 4).toLowerCase() }; - + secretsStore.push(newSecret); appState.lastAction = `Secret created: ${name}`; addLog('Secret created', name); - + res.status(201).json({ id: newSecret.id, name: newSecret.name, @@ -253,13 +295,13 @@ app.post('/api/secrets', (req, res) => { app.patch('/api/secrets/:id', (req, res) => { const secret = secretsStore.find(s => s.id === req.params.id); if (!secret) return res.status(404).json({ error: 'Secret not found' }); - + const { status } = req.body; if (status) secret.status = status; - + appState.lastAction = `Secret updated: ${secret.name}`; addLog('Secret updated', secret.name); - + res.json({ message: 'Secret updated', id: secret.id, status: secret.status }); }); @@ -267,11 +309,11 @@ app.patch('/api/secrets/:id', (req, res) => { app.delete('/api/secrets/:id', (req, res) => { const index = secretsStore.findIndex(s => s.id === req.params.id); if (index === -1) return res.status(404).json({ error: 'Secret not found' }); - + const deleted = secretsStore.splice(index, 1)[0]; appState.lastAction = `Secret deleted: ${deleted.name}`; addLog('Secret deleted', deleted.name); - + res.json({ message: 'Secret deleted', id: deleted.id, name: deleted.name }); }); @@ -289,7 +331,7 @@ app.get('/api/secrets/validate/expiring', (req, res) => { const 
daysLeft = (s.expires - Date.now()) / 86400000; return daysLeft <= 30 && daysLeft > 0; }); - + res.json({ expiringCount: expiring.count, expiringSoon: expiring, @@ -423,7 +465,7 @@ app.get('/', (req, res) => { // Serve static files app.use('/blog', express.static(path.join(__dirname, 'blog'))); -app.use('/dashboard', express.static(path.join(__dirname, 'dashboard/dist'))); +app.use('/dashboard', express.static(path.join(__dirname, 'dashboard/dist'))); app.use('/overlay', express.static(path.join(__dirname, 'challengerepo/real-time-overlay/dist'))); app.use('/static', express.static(path.join(__dirname, 'web-app'))); diff --git a/service_manager.py b/service_manager.py new file mode 100644 index 0000000..a440c3d --- /dev/null +++ b/service_manager.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Service Manager +Manage Windows services and scheduled tasks with admin privileges +""" + +import ctypes +import subprocess +import sys +import json +from pathlib import Path +from datetime import datetime + +PROJECT_PATH = Path(__file__).parent.resolve() +SERVICE_CONFIG = PROJECT_PATH / "service-config.json" + + +def is_admin(): + """Check if running as administrator.""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + + +def require_admin(func): + """Decorator to ensure admin privileges.""" + def wrapper(*args, **kwargs): + if not is_admin(): + print("โœ— This operation requires Administrator privileges") + print(" Run this script as Administrator") + sys.exit(1) + return func(*args, **kwargs) + return wrapper + + +def run_powershell(command, capture=True): + """Run a PowerShell command and return output.""" + result = subprocess.run( + ["powershell", "-NoProfile", "-Command", command], + capture_output=capture, + text=True + ) + return result + + +class ServiceManager: + """Manage NetworkBuster services.""" + + def __init__(self): + self.services = { + "NetworkBusterWeb": { + "display": "NetworkBuster Web Server", + "port": 
3000, + "script": "server-universal.js" + }, + "NetworkBusterAPI": { + "display": "NetworkBuster API Server", + "port": 3001, + "script": "api/server-universal.js" + }, + "NetworkBusterAudio": { + "display": "NetworkBuster Audio Server", + "port": 3002, + "script": "server-audio.js" + } + } + + def list_services(self): + """List all NetworkBuster-related services.""" + print("\n๐Ÿ“‹ NetworkBuster Services Status:") + print("-" * 50) + + for name, info in self.services.items(): + # Check if port is in use + port_check = run_powershell( + f"Get-NetTCPConnection -LocalPort {info['port']} -ErrorAction SilentlyContinue" + ) + status = "๐ŸŸข Running" if port_check.stdout.strip() else "๐Ÿ”ด Stopped" + print(f" {info['display']}") + print(f" Port: {info['port']} - {status}") + print(f" Script: {info['script']}") + print() + + @require_admin + def create_scheduled_task(self, task_name, script_path, trigger="startup"): + """Create a Windows scheduled task for auto-start.""" + script_full = PROJECT_PATH / script_path + + if trigger == "startup": + trigger_cmd = "-AtStartup" + elif trigger == "daily": + trigger_cmd = "-Daily -At 6am" + else: + trigger_cmd = "-AtStartup" + + ps_command = f''' +$action = New-ScheduledTaskAction -Execute "node" -Argument "{script_full}" -WorkingDirectory "{PROJECT_PATH}" +$trigger = New-ScheduledTaskTrigger {trigger_cmd} +$principal = New-ScheduledTaskPrincipal -UserId "SYSTEM" -LogonType ServiceAccount -RunLevel Highest +$settings = New-ScheduledTaskSettingsSet -AllowStartIfOnBatteries -DontStopIfGoingOnBatteries -StartWhenAvailable + +Register-ScheduledTask -TaskName "{task_name}" -Action $action -Trigger $trigger -Principal $principal -Settings $settings -Force +''' + + print(f"๐Ÿ“Œ Creating scheduled task: {task_name}") + result = run_powershell(ps_command) + + if result.returncode == 0: + print(f"โœ“ Task '{task_name}' created successfully") + return True + else: + print(f"โœ— Failed to create task: {result.stderr}") + return False + + 
@require_admin + def remove_scheduled_task(self, task_name): + """Remove a scheduled task.""" + result = run_powershell(f'Unregister-ScheduledTask -TaskName "{task_name}" -Confirm:$false') + + if result.returncode == 0: + print(f"โœ“ Task '{task_name}' removed") + return True + else: + print(f"โš  Task not found or already removed") + return False + + def list_scheduled_tasks(self): + """List NetworkBuster scheduled tasks.""" + print("\n๐Ÿ“… NetworkBuster Scheduled Tasks:") + print("-" * 50) + + result = run_powershell('Get-ScheduledTask | Where-Object {$_.TaskName -like "*NetworkBuster*"} | Format-Table TaskName, State -AutoSize') + + if result.stdout.strip(): + print(result.stdout) + else: + print(" No NetworkBuster tasks found") + + @require_admin + def open_firewall_port(self, port, name): + """Open a firewall port for a service.""" + ps_command = f''' +New-NetFirewallRule -DisplayName "{name}" -Direction Inbound -Protocol TCP -LocalPort {port} -Action Allow +''' + result = run_powershell(ps_command) + + if result.returncode == 0: + print(f"โœ“ Firewall port {port} opened for {name}") + return True + else: + print(f"โœ— Failed to open port: {result.stderr}") + return False + + @require_admin + def setup_all_firewall_rules(self): + """Set up firewall rules for all services.""" + print("\n๐Ÿ”ฅ Setting up firewall rules...") + + for name, info in self.services.items(): + self.open_firewall_port(info['port'], info['display']) + + def check_ports(self): + """Check which ports are in use.""" + print("\n๐Ÿ”Œ Port Status:") + print("-" * 50) + + ports = [3000, 3001, 3002, 3003, 8080] + + for port in ports: + result = run_powershell( + f"Get-NetTCPConnection -LocalPort {port} -ErrorAction SilentlyContinue | Select-Object -First 1 OwningProcess" + ) + + if result.stdout.strip() and "OwningProcess" in result.stdout: + # Get process name + pid_result = run_powershell( + f"(Get-NetTCPConnection -LocalPort {port} -ErrorAction SilentlyContinue | Select-Object -First 
1).OwningProcess" + ) + pid = pid_result.stdout.strip() + if pid: + proc_result = run_powershell(f"(Get-Process -Id {pid} -ErrorAction SilentlyContinue).ProcessName") + proc_name = proc_result.stdout.strip() or "unknown" + print(f" Port {port}: ๐ŸŸข In use by {proc_name} (PID: {pid})") + else: + print(f" Port {port}: ๐ŸŸข In use") + else: + print(f" Port {port}: โšช Available") + + +def main(): + """Main menu for service management.""" + manager = ServiceManager() + + print("=" * 60) + print(" NetworkBuster Service Manager") + print("=" * 60) + + admin_status = "โœ“ Administrator" if is_admin() else "โš  Standard User" + print(f" Status: {admin_status}") + print() + + while True: + print("\n๐Ÿ“‹ Menu:") + print(" 1. List services status") + print(" 2. Check port usage") + print(" 3. List scheduled tasks") + print(" 4. Create startup task (requires admin)") + print(" 5. Setup firewall rules (requires admin)") + print(" 6. Exit") + print() + + choice = input("Select option (1-6): ").strip() + + if choice == "1": + manager.list_services() + elif choice == "2": + manager.check_ports() + elif choice == "3": + manager.list_scheduled_tasks() + elif choice == "4": + if is_admin(): + manager.create_scheduled_task("NetworkBusterServers", "start-servers.js", "startup") + else: + print("โš  Please run as Administrator for this option") + elif choice == "5": + if is_admin(): + manager.setup_all_firewall_rules() + else: + print("โš  Please run as Administrator for this option") + elif choice == "6": + print("๐Ÿ‘‹ Goodbye!") + break + else: + print("Invalid option") + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..cfd7c68 --- /dev/null +++ b/setup.py @@ -0,0 +1,61 @@ +""" +NetworkBuster Setup Script +Build and install NetworkBuster as a Windows application +""" + +from setuptools import setup, find_packages +import os +import sys + +# Read version from config +VERSION = "1.0.1" +DESCRIPTION = "NetworkBuster - Complete 
Network Management Suite" +LONG_DESCRIPTION = """ +NetworkBuster is a comprehensive network management suite featuring: +- Real-time network monitoring and topology mapping +- API endpoint tracing and performance analysis +- Mission control dashboard +- Audio streaming server +- Universal launcher with scheduled deployment +- Maximum power production optimization +""" + +# Requirements +REQUIREMENTS = [ + 'flask>=3.0.0', + 'flask-cors>=4.0.0', + 'requests>=2.31.0', + 'psutil>=5.9.0', + 'schedule>=1.2.0', +] + +setup( + name='networkbuster', + version=VERSION, + author='NetworkBuster Team', + author_email='admin@networkbuster.net', + description=DESCRIPTION, + long_description=LONG_DESCRIPTION, + long_description_content_type='text/markdown', + url='https://networkbuster.net', + packages=find_packages(), + install_requires=REQUIREMENTS, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: System Administrators', + 'Topic :: System :: Networking :: Monitoring', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3.14', + 'Operating System :: Microsoft :: Windows', + ], + python_requires='>=3.10', + entry_points={ + 'console_scripts': [ + 'networkbuster=networkbuster_launcher:main', + 'networkbuster-map=network_map_viewer:main', + 'networkbuster-tracer=api_tracer:main', + ], + }, + include_package_data=True, + zip_safe=False, +) diff --git a/spaceship-3d-blueprints/DRONE-SYSTEM-SPECS.md b/spaceship-3d-blueprints/DRONE-SYSTEM-SPECS.md new file mode 100644 index 0000000..301622f --- /dev/null +++ b/spaceship-3d-blueprints/DRONE-SYSTEM-SPECS.md @@ -0,0 +1,47 @@ +# DRONE SWARM & SCAN ALGORITHM SPECIFICATIONS + +## Overview +This document outlines the technical specifications for the "Unbreakable" Drone Flight System (DFS) designed for the NetworkBuster ecosystem. The system prioritizes fault tolerance, real-time matter analysis, and autonomous pattern generation. + +## 1. 
Flight Algorithms + +### 1.1 Spiral Search (Algorithm: `SPIRAL_ALPHA`) +- **Purpose:** Rapid area coverage expanding from a central point of interest. +- **Math:** Archimedean spiral $r = a + b\theta$. +- **Application:** Used when searching for a signal source or anomaly with unknown exact coordinates but known general vicinity. + +### 1.2 Grid Raster (Algorithm: `GRID_BETA`) +- **Purpose:** 100% coverage mapping of a defined sector. +- **Logic:** Alternating directional passes (Boustrophedon path). +- **Application:** Geological surveys, matter density mapping, and perimeter security sweeps. + +## 2. Matter Scan Technology + +The drone fleet utilizes a multi-spectral sensor array to analyze matter in real-time. + +| Material Class | Spectral Signature Range | Response Action | +|----------------|--------------------------|-----------------| +| SILICA | 0.8 - 0.9 | Log & Continue | +| FERROUS | 0.4 - 0.6 | Mark for Mining | +| ORGANIC | 0.1 - 0.3 | Avoid / Alert | +| ANOMALY | < 0.1 or > 0.9 | **IMMEDIATE HALT & SCAN** | + +## 3. "Unbreakable" Software Architecture + +To ensure mission success in hostile or high-interference environments (e.g., lunar surface, radiation zones), the software implements **Triple Modular Redundancy (TMR)**. + +### 3.1 Self-Healing Loops +The `UnbreakableAutopilot` class runs a background watchdog thread that monitors: +1. **Memory Integrity:** Checks for bit-flips caused by radiation. +2. **Process Liveness:** Restarts hung threads within 50ms. +3. **Sensor Variance:** Discards outlier data from damaged sensors. + +### 3.2 Error Injection & Recovery +The system is designed to assume failure. +- **Turbulence Compensation:** Gyroscopic stabilization logic runs at 400Hz. +- **Battery Failsafe:** Auto-RTH (Return to Home) triggers at 20% capacity (hard-coded, cannot be overridden by user commands). + +## 4. Deployment +- **Platform:** Compatible with NBS-1 Spacecraft deployment bays. 
+- **Control:** Autonomous or via `drone_flight_system.py` console. +- **Link:** Subspace relay to Cloud One Orbital Station. diff --git a/spaceship-3d-blueprints/README.md b/spaceship-3d-blueprints/README.md new file mode 100644 index 0000000..5f89096 --- /dev/null +++ b/spaceship-3d-blueprints/README.md @@ -0,0 +1,280 @@ +# NetworkBuster Space Infrastructure +## ๐Ÿš€ Spaceship 3D Blueprints & Plans + +Welcome to the NetworkBuster Space Division technical documentation. This directory contains complete specifications for our extraterrestrial network infrastructure. + +--- + +## ๐Ÿ“ Directory Structure + +``` +spaceship-3d-blueprints/ +โ”‚ +โ”œโ”€โ”€ moonbase-alpha/ ๐ŸŒ• Lunar Data Center +โ”‚ โ”œโ”€โ”€ README.md - Complete specifications +โ”‚ โ”œโ”€โ”€ structural/ - Construction blueprints +โ”‚ โ”œโ”€โ”€ electrical/ - Power distribution +โ”‚ โ”œโ”€โ”€ network/ - Data center layout +โ”‚ โ””โ”€โ”€ life-support/ - ECLSS diagrams +โ”‚ +โ”œโ”€โ”€ spacecraft/ ๐Ÿš€ NBS-1 "Data Voyager" +โ”‚ โ”œโ”€โ”€ NBS-1-SPECS.md - Full technical specs +โ”‚ โ”œโ”€โ”€ structural/ - Airframe design +โ”‚ โ”œโ”€โ”€ propulsion/ - Engine schematics +โ”‚ โ”œโ”€โ”€ avionics/ - Flight computers +โ”‚ โ””โ”€โ”€ interior/ - Habitat layout +โ”‚ +โ”œโ”€โ”€ orbital-station/ ๐Ÿ›ฐ๏ธ Cloud One LEO Station +โ”‚ โ”œโ”€โ”€ CLOUD-ONE-SPECS.md - Station specifications +โ”‚ โ”œโ”€โ”€ structural/ - Module designs +โ”‚ โ”œโ”€โ”€ data-center/ - Server rack layouts +โ”‚ โ”œโ”€โ”€ thermal/ - Cooling systems +โ”‚ โ””โ”€โ”€ power/ - Solar/battery systems +โ”‚ +โ””โ”€โ”€ README.md โ† You are here +``` + +--- + +## ๐ŸŒŒ NetworkBuster Space Network + +### Infrastructure Overview + +| Facility | Type | Status | Purpose | +|----------|------|--------|---------| +| **Moonbase Alpha** | Lunar Surface | Planned 2027 | Primary data center, 50 PF compute | +| **Cloud One Station** | LEO (550km) | Operational 2026 | Edge computing, 15 PF, <10ms latency | +| **NBS-1 Spacecraft** | Transport | Testing 2026 | Cargo/crew to 
Moon, 25T payload | +| **Satellite Constellation** | 500 sats | Deploying | Global mesh network | + +--- + +## ๐ŸŽฏ Mission Statement + +**"Delivering Data to the Final Frontier"** + +NetworkBuster's space infrastructure provides: +1. **Ultra-low latency** - Orbital edge computing (<10ms to 95% of Earth) +2. **Extreme redundancy** - Off-planet backup for critical data +3. **Global coverage** - No dead zones, pole-to-pole connectivity +4. **Future-proof** - Mars relay capability, interplanetary network ready + +--- + +## ๐Ÿ“Š Key Specifications + +### Moonbase Alpha +- **Location:** Shackleton Crater, South Pole +- **Area:** 2,500 mยฒ pressurized +- **Computing:** 50 petaFLOPS +- **Storage:** 100 PB +- **Crew:** 12 permanent +- **Cost:** $8.5B construction, $450M/year ops +- **Status:** Design complete, construction 2027 + +### NBS-1 "Data Voyager" +- **Length:** 38 meters +- **Payload:** 25,000 kg to lunar orbit +- **Crew:** 6 + 2 pilots +- **Propulsion:** Methalox (CHโ‚„/LOX), 200 kN thrust +- **Cost:** $900M development, $20.5M per mission +- **Status:** Production ready, first flight Q4 2026 + +### Cloud One Orbital Station +- **Orbit:** 550 km LEO, 53ยฐ inclination +- **Computing:** 15 petaFLOPS +- **Storage:** 20 PB +- **Crew:** 3 permanent +- **Downlink:** 120 Gbps optical, 40 Gbps RF +- **Cost:** $4B construction, $147M/year ops +- **Status:** Operational since March 2026 + +--- + +## ๐Ÿ”ฌ Technical Innovations + +### 1. **Microgravity Data Centers** +- Passive cooling via radiator panels (no convection needed) +- Zero-gravity server rack design +- Radiation-hardened commercial hardware +- AI-powered thermal management + +### 2. **Laser Optical Communications** +- 10 Gbps per link (120 Gbps aggregate) +- Ground-to-orbit in 2-8ms +- Weather-resistant with RF backup +- Quantum encryption ready + +### 3. 
**In-Situ Resource Utilization (ISRU)** +- Lunar ice mining for water/oxygen +- Regolith 3D printing for construction +- Solar panel manufacturing on Moon +- Methalox fuel production (Sabatier reactor) + +### 4. **AI Autonomous Operations** +- Self-healing networks +- Predictive maintenance +- Autonomous docking +- Emergency response protocols + +--- + +## ๐Ÿ’ฐ Business Case + +### Revenue Streams +1. **Cloud Computing Services** - $400M/year (Cloud One) +2. **Low-Latency Trading** - $150M/year (financial markets) +3. **Satellite Services** - $100M/year (relay, backhaul) +4. **Data Center Operations** - $300M/year (Moonbase Alpha projected) +5. **Research Contracts** - $50M/year (NASA, ESA, private) + +### Total Projected Revenue +- **2026:** $700M (Cloud One only) +- **2028:** $1.5B (with Moonbase Alpha) +- **2030:** $3B (full constellation + Mars relay) + +### ROI +- **Cloud One:** 7.2 years +- **Moonbase Alpha:** 12 years +- **NBS-1 Fleet:** 5 years (based on mission rate) + +--- + +## ๐Ÿ›ฐ๏ธ Integration with Earth Infrastructure + +### Ground Segment +- **12 ground stations** globally distributed +- **400 Gbps aggregate uplink** +- **1.2 Tbps aggregate downlink** +- **99.99% uptime** (multi-path redundancy) + +### Terrestrial Data Centers +- **Azure Container Apps** - Primary compute (Earth-based) +- **Azure Blob Storage** - Backup for orbital data +- **Vercel Edge** - CDN for web assets +- **Moonbase Alpha** - Archive storage (low-cost, high-capacity) + +### Network Topology +``` +Internet Users + โ†“ + Vercel Edge CDN + โ†“ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ โ†“ +Azure (Earth) Cloud One (LEO) Moonbase (Moon) + 3ms latency 7ms latency 1.3s latency + โ†“ โ†“ โ†“ +Active Services Real-time Edge Archive/Backup +``` + +--- + +## ๐Ÿ“ 3D Models & CAD Files + +### Available Formats +- **STL** - 3D printing, modeling software +- **OBJ** - Texture-mapped models +- 
**STEP** - Engineering CAD (SolidWorks, Fusion 360) +- **FBX** - Game engines (Unity, Unreal) +- **glTF** - Web-based 3D viewers + +### Model Files (Coming Soon) +- `moonbase-alpha-exterior.stl` - Full base model +- `nbs1-spacecraft-complete.obj` - Textured spacecraft +- `cloud-one-station.step` - Engineering model +- `lunar-rover.fbx` - NetworkBuster rover + +--- + +## ๐ŸŽ“ Educational Resources + +### Virtual Tours +- **WebGL Viewer** - Interactive 3D station tours +- **VR Experience** - Oculus/Vive compatible +- **AR App** - View models in your room (iOS/Android) + +### Technical Documentation +- **Engineering Reports** - Detailed design rationale +- **Mission Profiles** - Step-by-step mission guides +- **Safety Protocols** - Emergency procedures +- **Training Manuals** - Crew operations handbooks + +--- + +## ๐Ÿšฆ Development Roadmap + +### 2026 (Current Year) +- โœ… Cloud One Station operational +- ๐Ÿ”„ NBS-1 first flight (Q4) +- ๐Ÿ”„ Satellite constellation deployment (200/500 complete) + +### 2027 +- ๐ŸŽฏ Moonbase Alpha construction begins +- ๐ŸŽฏ NBS-1 regular cargo missions (monthly) +- ๐ŸŽฏ Cloud One expansion (double capacity) + +### 2028-2030 +- ๐ŸŽฏ Moonbase Alpha operational +- ๐ŸŽฏ Mars relay capability +- ๐ŸŽฏ Tourism module (Cloud One) +- ๐ŸŽฏ Deep space network integration + +### 2031+ +- ๐ŸŽฏ Mars surface station (Moonbase Beta) +- ๐ŸŽฏ Asteroid mining operations +- ๐ŸŽฏ Interplanetary internet backbone + +--- + +## ๐Ÿค Partners & Collaborators + +- **SpaceX** - Launch services, Dragon resupply +- **NASA** - Deep Space Network, mission support +- **ESA** - Ground stations, research collaboration +- **JAXA** - Lunar ISRU technology +- **Blue Origin** - Backup launch provider +- **Lockheed Martin** - Habitat modules +- **Cisco** - Space-hardened networking equipment + +--- + +## ๐Ÿ“ž Contact + +**NetworkBuster Space Division** +- **Email:** space@networkbuster.net +- **Phone:** +1 (321) NETWORK +- **Address:** Kennedy Space Center, FL 32899, USA + 
+**Mission Control** +- **24/7 Operations:** ops@nbspace.net +- **Emergency:** +1 (321) URGENT-1 + +--- + +## ๐Ÿ“„ License & Usage + +These blueprints are provided for: +- โœ… Educational purposes +- โœ… Research and development +- โœ… Personal 3D printing/modeling +- โŒ Commercial reproduction without license +- โŒ Unauthorized construction attempts ๐Ÿ˜„ + +**Copyright ยฉ 2026 NetworkBuster Space Division** + +--- + +## ๐ŸŒŸ Fun Facts + +1. **Moonbase Alpha** can process 50 petaFLOPS - more than all of Earth's data centers in 2010 +2. **Cloud One** orbits Earth every 90 minutes - 16 sunrises per day +3. **NBS-1** uses the same fuel as SpaceX Starship - methalox enables Mars refueling +4. The **lunar data center** has 1/6th gravity cooling - fans run slower, save power +5. Our **satellite constellation** will provide internet to astronauts on Mars (someday!) + +--- + +*"To infinity, and beyond reasonable latency!"* + +๐Ÿš€๐ŸŒ•๐Ÿ›ฐ๏ธโญ diff --git a/spaceship-3d-blueprints/moonbase-alpha/README.md b/spaceship-3d-blueprints/moonbase-alpha/README.md new file mode 100644 index 0000000..e087caf --- /dev/null +++ b/spaceship-3d-blueprints/moonbase-alpha/README.md @@ -0,0 +1,262 @@ +# Moonbase Alpha - Master Blueprint +## NetworkBuster Lunar Operations Center + +**Project Code:** MBA-2026 +**Classification:** Technical Specifications +**Last Updated:** January 2, 2026 + +--- + +## ๐ŸŒ• MOONBASE ALPHA OVERVIEW + +Moonbase Alpha is the primary lunar data center and operations hub for NetworkBuster's space network infrastructure. + +### Location +- **Coordinates:** Shackleton Crater, South Pole +- **Elevation:** +4,200m from lunar datum +- **Area:** 2,500 mยฒ pressurized, 8,000 mยฒ total + +### Primary Functions +1. **Data Center Operations** - Low-latency space network routing +2. **Communications Hub** - Earth-Moon-Mars relay +3. **Server Farm** - Redundant cloud processing (0.165g gravity cooling) +4. 
**Research Station** - Network optimization in lunar environment + +--- + +## ๐Ÿ—๏ธ STRUCTURAL DESIGN + +### Module Layout + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ MOONBASE ALPHA โ”‚ +โ”‚ (Top-Down View) โ”‚ +โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ LIVING โ”‚โ”€โ”€โ”€โ”€โ”‚ COMMAND โ”‚โ”€โ”€โ”€โ”€โ”‚ COMMS โ”‚ โ”‚ +โ”‚ โ”‚ QUARTERS โ”‚ โ”‚ CENTER โ”‚ โ”‚ ARRAY โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ POWER โ”‚โ”€โ”€โ”€โ”€โ”‚ DATA โ”‚โ”€โ”€โ”€โ”€โ”‚ STORAGE โ”‚ โ”‚ +โ”‚ โ”‚ REACTOR โ”‚ โ”‚ CENTER โ”‚ โ”‚ DEPOT โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ LIFE โ”‚โ”€โ”€โ”€โ”€โ”‚ AIRLOCK โ”‚โ”€โ”€โ”€โ”€โ”‚ GARAGE โ”‚ โ”‚ +โ”‚ โ”‚ SUPPORT โ”‚ โ”‚ HUB โ”‚ โ”‚ BAY โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ +โ”‚ [Surface Solar Array - 500kW] โ”‚ +โ”‚ [Backup Nuclear Reactor - 1MW] โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +### Construction Specs + +| Component | Material | Dimensions | Purpose | +|-----------|----------|------------|---------| +| **Habitat Modules** | Aluminum-Titanium Alloy | 10m ร— 10m ร— 5m | Pressurized living/work | +| 
**Data Center Core** | Radiation-shielded Steel | 15m ร— 15m ร— 8m | Server racks, cooling | +| **Regolith Shield** | Packed lunar soil | 2m thickness | Radiation protection | +| **Foundation** | Reinforced concrete | 4m depth | Seismic stability | +| **Dome Windows** | Multi-layer ALON | 5cm thick | Observation, solar | + +--- + +## ๐Ÿ’ป DATA CENTER SPECIFICATIONS + +### Server Configuration + +- **Total Racks:** 120 standard 42U racks +- **Processing Power:** 50 petaFLOPS aggregate +- **Storage Capacity:** 100 PB raw, 250 PB with compression +- **Cooling:** Passive radiator panels + liquid nitrogen backup +- **Redundancy:** N+3 power, N+2 cooling, RAID 10 storage + +### Network Infrastructure + +``` +Earth Uplink (400 Gbps) + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Primary Relay Dish โ”‚ โ† 10m parabolic antenna +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Network Core Switch โ”‚ โ† Cisco Nexus 9500 (lunar-hardened) +โ”‚ (400G backbone) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ + โ”Œโ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ โ†“ โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Rack 1 โ”‚ โ”‚ Rack 2 โ”‚ โ”‚ Rack 3 โ”‚ โ”‚ Storage โ”‚ +โ”‚ Web/APIโ”‚ โ”‚ Audio โ”‚ โ”‚ Computeโ”‚ โ”‚ Array โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +### Environmental Control + +- **Temperature:** 18-22ยฐC (server rooms), 20-24ยฐC (habitat) +- **Pressure:** 101.3 kPa (Earth standard) +- **Atmosphere:** 78% Nโ‚‚, 21% Oโ‚‚, 1% trace gases +- **Humidity:** 40-60% RH +- **Gravity Compensation:** 
Magnetic boot anchors at workstations + +--- + +## โšก POWER SYSTEMS + +### Primary Power +- **Solar Array:** 500 kW peak (lunar day, 14 Earth days) +- **Battery Storage:** 20 MWh lithium-ion banks +- **Efficiency:** 92% DC-DC conversion + +### Backup Power +- **Nuclear Reactor:** 1 MW continuous (Kilopower-class) +- **Fuel:** Highly-enriched uranium, 10-year lifespan +- **Safety:** Triple containment, buried 50m + +### Power Distribution +``` +Solar Array (500 kW) โ”€โ”€โ” + โ”œโ”€โ”€โ†’ Main Bus (DC 380V) +Nuclear (1 MW) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ + โ”œโ”€โ”€โ†’ Data Center (60%) +Battery (20 MWh) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”œโ”€โ”€โ†’ Life Support (25%) + โ”œโ”€โ”€โ†’ Habitat (10%) + โ””โ”€โ”€โ†’ Reserve (5%) +``` + +--- + +## ๐Ÿ›ฐ๏ธ COMMUNICATIONS ARRAY + +### Earth Link +- **Dish Size:** 10m parabolic +- **Frequency:** Ka-band (26.5-40 GHz) +- **Bandwidth:** 400 Gbps downlink, 100 Gbps uplink +- **Latency:** 1.3 sec one-way (average) +- **Availability:** 99.7% (accounting for Earth rotation) + +### Mars Relay +- **Dish Size:** 5m parabolic +- **Frequency:** X-band (8-12 GHz) +- **Bandwidth:** 50 Gbps +- **Latency:** 4-24 min one-way (orbit dependent) + +### Local Mesh +- **Technology:** 5G mmWave (lunar surface vehicles) +- **Range:** 50 km line-of-sight +- **Nodes:** 12 relay towers around crater rim + +--- + +## ๐Ÿ‘จโ€๐Ÿš€ CREW & OPERATIONS + +### Staffing +- **Permanent Crew:** 12 personnel + - 3 Network Engineers + - 2 Data Center Technicians + - 2 Communications Specialists + - 2 Life Support Engineers + - 1 Medical Officer + - 1 Commander + - 1 Geologist/Researcher + +### Rotation Schedule +- **Tour Duration:** 6 months +- **Resupply:** Every 3 months via cargo lander +- **Emergency Return:** 72-hour readiness + +--- + +## ๐Ÿš€ SPACECRAFT INTEGRATION + +### Landing Pad +- **Dimensions:** 50m ร— 50m reinforced regolith +- **Lighting:** LED perimeter markers, IR beacons +- **Capacity:** 2 medium landers simultaneously + +### 
Garage Bay +- **Vehicles:** 4 lunar rovers (NetworkBuster branded) +- **Tools:** Maintenance equipment, spare server components +- **Airlock:** Large equipment airlock (5m ร— 5m) + +--- + +## ๐Ÿ“Š OPERATIONAL METRICS + +### Performance Targets +- **Uptime:** 99.95% annual +- **Latency:** <1.5s Earth roundtrip +- **Throughput:** >300 Gbps sustained +- **Error Rate:** <10โปโน BER + +### Cost Estimates +- **Construction:** $8.5 billion +- **Annual Operations:** $450 million +- **ROI Period:** 12 years (based on data center revenue) + +--- + +## ๐Ÿ”ฌ RESEARCH INITIATIVES + +1. **Low-Gravity Cooling** - Study of passive thermal management +2. **Radiation-Hardened Computing** - Next-gen server design +3. **Vacuum Network Transmission** - Fiber optics in lunar environment +4. **Regolith Computing** - Using lunar soil for insulation/shielding + +--- + +## ๐Ÿ›ก๏ธ SAFETY & REDUNDANCY + +### Emergency Protocols +- **Micrometeorite Strike:** Auto-seal pressure doors, EVA repair teams +- **Power Failure:** Automatic reactor startup, 72-hour battery backup +- **Communications Loss:** Stored messages, autonomous operations mode +- **Medical Emergency:** Telemedicine to Earth, emergency return vehicle + +### Backup Systems +- **Life Support:** Dual independent COโ‚‚ scrubbers, Oโ‚‚ generators +- **Water:** Closed-loop recycling (98% efficient), ice mining backup +- **Food:** 12-month reserve supply, hydroponics supplements + +--- + +## ๐Ÿ“ TECHNICAL DRAWINGS + +See detailed blueprints in: +- `/moonbase-alpha/structural/` - Construction plans +- `/moonbase-alpha/electrical/` - Power distribution +- `/moonbase-alpha/network/` - Data center layout +- `/moonbase-alpha/life-support/` - ECLSS diagrams + +--- + +## ๐ŸŒŒ FUTURE EXPANSION + +### Phase 2 (2028-2030) +- Double data center capacity +- Add second habitat module +- Install 2 MW solar array +- Mars direct relay upgrade + +### Phase 3 (2032-2035) +- Underground expansion (10,000 mยฒ) +- Dedicated AI/ML compute cluster +- 
Quantum computing lab +- Tourism observation deck + +--- + +**Document Control** +- **Revision:** 3.0 +- **Approved By:** NetworkBuster Space Division +- **Next Review:** Q2 2026 + +--- + +*"From the Moon to the Stars - NetworkBuster Everywhere"* diff --git a/spaceship-3d-blueprints/orbital-station/CLOUD-ONE-SPECS.md b/spaceship-3d-blueprints/orbital-station/CLOUD-ONE-SPECS.md new file mode 100644 index 0000000..5562e1f --- /dev/null +++ b/spaceship-3d-blueprints/orbital-station/CLOUD-ONE-SPECS.md @@ -0,0 +1,308 @@ +# Orbital Station - NetworkBuster Cloud One +## Low Earth Orbit Data Processing Station + +**Project Code:** NBCO-2026 +**Classification:** Infrastructure Specifications +**Orbit:** 550 km altitude, 53ยฐ inclination + +--- + +## ๐Ÿ›ฐ๏ธ STATION OVERVIEW + +NetworkBuster Cloud One is a permanent orbital data center providing low-latency processing and global network coverage from Low Earth Orbit (LEO). + +### Mission +- **Real-time data processing** for Earth-based NetworkBuster services +- **Edge computing** with <50ms latency to any point on Earth +- **Satellite network hub** for mesh constellation +- **Backup facility** for terrestrial data centers + +--- + +## ๐Ÿ—๏ธ STATION STRUCTURE + +### Configuration +``` + [Solar Panel - 100 kW] + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Docking Module โ”‚ โ† Crew transport + โ”‚ (2ร— ports) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Command & Control โ”‚ + โ”‚ (Crew: 3 permanent) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Data Center Module โ”‚ โ† 40 server racks + โ”‚ (15m ร— 4m cylinder) โ”‚ + โ”‚ [Active cooling system] โ”‚ + 
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Power & Thermal Module โ”‚ + โ”‚ (Batteries, radiators) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Comms Array Module โ”‚ + โ”‚ (12ร— phased array ant.) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +Total Length: 60 meters +Diameter: 4.5 meters +Mass: 45,000 kg +Pressurized Volume: 280 mยณ +``` + +--- + +## ๐Ÿ’ป DATA CENTER SPECIFICATIONS + +### Compute Resources +- **Server Racks:** 40 ร— 42U racks +- **Processing:** 15 petaFLOPS +- **Storage:** 20 PB (SSD, RAID 6) +- **Memory:** 500 TB aggregate RAM +- **Networking:** 1 Tbps internal backbone + +### Workload Distribution +- **Web Services:** 30% capacity +- **API Processing:** 25% capacity +- **AI/ML Training:** 20% capacity +- **Video Streaming:** 15% capacity +- **Backup/Archive:** 10% capacity + +### Cooling System +- **Method:** Liquid cooling + radiator panels +- **Radiator Area:** 200 mยฒ deployed +- **Coolant:** Ammonia (NHโ‚ƒ) closed loop +- **Thermal Capacity:** 150 kW continuous +- **Temperature Range:** 18-22ยฐC (servers) + +--- + +## ๐Ÿ“ก COMMUNICATIONS + +### Earth Downlink +- **Technology:** Laser optical comms + Ka-band RF backup +- **Laser Data Rate:** 10 Gbps per link, 120 Gbps aggregate +- **RF Data Rate:** 40 Gbps backup +- **Ground Stations:** 12 globally distributed +- **Latency:** 2-8ms (ground to orbit one-way) + +### Satellite Mesh +- **Connected Sats:** 500 NetworkBuster constellation satellites +- **Inter-Satellite Links:** Laser crosslinks at 50 Gbps each +- **Coverage:** 99.9% global population +- **Handoff Time:** <100ms between satellites + +### Local Network +- **WiFi:** 802.11ay (60 GHz, 20 
Gbps) +- **Ethernet:** 100 Gbps fiber for server racks +- **Emergency:** S-band radio + +--- + +## โšก POWER GENERATION + +### Solar Arrays +- **Panels:** 2 ร— 50 kW deployable wings +- **Total Area:** 400 mยฒ +- **Efficiency:** 35% (triple-junction cells) +- **Output:** 100 kW average (accounting for eclipse) +- **Tracking:** Dual-axis sun tracking + +### Energy Storage +- **Batteries:** 300 kWh lithium-ion banks +- **Eclipse Duration:** 36 minutes per 90-minute orbit +- **Reserve Capacity:** 4 hours at full load + +### Power Distribution +``` +Solar Array (100 kW avg) + โ†“ + Battery Bank (300 kWh) + โ†“ + Main Bus (270 VDC) + โ†“ + โ”Œโ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ โ†“ โ†“ + Data Ctr Cooling Crew Comms + (70 kW) (15 kW) (8 kW) (5 kW) +``` + +--- + +## ๐Ÿ‘จโ€๐Ÿš€ CREW & OPERATIONS + +### Staffing +- **Permanent Crew:** 3 personnel + - 1 Station Commander / Network Engineer + - 1 Data Center Technician + - 1 Communications Specialist + +### Rotation +- **Tour Length:** 90 days +- **Resupply:** Every 30 days (Dragon capsule) +- **Crew Transport:** SpaceX Crew Dragon or NBS-1 + +### Living Quarters +- **Sleeping Pods:** 3 individual cabins (2mยณ each) +- **Galley:** Food prep, water recycler +- **Exercise:** Treadmill, resistance bands (prevent atrophy) +- **Hygiene:** Enclosed shower, vacuum toilet +- **Recreation:** Cupola observation window, VR headsets + +--- + +## ๐Ÿ”ง MAINTENANCE + +### Server Maintenance +- **Hot-Swappable:** All components (PSU, drives, RAM) +- **Scheduled Maintenance:** Weekly rack inspections +- **Spare Parts:** 10% redundancy for critical components +- **Repair Time:** <2 hours for most failures + +### Orbital Corrections +- **Propulsion:** 8 ร— ion thrusters (200 mN each) +- **Fuel:** Xenon gas, 500 kg capacity +- **Burn Frequency:** Weekly (atmospheric drag compensation) +- **Delta-V Budget:** 500 m/s per year + +### Emergency Protocols +- **Fire Suppression:** COโ‚‚ 
flooding system +- **Decompression:** Auto-sealing bulkheads between modules +- **Evacuation:** Soyuz lifeboat (always docked) +- **Backup Comms:** Battery-powered UHF beacon + +--- + +## ๐Ÿ›ก๏ธ ORBITAL DEBRIS MITIGATION + +### Shielding +- **Whipple Shield:** 2-layer aluminum + Kevlar bumper +- **Protection:** Objects up to 1 cm diameter at 10 km/s +- **Critical Modules:** Triple-layer shielding (data center, crew) + +### Tracking & Avoidance +- **Radar Integration:** NORAD SSN tracking data +- **Automatic Maneuvers:** AI-controlled evasion burns +- **Warning Time:** 24 hours for predicted conjunctions +- **Collision Probability Threshold:** 1:10,000 triggers maneuver + +--- + +## ๐Ÿ“Š PERFORMANCE METRICS + +### Uptime Targets +- **Overall Availability:** 99.99% (52 minutes downtime/year) +- **Network Latency:** <10ms to 95% of global users +- **Data Throughput:** >80 Gbps sustained +- **Error Rate:** <10โปยนยฒ BER (optical links) + +### Achieved Performance (2026 Q1) +- **Actual Uptime:** 99.97% +- **Avg Latency:** 7.2ms +- **Peak Throughput:** 115 Gbps +- **Error Rate:** 2.1ร—10โปยนยณ BER โœ“ + +--- + +## ๐Ÿ’ฐ FINANCIAL + +### Construction Cost +- **Station Modules:** $2.5 billion +- **Launch Costs:** $800 million (8ร— Falcon Heavy) +- **Comms Equipment:** $400 million +- **Servers & Computing:** $300 million +- **Total Capex:** $4 billion + +### Annual Operations +- **Crew Salaries:** $12 million +- **Resupply Missions:** $80 million (12ร— per year) +- **Ground Control:** $20 million +- **Maintenance:** $30 million +- **Power (solar cell degradation):** $5 million +- **Total Opex:** $147 million/year + +### Revenue Model +- **Cloud Computing Services:** $400M/year +- **Low-Latency Trading:** $150M/year +- **Satellite Relay Services:** $100M/year +- **Research & Development:** $50M/year +- **Total Revenue:** $700M/year + +**Payback Period:** 7.2 years + +--- + +## ๐ŸŒ GROUND SEGMENT + +### Ground Stations (12 locations) + +| Location | Coordinates | Uplink | 
Downlink | +|----------|-------------|--------|----------| +| Hawaii, USA | 19.7ยฐN, 155.5ยฐW | 40 Gbps | 100 Gbps | +| California, USA | 35.4ยฐN, 119.2ยฐW | 40 Gbps | 100 Gbps | +| Florida, USA | 28.5ยฐN, 80.6ยฐW | 40 Gbps | 100 Gbps | +| Norway | 69.7ยฐN, 18.9ยฐE | 40 Gbps | 100 Gbps | +| Australia | 31.8ยฐS, 115.9ยฐE | 40 Gbps | 100 Gbps | +| Chile | 29.3ยฐS, 70.7ยฐW | 40 Gbps | 100 Gbps | +| South Africa | 30.7ยฐS, 21.4ยฐE | 40 Gbps | 100 Gbps | +| Japan | 35.7ยฐN, 139.7ยฐE | 40 Gbps | 100 Gbps | +| India | 28.6ยฐN, 77.2ยฐE | 40 Gbps | 100 Gbps | +| Brazil | 15.8ยฐS, 47.9ยฐW | 40 Gbps | 100 Gbps | +| UK | 51.5ยฐN, 0.1ยฐW | 40 Gbps | 100 Gbps | +| Singapore | 1.3ยฐN, 103.8ยฐE | 40 Gbps | 100 Gbps | + +--- + +## ๐Ÿ”ฌ RESEARCH EXPERIMENTS + +### Active Studies +1. **Microgravity Server Performance** - Long-term reliability in 0g +2. **Space Radiation Effects** - Bit-flip rates in commercial hardware +3. **Thermal Management** - Passive vs active cooling efficiency +4. **Quantum Key Distribution** - Secure comms testing +5. 
**Edge Computing Latency** - Orbital vs terrestrial comparison + +--- + +## ๐Ÿš€ FUTURE EXPANSION + +### Phase 2 (2027-2028) +- Add second data center module (+40 racks) +- Upgrade to 200 kW solar array +- Install quantum computer test rack +- Add crew capacity to 6 + +### Phase 3 (2029-2030) +- Connect to Moonbase Alpha network +- Mars relay capability +- Autonomous resupply (cargo drones) +- Tourist observation module (commercial space tourism) + +--- + +## ๐Ÿ“ TECHNICAL DRAWINGS + +Detailed blueprints available in: +- `/orbital-station/structural/` - Station modules +- `/orbital-station/data-center/` - Rack layouts +- `/orbital-station/thermal/` - Cooling systems +- `/orbital-station/power/` - Electrical distribution + +--- + +**Document Control** +- **Revision:** 1.8 +- **Status:** Operational +- **First Module Launch:** November 2025 +- **Full Operational Capability:** March 2026 + +--- + +*"Computing at the Edge of Space"* diff --git a/spaceship-3d-blueprints/spacecraft/NBS-1-SPECS.md b/spaceship-3d-blueprints/spacecraft/NBS-1-SPECS.md new file mode 100644 index 0000000..d68cf25 --- /dev/null +++ b/spaceship-3d-blueprints/spacecraft/NBS-1-SPECS.md @@ -0,0 +1,340 @@ +# NetworkBuster Spacecraft - NBS-1 "Data Voyager" +## Heavy-Lift Cargo and Personnel Transport + +**Project Code:** NBS-1-2026 +**Classification:** Technical Specifications +**Manufacturer:** NetworkBuster Aerospace Division + +--- + +## ๐Ÿš€ SPACECRAFT OVERVIEW + +The NBS-1 "Data Voyager" is a reusable spacecraft designed for cargo and crew transport between Earth, lunar orbit, and Moonbase Alpha. 
+ +### Mission Profile +- **Primary:** Moonbase Alpha resupply and crew rotation +- **Secondary:** Satellite deployment, orbital server maintenance +- **Tertiary:** Mars cargo missions (future capability) + +--- + +## ๐Ÿ“ DIMENSIONS & SPECIFICATIONS + +### Exterior Dimensions +``` + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Antenna โ”‚ + โ”‚ Array โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Command Module (5m) โ”‚ + โ”‚ [Crew: 6 | Cockpit: 2 seats] โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Habitat Section (8m) โ”‚ + โ”‚ [Bunks, Galley, Exercise] โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Cargo Bay (15m ร— 6m dia) โ”‚ + โ”‚ [Server Racks, Supplies] โ”‚ + โ”‚ [Payload: 25,000 kg] โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Propulsion Module (10m) โ”‚ + โ”‚ [4ร— Main Engines] โ”‚ + โ”‚ [Fuel Tanks: 120,000 kg] โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘ โ”‚ โ† Engine Nozzles + โ”‚ โ–‘โ–‘โ–‘EXHAUSTโ–‘โ–‘ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +Total Length: 38 meters +Diameter: 6 meters (main body) +Mass (dry): 18,000 kg +Mass (fueled): 138,000 kg +``` + +### Key Specifications + +| System | 
Specification | +|--------|---------------| +| **Crew Capacity** | 6 personnel + 2 pilots | +| **Cargo Capacity** | 25,000 kg to lunar orbit | +| **Propulsion** | Methalox (CHโ‚„/LOX) engines | +| **Thrust** | 4 ร— 50 kN = 200 kN total | +| **ISP** | 380s vacuum, 330s sea level | +| **Delta-V** | 8,500 m/s fully fueled | +| **Endurance** | 30 days independent | +| **Life Support** | Closed-loop ECLSS for 45 days | + +--- + +## ๐Ÿ›ฐ๏ธ PROPULSION SYSTEM + +### Main Engines (4ร— NetworkBuster ME-50) +- **Type:** Methalox rocket engines +- **Thrust:** 50 kN each (200 kN total) +- **Throttle Range:** 40-100% +- **Gimbal:** ยฑ15ยฐ for attitude control +- **Restart Capability:** Unlimited in space + +### Reaction Control System (RCS) +- **Thrusters:** 24 ร— 500N cold gas (nitrogen) +- **Placement:** 6 per quadrant for 6-DOF control +- **Propellant:** 1,000 kg nitrogen + +### Fuel Tanks +- **Main Tank:** 80,000 kg liquid methane (CHโ‚„) +- **Oxidizer Tank:** 40,000 kg liquid oxygen (LOX) +- **Ratio:** 2:1 fuel to oxidizer +- **Tank Material:** Carbon fiber composite, cryo-rated +- **Pressure:** 3.5 MPa nominal + +--- + +## ๐Ÿ’ป AVIONICS & COMPUTING + +### Flight Computer +- **Primary:** Triple-redundant ARM64 processors +- **Clock Speed:** 2.5 GHz per core +- **RAM:** 128 GB ECC +- **Storage:** 8 TB SSD (mission data, logs) +- **OS:** Custom real-time Linux kernel + +### Navigation +- **Star Tracker:** 10 arcsec accuracy +- **IMU:** Ring laser gyroscope + accelerometers +- **GPS Receiver:** Earth orbit only +- **Deep Space Network:** Ka-band comms for position + +### Automation Level +- **Autonomous Docking:** โœ“ +- **Trajectory Planning:** โœ“ +- **Emergency Return:** โœ“ +- **Full AI Control:** โœ— (human oversight required) + +--- + +## ๐Ÿ“ก COMMUNICATIONS + +### Primary Antenna (High-Gain) +- **Type:** 2.5m parabolic dish +- **Frequency:** Ka-band (26-40 GHz) +- **Data Rate:** 100 Mbps to Earth/Moonbase +- **Range:** 400,000 km (Earth-Moon) + +### Backup Antenna 
(Omni) +- **Type:** Omnidirectional patch array +- **Frequency:** S-band (2-4 GHz) +- **Data Rate:** 1 Mbps +- **Range:** 100,000 km + +### Internal Network +- **WiFi:** 802.11ax (6 GHz band) +- **Ethernet:** 10 Gbps fiber backbone +- **Crew Tablets:** 8ร— ruggedized Android devices + +--- + +## ๐Ÿ  CREW HABITAT + +### Command Module +- **Cockpit:** 2 pilot seats with full flight controls +- **Instruments:** 5ร— 4K touchscreen displays +- **Windows:** 4 large viewports with electrochromic tinting +- **Airlock:** Docking port compatible with ISS/Moonbase + +### Living Quarters +- **Bunks:** 6 sleeping compartments with privacy curtains +- **Galley:** Food preparation, water dispenser, microwave +- **Hygiene:** Toilet, shower (water recycling) +- **Exercise:** Resistance bands, treadmill (lunar gravity sim) +- **Storage:** Personal lockers, 2 mยณ per crew member + +### Environmental Control +- **Temperature:** 20-24ยฐC +- **Pressure:** 101.3 kPa (1 atm) +- **Atmosphere:** 78% Nโ‚‚, 21% Oโ‚‚ +- **COโ‚‚ Scrubbing:** Lithium hydroxide + regenerative zeolite +- **Water Recycling:** 95% efficiency (urine, condensate) + +--- + +## ๐Ÿ“ฆ CARGO BAY + +### Dimensions +- **Length:** 15 meters +- **Diameter:** 5 meters +- **Volume:** 295 mยณ +- **Payload Capacity:** 25,000 kg to lunar orbit + +### Cargo Types +- **Server Racks:** Standard 42U racks (modified for launch loads) +- **Life Support Supplies:** Oโ‚‚ tanks, water, food +- **Spare Parts:** Replacement modules for Moonbase +- **Scientific Equipment:** Research payloads +- **Construction Materials:** Expansion modules + +### Loading +- **Access:** Clamshell doors (2ร— hinged panels) +- **Mechanism:** Hydraulic actuators +- **Cranes:** 2ร— robotic arms (5-DOF each) for orbital cargo handling + +--- + +## โšก POWER SYSTEMS + +### Primary Power +- **Solar Panels:** 4ร— deployable arrays (8 kW total) +- **Efficiency:** 32% multi-junction cells +- **Area:** 50 mยฒ total +- **Orientation:** Sun-tracking gimbal + +### Backup 
Power +- **Batteries:** 200 kWh lithium-ion banks +- **Duration:** 72 hours at reduced load +- **Recharge Time:** 24 hours from solar + +### Distribution +- **Main Bus:** 120 VDC +- **Backup Bus:** 28 VDC +- **Redundancy:** Dual bus with automatic crossover + +--- + +## ๐Ÿ›ก๏ธ SAFETY FEATURES + +### Abort Modes +1. **Launch Abort:** Escape tower (0-120s after liftoff) +2. **Orbital Abort:** Return to Earth from any orbit +3. **Trans-Lunar Abort:** Free-return trajectory +4. **Emergency Descent:** Fast return from lunar orbit (8 hours) + +### Redundancy +- **Engines:** 3 of 4 required for nominal mission +- **Flight Computer:** Triple-redundant with voting +- **Life Support:** Dual COโ‚‚ scrubbers, dual Oโ‚‚ generators +- **Comms:** Primary + backup antennas + +### Emergency Equipment +- **EVA Suits:** 8ร— (6 crew + 2 spare) +- **Life Raft:** Inflatable capsule for 8 (water landing) +- **Medical Kit:** Advanced trauma, surgery capability +- **Food/Water:** 60-day emergency rations + +--- + +## ๐Ÿšฆ MISSION TIMELINE (Earth to Moonbase Alpha) + +### Phase 1: Launch (Day 0) +``` +T-0:00:00 Main engine ignition +T+0:00:08 Liftoff from Kennedy Space Center +T+0:02:30 Max-Q (maximum aerodynamic pressure) +T+0:08:00 Main engine cutoff (MECO) +T+0:08:30 Orbital insertion burn +``` + +### Phase 2: Earth Orbit (Days 0-1) +- **Duration:** 24 hours +- **Activities:** Systems check, cargo inspection, crew rest +- **Orbit:** 400 km ร— 400 km circular + +### Phase 3: Trans-Lunar Injection (Day 1) +- **Burn Duration:** 8 minutes +- **Delta-V:** 3,150 m/s +- **Coast Time:** 3 days + +### Phase 4: Lunar Orbit Insertion (Day 4) +- **Burn Duration:** 5 minutes +- **Orbit:** 100 km ร— 100 km circular (polar) + +### Phase 5: Descent to Moonbase (Day 5) +- **Deorbit Burn:** 2 minutes +- **Powered Descent:** 12 minutes +- **Landing:** Shackleton Crater pad + +### Phase 6: Surface Operations (Days 5-10) +- **Cargo Unloading:** 2 days +- **Crew Rotation:** 1 day +- **Maintenance:** 2 days 
+- **Refueling:** 1 day (from Moonbase ISRU plant) + +### Phase 7: Return to Earth (Days 10-14) +- **Launch from Moon:** Day 10 +- **Trans-Earth Injection:** Immediately after launch +- **Coast:** 3 days +- **Earth Reentry:** Day 14 +- **Splashdown:** Pacific Ocean recovery zone + +--- + +## ๐Ÿ’ฐ COST ANALYSIS + +### Development +- **Design & Engineering:** $450 million +- **Prototyping:** $200 million +- **Testing:** $150 million +- **Certification:** $100 million +- **Total Development:** $900 million + +### Per-Mission Cost +- **Fuel:** $500,000 (methalox) +- **Ground Ops:** $2 million +- **Maintenance:** $5 million +- **Crew:** $3 million +- **Insurance:** $10 million +- **Total Per Mission:** $20.5 million + +### Comparison +- **SpaceX Starship:** $10M/flight (estimated) +- **NASA SLS:** $2B/flight +- **NBS-1 Data Voyager:** $20.5M/flight โœ“ + +--- + +## ๐Ÿ”ฌ TECHNICAL INNOVATIONS + +1. **Methalox Propulsion** - In-situ resource utilization (ISRU) compatible +2. **3D-Printed Structure** - Reduced mass, faster production +3. **AI Autopilot** - Autonomous navigation and docking +4. **Modular Design** - Easy upgrades and repairs +5. 
**NetworkBuster Integration** - Built-in server racks, orbital data processing + +--- + +## ๐ŸŒŒ FUTURE UPGRADES (NBS-2) + +### Planned Improvements +- **Nuclear Thermal Propulsion** - Double delta-V (Mars missions) +- **Larger Cargo Bay** - 50,000 kg capacity +- **Extended Habitat** - 12-person crew +- **In-Orbit Assembly** - Modular construction capability + +--- + +## ๐Ÿ“ DETAILED DRAWINGS + +See blueprints in: +- `/spacecraft/structural/` - Airframe design +- `/spacecraft/propulsion/` - Engine schematics +- `/spacecraft/avionics/` - Flight computer diagrams +- `/spacecraft/interior/` - Habitat layout + +--- + +**Document Control** +- **Revision:** 2.1 +- **Designer:** NetworkBuster Aerospace +- **Status:** Production Ready +- **First Flight:** Q4 2026 (planned) + +--- + +*"Delivering Data to the Final Frontier"* diff --git a/src/Dashboard.jsx b/src/Dashboard.jsx index 1ed7559..593fad8 100644 --- a/src/Dashboard.jsx +++ b/src/Dashboard.jsx @@ -25,7 +25,8 @@ function Dashboard() { { name: 'Cache Layer', status: 'healthy', uptime: 99.98, latency: 5 }, { name: 'Message Queue', status: 'healthy', uptime: 99.92, latency: 18 }, { name: 'Auth Service', status: 'healthy', uptime: 100, latency: 8 }, - { name: 'Search Engine', status: 'warning', uptime: 99.87, latency: 156 } + { name: 'Search Engine', status: 'warning', uptime: 99.87, latency: 156 }, + { name: 'luna.eu External', status: 'healthy', uptime: 99.99, latency: 42 } ]) useEffect(() => { @@ -150,7 +151,7 @@ function Dashboard() {
    -
    +
    {service.uptime}%
    diff --git a/start-desktop.bat b/start-desktop.bat new file mode 100644 index 0000000..56f1340 --- /dev/null +++ b/start-desktop.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +node server.js +pause \ No newline at end of file diff --git a/start-local-dev.ps1.original b/start-local-dev.ps1.original deleted file mode 100644 index 84b3eef..0000000 --- a/start-local-dev.ps1.original +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env powershell -<# -.SYNOPSIS -NetworkBuster Local Development - Works WITHOUT Docker -.DESCRIPTION -Runs tri-server system (Web, API, Audio) directly -No Docker dependency - perfect when Docker is broken -#> - -Write-Host @" -โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— -โ•‘ NetworkBuster Local Development โ•‘ -โ•‘ Running 3 Servers WITHOUT Docker โ•‘ -โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• -"@ -ForegroundColor Cyan - -# Check prerequisites -Write-Host "`n> Checking prerequisites..." -ForegroundColor Yellow - -$nodeVersion = node --version 2>$null -if (-not $nodeVersion) { - Write-Host "ERROR: Node.js not found! Install from nodejs.org" -ForegroundColor Red - exit 1 -} - -$npmVersion = npm --version 2>$null -if (-not $npmVersion) { - Write-Host "ERROR: npm not found!" -ForegroundColor Red - exit 1 -} - -Write-Host " Node.js: $nodeVersion" -ForegroundColor Green -Write-Host " npm: $npmVersion" -ForegroundColor Green - -# Install dependencies -Write-Host "`n> Installing dependencies..." 
-ForegroundColor Yellow -npm install 2>&1 | Select-Object -Last 3 - -Write-Host @" - -โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— -โ•‘ Starting All 3 Servers... โ•‘ -โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• - -Services: - ๐ŸŒ Web Server โ†’ http://localhost:3000 - โš™๏ธ API Server โ†’ http://localhost:3001 - ๐ŸŽต Audio Lab โ†’ http://localhost:3002/audio-lab - -Commands: - - Ctrl+C stops all servers - - Open browsers to test each service - -Startup: -"@ -ForegroundColor Cyan - -# Start servers -$servers = @( - @{Name='Main Web Server'; File='server-universal.js'; Port=3000; Icon='๐ŸŒ'}, - @{Name='API Server'; File='api/server-universal.js'; Port=3001; Icon='โš™๏ธ'}, - @{Name='Audio Streaming'; File='server-audio.js'; Port=3002; Icon='๐ŸŽต'} -) - -$processes = @() - -foreach ($server in $servers) { - Write-Host " Starting $($server.Icon) $($server.Name) on port $($server.Port)..." -ForegroundColor Gray - $proc = Start-Process node -ArgumentList $server.File -PassThru -NoNewWindow - $processes += $proc - Start-Sleep -Milliseconds 800 -} - -Write-Host @" - -โœ“ All servers started! - -Ports in use: - 3000 - Web Server (Control Panel, Music Player) - 3001 - API Server (System Data, Health Checks) - 3002 - Audio Server (Audio Lab, Synthesis, Analysis) - -Ready to test! -"@ -ForegroundColor Green - -# Health check after startup -Start-Sleep -Seconds 3 - -Write-Host "`n๐Ÿ“Š Checking server health..." 
-ForegroundColor Yellow - -$ports = @(3000, 3001, 3002) -foreach ($port in $ports) { - try { - $response = Invoke-WebRequest -Uri "http://localhost:$port/api/health" -TimeoutSec 2 -ErrorAction SilentlyContinue - if ($response.StatusCode -eq 200) { - Write-Host " [OK] Server on port $port is healthy" -ForegroundColor Green - } - } catch { - Write-Host " [WAIT] Server on port $port starting (try again in 2 seconds)" -ForegroundColor Yellow - } -} - -Write-Host @" - -๐ŸŽต Quick Test URLs: - http://localhost:3000 - Main dashboard with music player - http://localhost:3000/control-panel - Control panel with equalizer - http://localhost:3001/api/health - API health check - http://localhost:3002/audio-lab - Audio lab (frequency synthesis) - -๐Ÿ“ In another terminal, run: - curl http://localhost:3000/api/health - curl http://localhost:3001/api/specs - curl -X POST http://localhost:3002/api/audio/stream/create - -Press Ctrl+C to stop all servers. -"@ -ForegroundColor Cyan - -# Wait and monitor -$running = $true -while ($running) { - foreach ($proc in $processes) { - if ($proc.HasExited) { - Write-Host "`nโœ— Server process exited unexpectedly" -ForegroundColor Red - $running = $false - break - } - } - - if ($running) { - Start-Sleep -Seconds 2 - } -} - -# Cleanup on exit -Write-Host "`nShutting down servers..." -ForegroundColor Yellow -foreach ($proc in $processes) { - if (-not $proc.HasExited) { - $proc.Kill() - $proc.WaitForExit() - } -} - -Write-Host "> All servers stopped" -ForegroundColor Green diff --git a/start.bat b/start.bat new file mode 100644 index 0000000..22b425d --- /dev/null +++ b/start.bat @@ -0,0 +1,36 @@ +@echo off +REM NetworkBuster Quick Launch - Auto-permissions +REM Requests admin if needed, then starts everything + +cd /d "%~dp0" + +REM Check if running as admin +net session >nul 2>&1 +if %errorLevel% neq 0 ( + echo Requesting administrator permissions... + powershell -Command "Start-Process '%~f0' -Verb RunAs" + exit /b +) + +echo. 
+echo ========================================== +echo NetworkBuster Quick Launch +echo ========================================== +echo. +echo Starting all services with admin privileges... +echo. + +call .venv\Scripts\activate.bat +start /min python auto_start_service.py + +echo. +echo Services starting in background... +timeout /t 3 /nobreak >nul + +echo Opening dashboards... +start http://localhost:7000 + +echo. +echo Done! All services running. +echo Close this window anytime. +pause diff --git a/status.bat b/status.bat new file mode 100644 index 0000000..1d4a063 --- /dev/null +++ b/status.bat @@ -0,0 +1,9 @@ +@echo off +REM Show NetworkBuster status + +cd /d "%~dp0" + +call .venv\Scripts\activate.bat +python networkbuster_launcher.py --status + +pause diff --git a/stop.bat b/stop.bat new file mode 100644 index 0000000..969dec3 --- /dev/null +++ b/stop.bat @@ -0,0 +1,15 @@ +@echo off +REM Stop all NetworkBuster services + +cd /d "%~dp0" + +echo. +echo Stopping NetworkBuster services... +echo. + +call .venv\Scripts\activate.bat +python networkbuster_launcher.py --stop + +echo. +echo All services stopped. 
+pause diff --git a/system_health.py b/system_health.py new file mode 100644 index 0000000..847a58e --- /dev/null +++ b/system_health.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 +""" +NetworkBuster System Health Monitor +Monitor system resources and server health with admin capabilities +""" + +import ctypes +import subprocess +import sys +import os +import time +import json +from pathlib import Path +from datetime import datetime + +try: + import psutil + PSUTIL_AVAILABLE = True +except ImportError: + PSUTIL_AVAILABLE = False + +PROJECT_PATH = Path(__file__).parent.resolve() +HEALTH_LOG = PROJECT_PATH / "logs" / "health.log" + + +def is_admin(): + """Check if running as administrator.""" + try: + return ctypes.windll.shell32.IsUserAnAdmin() + except: + return False + + +def ensure_log_dir(): + """Ensure log directory exists.""" + log_dir = PROJECT_PATH / "logs" + log_dir.mkdir(exist_ok=True) + return log_dir + + +def log_health(message, level="INFO"): + """Log health check message.""" + ensure_log_dir() + timestamp = datetime.now().isoformat() + log_entry = f"[{timestamp}] [{level}] {message}\n" + + with open(HEALTH_LOG, "a") as f: + f.write(log_entry) + + # Color codes for terminal + colors = { + "INFO": "\033[94m", + "SUCCESS": "\033[92m", + "WARNING": "\033[93m", + "ERROR": "\033[91m", + "RESET": "\033[0m" + } + + color = colors.get(level, colors["INFO"]) + print(f"{color}[{level}]{colors['RESET']} {message}") + + +def run_powershell(command): + """Run PowerShell command and return output.""" + result = subprocess.run( + ["powershell", "-NoProfile", "-Command", command], + capture_output=True, + text=True + ) + return result.stdout.strip() + + +class SystemHealth: + """Monitor system health and resources.""" + + def __init__(self): + self.servers = [ + {"name": "Web Server", "port": 3000}, + {"name": "API Server", "port": 3001}, + {"name": "Audio Server", "port": 3002}, + ] + + def check_cpu(self): + """Check CPU usage.""" + if PSUTIL_AVAILABLE: + cpu_percent 
= psutil.cpu_percent(interval=1) + cpu_count = psutil.cpu_count() + + status = "OK" if cpu_percent < 80 else "HIGH" + log_health(f"CPU: {cpu_percent}% ({cpu_count} cores) - {status}", + "SUCCESS" if status == "OK" else "WARNING") + return {"usage": cpu_percent, "cores": cpu_count, "status": status} + else: + # Fallback to PowerShell + cpu = run_powershell("(Get-CimInstance Win32_Processor).LoadPercentage") + log_health(f"CPU: {cpu}%", "INFO") + return {"usage": float(cpu) if cpu else 0, "status": "UNKNOWN"} + + def check_memory(self): + """Check memory usage.""" + if PSUTIL_AVAILABLE: + mem = psutil.virtual_memory() + used_gb = mem.used / (1024**3) + total_gb = mem.total / (1024**3) + percent = mem.percent + + status = "OK" if percent < 85 else "HIGH" + log_health(f"Memory: {used_gb:.1f}GB / {total_gb:.1f}GB ({percent}%) - {status}", + "SUCCESS" if status == "OK" else "WARNING") + return {"used_gb": used_gb, "total_gb": total_gb, "percent": percent, "status": status} + else: + # Fallback to PowerShell + mem_info = run_powershell(""" +$mem = Get-CimInstance Win32_OperatingSystem +$total = [math]::Round($mem.TotalVisibleMemorySize/1MB, 1) +$free = [math]::Round($mem.FreePhysicalMemory/1MB, 1) +"$($total - $free)/$total" +""") + log_health(f"Memory: {mem_info} GB", "INFO") + return {"info": mem_info, "status": "UNKNOWN"} + + def check_disk(self): + """Check disk usage.""" + if PSUTIL_AVAILABLE: + results = {} + for partition in psutil.disk_partitions(): + try: + usage = psutil.disk_usage(partition.mountpoint) + used_gb = usage.used / (1024**3) + total_gb = usage.total / (1024**3) + percent = usage.percent + + status = "OK" if percent < 90 else "LOW" + drive = partition.device + log_health(f"Disk {drive}: {used_gb:.1f}GB / {total_gb:.1f}GB ({percent}%) - {status}", + "SUCCESS" if status == "OK" else "WARNING") + results[drive] = {"used_gb": used_gb, "total_gb": total_gb, "percent": percent} + except: + pass + return results + else: + # Fallback to PowerShell + 
disk_info = run_powershell("Get-PSDrive -PSProvider FileSystem | Select-Object Name, Used, Free | Format-Table") + log_health(f"Disk Info:\n{disk_info}", "INFO") + return {"info": disk_info} + + def check_ports(self): + """Check if server ports are active.""" + log_health("Checking server ports...", "INFO") + results = {} + + for server in self.servers: + port = server["port"] + name = server["name"] + + # Check if port is listening + check = run_powershell(f"Get-NetTCPConnection -LocalPort {port} -State Listen -ErrorAction SilentlyContinue") + + if check: + log_health(f" {name} (:{port}): RUNNING", "SUCCESS") + results[name] = {"port": port, "status": "running"} + else: + log_health(f" {name} (:{port}): STOPPED", "WARNING") + results[name] = {"port": port, "status": "stopped"} + + return results + + def check_node_processes(self): + """Check Node.js processes.""" + if PSUTIL_AVAILABLE: + node_procs = [] + for proc in psutil.process_iter(['pid', 'name', 'memory_info', 'cpu_percent']): + if 'node' in proc.info['name'].lower(): + mem_mb = proc.info['memory_info'].rss / (1024**2) if proc.info['memory_info'] else 0 + node_procs.append({ + 'pid': proc.info['pid'], + 'memory_mb': mem_mb, + 'cpu': proc.info['cpu_percent'] + }) + + if node_procs: + log_health(f"Found {len(node_procs)} Node.js process(es)", "SUCCESS") + for p in node_procs: + log_health(f" PID {p['pid']}: {p['memory_mb']:.1f}MB RAM", "INFO") + else: + log_health("No Node.js processes found", "WARNING") + + return node_procs + else: + node_info = run_powershell("Get-Process node -ErrorAction SilentlyContinue | Select-Object Id, WorkingSet64 | Format-Table") + log_health(f"Node processes: {node_info or 'None'}", "INFO") + return {"info": node_info} + + def check_network(self): + """Check network connectivity.""" + log_health("Checking network connectivity...", "INFO") + + # Check localhost + localhost_check = run_powershell("Test-NetConnection -ComputerName localhost -Port 3000 -WarningAction SilentlyContinue 
| Select-Object TcpTestSucceeded") + + # Check internet + internet_check = run_powershell("Test-NetConnection -ComputerName 8.8.8.8 -WarningAction SilentlyContinue | Select-Object PingSucceeded") + + results = { + "localhost": "TcpTestSucceeded : True" in localhost_check, + "internet": "PingSucceeded : True" in internet_check + } + + log_health(f" Localhost (3000): {'OK' if results['localhost'] else 'FAIL'}", + "SUCCESS" if results['localhost'] else "ERROR") + log_health(f" Internet: {'OK' if results['internet'] else 'FAIL'}", + "SUCCESS" if results['internet'] else "ERROR") + + return results + + def full_health_check(self): + """Run comprehensive health check.""" + print("\n" + "=" * 60) + print(" NetworkBuster System Health Check") + print(" " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + print("=" * 60 + "\n") + + results = { + "timestamp": datetime.now().isoformat(), + "admin": is_admin(), + "cpu": self.check_cpu(), + "memory": self.check_memory(), + "disk": self.check_disk(), + "ports": self.check_ports(), + "node": self.check_node_processes(), + "network": self.check_network() + } + + # Save results + results_file = PROJECT_PATH / "logs" / "health-latest.json" + ensure_log_dir() + with open(results_file, "w") as f: + json.dump(results, f, indent=2, default=str) + + print("\n" + "=" * 60) + print(f" Health check complete. Log: {HEALTH_LOG}") + print("=" * 60) + + return results + + def monitor_continuous(self, interval=30): + """Run continuous health monitoring.""" + print(f"\n๐Ÿ”„ Starting continuous monitoring (every {interval}s)") + print(" Press Ctrl+C to stop\n") + + try: + while True: + self.full_health_check() + print(f"\nโณ Next check in {interval} seconds...\n") + time.sleep(interval) + except KeyboardInterrupt: + print("\n\n๐Ÿ‘‹ Monitoring stopped") + + +def main(): + """Main entry point.""" + health = SystemHealth() + + if not PSUTIL_AVAILABLE: + print("โš  psutil not installed. 
Some features will use PowerShell fallback.") + print(" Install with: pip install psutil\n") + + if len(sys.argv) > 1: + if sys.argv[1] == "--monitor": + interval = int(sys.argv[2]) if len(sys.argv) > 2 else 30 + health.monitor_continuous(interval) + elif sys.argv[1] == "--ports": + health.check_ports() + elif sys.argv[1] == "--network": + health.check_network() + else: + print("Usage: python system_health.py [--monitor [interval]] [--ports] [--network]") + else: + health.full_health_check() + + +if __name__ == "__main__": + main() diff --git a/templates/sterilization-form.md b/templates/sterilization-form.md new file mode 100644 index 0000000..aaa1e1d --- /dev/null +++ b/templates/sterilization-form.md @@ -0,0 +1,23 @@ +# Sterilization Record + +```yaml +# Example sterilization record +date: 2025-12-21T10:00:00Z +technician: Jane Doe +instrument: + id: NB-12345 + model: EnviroProbe + serial: SN-0001 +location: Field - Vehicle A +checklist: + pre_clean: true + mechanical_clean: true + disinfection: true + uvc_used: false + functional_check: true +notes: "No visible contamination after cleaning. Optical alignment within tolerance." +files: + photos: ["photos/instrument_before.jpg","photos/instrument_after.jpg"] +``` + +Fill and store one record per procedure; keep copies in `data/sterilization-records/` for auditing. diff --git a/test-reports/README.md b/test-reports/README.md new file mode 100644 index 0000000..7d17a35 --- /dev/null +++ b/test-reports/README.md @@ -0,0 +1,34 @@ +# Test Reports + +Automated test reports for NetworkBuster repository. 
+ +## Structure + +``` +test-reports/ +โ”œโ”€โ”€ README.md # This file +โ”œโ”€โ”€ branch-summary.json # Summary of all branches +โ”œโ”€โ”€ cadil/ # Reports for cadil user +โ”‚ โ””โ”€โ”€ test-results.md +โ”œโ”€โ”€ ai-gateway/ # AI Gateway tests +โ”œโ”€โ”€ gpu-stats/ # GPU monitoring tests +โ””โ”€โ”€ overlay/ # Real-time overlay tests +``` + +## Branches + +| Branch | Status | Last Updated | +|--------|--------|--------------| +| bigtree | Active | 2024-12-24 | +| main | Production | - | +| DATACENTRAL | Feature | - | +| ci/build-apk | CI | - | +| copilot/* | Copilot | - | + +## Running Tests + +```bash +npm run test # All tests +npm run ai:test # AI provider tests +npm run test:devices # Device registration tests +``` diff --git a/test-reports/branch-summary.json b/test-reports/branch-summary.json new file mode 100644 index 0000000..cfed08d --- /dev/null +++ b/test-reports/branch-summary.json @@ -0,0 +1,73 @@ +{ + "generatedAt": "2024-12-24T19:27:00Z", + "repository": "NetworkBuster/networkbuster.net", + "branches": [ + { + "name": "bigtree", + "type": "default", + "status": "active", + "isHead": true, + "remote": "origin/bigtree" + }, + { + "name": "main", + "type": "production", + "status": "stable", + "remote": "origin/main" + }, + { + "name": "DATACENTRAL", + "type": "feature", + "status": "active", + "remote": "origin/DATACENTRAL" + }, + { + "name": "ci/build-apk", + "type": "ci", + "status": "automated", + "remote": "origin/ci/build-apk" + }, + { + "name": "copilot/check-processing-engine-status", + "type": "copilot", + "status": "automated", + "remote": "origin/copilot/check-processing-engine-status" + }, + { + "name": "copilot/fix-issue-with-attachments", + "type": "copilot", + "status": "automated", + "remote": "origin/copilot/fix-issue-with-attachments" + }, + { + "name": "copilot/learn-vercel-deployment", + "type": "copilot", + "status": "automated", + "remote": "origin/copilot/learn-vercel-deployment" + }, + { + "name": 
"copilot/push-datacentra-upstream", + "type": "copilot", + "status": "automated", + "remote": "origin/copilot/push-datacentra-upstream" + }, + { + "name": "copilot/push-to-datacentra", + "type": "copilot", + "status": "automated", + "remote": "origin/copilot/push-to-datacentra" + } + ], + "users": { + "cadil": { + "logPath": "G:\\cadil\\logs", + "programs": [ + "update-wsl.ps1", + "scheduled-tasks" + ] + } + }, + "totalBranches": 9, + "activeBranches": 3, + "copilotBranches": 5 +} \ No newline at end of file diff --git a/test-reports/cadil/test-results.md b/test-reports/cadil/test-results.md new file mode 100644 index 0000000..a714bcd --- /dev/null +++ b/test-reports/cadil/test-results.md @@ -0,0 +1,43 @@ +# Cadil User Test Report + +Generated: 2024-12-24T19:27:00Z + +## User Profile + +| Property | Value | +|----------|-------| +| Username | cadil | +| Log Path | `G:\cadil\logs` | +| Associated Scripts | update-wsl.ps1 | + +## Connected Programs + +### 1. update-wsl.ps1 +- **Location**: `scripts/update-wsl.ps1` +- **Purpose**: WSL update automation +- **Log Output**: `G:\cadil\logs` +- **Status**: โœ… Available + +### 2. Scheduled Tasks +- **Type**: Windows Task Scheduler +- **Schedule**: Daily +- **Action**: Run WSL updates as root + +## Test Results + +| Test | Status | Notes | +|------|--------|-------| +| Script Syntax | โœ… Pass | PowerShell valid | +| Path References | โœ… Pass | G:\cadil\logs configured | +| Permissions | โš ๏ธ Manual | Requires elevation | + +## Recommendations + +1. Ensure `G:\cadil\logs` directory exists +2. Run with administrator privileges for scheduled tasks +3. 
Configure WSL distro in script parameters + +## Related Files + +- [update-wsl.ps1](file:///k:/networkbuster.net/networkbuster.net/scripts/update-wsl.ps1) +- [scripts/README.md](file:///k:/networkbuster.net/networkbuster.net/scripts/README.md) diff --git a/test-reports/network-optimization-report.json b/test-reports/network-optimization-report.json new file mode 100644 index 0000000..29f6ab5 --- /dev/null +++ b/test-reports/network-optimization-report.json @@ -0,0 +1,124 @@ +{ + "system": { + "platform": "win32", + "release": "10.0.26200", + "memory": 16749838336 + }, + "interfaces": [ + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "2002:c0a8:ac2:10:ae26:2213:4bd9:2524", + "netmask": "ffff:ffff:ffff:ffff::", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "2600:6c67:1a00:2acb::15fd", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "2600:6c67:1a00:2acb:c8d5:879e:ecf8:a34b", + "netmask": "ffff:ffff:ffff:ffff::", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "fd00:90d3:cf42:a938:2969:698d:cff4:7520", + "netmask": "ffff:ffff:ffff:ffff::", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "2002:c0a8:ac2:10:6922:7ed5:6a79:bb54", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "2600:6c67:1a00:2acb:5098:d988:7f39:6e3a", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "fd00:90d3:cf42:a938:5098:d988:7f39:6e3a", + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "mac": "8c:b0:e9:55:ea:45" + }, + { + "name": "Wi-Fi", + "family": "IPv6", + "address": "fe80::f13:94e1:1011:862", + "netmask": "ffff:ffff:ffff:ffff::", + "mac": "8c:b0:e9:55:ea:45" + }, + 
{ + "name": "Wi-Fi", + "family": "IPv4", + "address": "192.168.1.180", + "netmask": "255.255.255.0", + "mac": "8c:b0:e9:55:ea:45" + } + ], + "dns": { + "hostname": "google.com", + "iterations": 10, + "averageMs": "6.44", + "cachedAvgMs": "0.60", + "uncachedAvgMs": "12.28", + "improvement": "95.1%" + }, + "config": { + "httpAgent": { + "keepAlive": true, + "keepAliveMsecs": 30000, + "maxSockets": 100, + "maxFreeSockets": 50, + "timeout": 60000, + "scheduling": "fifo" + }, + "httpsAgent": { + "keepAlive": true, + "keepAliveMsecs": 30000, + "maxSockets": 100, + "maxFreeSockets": 50, + "timeout": 60000, + "scheduling": "fifo", + "rejectUnauthorized": true, + "sessionTimeout": 300 + }, + "dns": { + "cacheSize": 1000, + "cacheTTL": 300000, + "preferIPv4": true, + "servers": [ + "8.8.8.8", + "1.1.1.1", + "9.9.9.9" + ] + }, + "pool": { + "maxConnections": 200, + "idleTimeout": 60000, + "connectTimeout": 10000 + } + }, + "stats": { + "dnsHits": 4, + "dnsMisses": 6, + "connectionsCreated": 0, + "connectionsReused": 0, + "bytesTransferred": 1000, + "latencySum": 1740.4747, + "requestCount": 5 + } +} \ No newline at end of file diff --git a/tests/integration/test-e2e-device-registration.js b/tests/integration/test-e2e-device-registration.js new file mode 100644 index 0000000..ab6fd04 --- /dev/null +++ b/tests/integration/test-e2e-device-registration.js @@ -0,0 +1,88 @@ +import { spawn } from 'child_process'; + +const BASE = process.env.BASE || 'http://localhost:3001'; + +function wait(ms){return new Promise(r=>setTimeout(r,ms))} + +async function waitForServer(timeout = 15000){ + const start = Date.now(); + while (Date.now() - start < timeout){ + try { + const res = await fetch(`${BASE}/api/health`); + if (res.ok) return true; + } catch (e){} + await wait(300); + } + throw new Error('Server did not become healthy in time'); +} + +async function run(){ + console.log('Starting server...'); + const server = spawn('node', ['server.js'], { cwd: process.cwd(), env: {...process.env, 
PORT: '3001'}, stdio: ['ignore','pipe','pipe'] });
+  server.stdout.on('data', d => process.stdout.write(`[server] ${d}`));
+  server.stderr.on('data', d => process.stderr.write(`[server.err] ${d}`));
+
+  try {
+    await waitForServer(15000);
+    console.log('Server is healthy');
+
+    console.log('Starting consumer...');
+    const consumer = spawn('node', ['workers/deviceConsumer.js'], { cwd: process.cwd(), env: {...process.env, INGESTION_ENDPOINT: `${BASE}/api/ingestion/mock`}, stdio: ['ignore','pipe','pipe'] });
+    consumer.stdout.on('data', d => process.stdout.write(`[consumer] ${d}`));
+    consumer.stderr.on('data', d => process.stderr.write(`[consumer.err] ${d}`));
+
+    const payload = {
+      hardwareId: 'E2E-HW-0001',
+      model: 'E2E-Model',
+      firmwareVersion: 'e2e-0.1',
+      location: 'test-lab',
+      initialTelemetry: { battery: 100 }
+    };
+
+    const res = await fetch(`${BASE}/api/devices/register`, {
+      method: 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify(payload)
+    });
+
+    if (res.status !== 202) {
+      console.error('Expected 202 but got', res.status);
+      console.error(await res.text());
+      process.exit(2);
+    }
+
+    const body = await res.json();
+    console.log('Enqueued:', body);
+    const deviceId = body.deviceId;
+
+    // Wait for consumer to process (poll status)
+    let final = null;
+    const pollStart = Date.now();
+    while (Date.now() - pollStart < 30000){ // Increased timeout for async processing + retries
+      const r = await fetch(`${BASE}/api/devices/${deviceId}`);
+      if (r.status === 200){
+        const j = await r.json();
+        console.log('Status:', j.status);
+        if (j.status === 'acknowledged') { final = j; break; }
+        if (j.status === 'failed') {
+          console.error('Device processing failed:', j);
+          process.exit(5);
+        }
+      }
+      await wait(500);
+    }
+
+    if (!final) {
+      console.error('Device did not reach acknowledged state in time');
+      process.exit(3);
+    }
+
+    console.log('โœ“ E2E flow succeeded:', final);
+    process.exit(0);
+  } catch (err) {
+    console.error('Test
failed:', err); + process.exit(4); + } finally { + try { server.kill(); } catch(e){} + } +} + +run(); diff --git a/tests/test-ai-providers.js b/tests/test-ai-providers.js new file mode 100644 index 0000000..62572c3 --- /dev/null +++ b/tests/test-ai-providers.js @@ -0,0 +1,109 @@ +/** + * AI Providers Test - Test connectivity to all configured AI providers + * Run: npm run ai:test + */ + +import aiProviders from '../lib/aiProviders.js'; + +const TEST_MESSAGE = [ + { role: 'system', content: 'You are a helpful assistant. Keep responses very brief.' }, + { role: 'user', content: 'Say hello in exactly 3 words.' } +]; + +const TEST_EMBED_TEXT = 'This is a test sentence for embedding.'; + +async function runTests() { + console.log('\n๐Ÿงช AI Providers Test Suite\n'); + console.log('โ•'.repeat(60)); + + const providers = aiProviders.getAvailableProviders(); + console.log(`\n๐Ÿ“‹ Available Providers: ${providers.length}`); + + for (const p of providers) { + const caps = Object.entries(p.capabilities) + .filter(([, v]) => v) + .map(([k]) => k) + .join(', '); + console.log(` โœ“ ${p.name} (${caps})`); + } + + const defaultProvider = aiProviders.getDefaultProvider(); + console.log(`\n๐ŸŽฏ Default Provider: ${defaultProvider || 'none'}\n`); + console.log('โ•'.repeat(60)); + + const results = []; + + for (const provider of providers) { + console.log(`\n๐Ÿ”„ Testing ${provider.name}...`); + + // Test chat + if (provider.capabilities.chat) { + try { + console.log(` ๐Ÿ“ Chat completion...`); + const start = Date.now(); + const result = await aiProviders.chat(provider.id, TEST_MESSAGE, { + maxTokens: 50, + useCache: false + }); + const duration = Date.now() - start; + console.log(` โœ“ Chat: "${result.content.substring(0, 50)}..." 
(${duration}ms)`); + results.push({ provider: provider.id, type: 'chat', success: true, duration }); + } catch (err) { + console.log(` โœ— Chat failed: ${err.message}`); + results.push({ provider: provider.id, type: 'chat', success: false, error: err.message }); + } + } + + // Test embeddings + if (provider.capabilities.embed) { + try { + console.log(` ๐Ÿ“Š Embeddings...`); + const start = Date.now(); + const result = await aiProviders.embed(provider.id, TEST_EMBED_TEXT); + const duration = Date.now() - start; + const dims = result.embeddings[0]?.length || 0; + console.log(` โœ“ Embed: ${dims} dimensions (${duration}ms)`); + results.push({ provider: provider.id, type: 'embed', success: true, duration, dimensions: dims }); + } catch (err) { + console.log(` โœ— Embed failed: ${err.message}`); + results.push({ provider: provider.id, type: 'embed', success: false, error: err.message }); + } + } + + // Skip image generation in tests (expensive) + if (provider.capabilities.image) { + console.log(` ๐Ÿ–ผ๏ธ Image generation: skipped (use --with-images to test)`); + } + } + + // Summary + console.log('\n' + 'โ•'.repeat(60)); + console.log('\n๐Ÿ“Š Test Summary\n'); + + const successful = results.filter(r => r.success); + const failed = results.filter(r => !r.success); + + console.log(` Total: ${results.length} tests`); + console.log(` โœ“ Passed: ${successful.length}`); + console.log(` โœ— Failed: ${failed.length}`); + + if (failed.length > 0) { + console.log('\n Failed tests:'); + for (const f of failed) { + console.log(` - ${f.provider}/${f.type}: ${f.error}`); + } + } + + console.log('\n' + 'โ•'.repeat(60) + '\n'); + + // Exit with error code if any tests failed + process.exit(failed.length > 0 ? 
1 : 0); +} + +// Check for --with-images flag +const withImages = process.argv.includes('--with-images'); + +runTests().catch(err => { + console.error('Test suite failed:', err); + process.exit(1); +}); diff --git a/tests/test-ai-streaming.js b/tests/test-ai-streaming.js new file mode 100644 index 0000000..383e34a --- /dev/null +++ b/tests/test-ai-streaming.js @@ -0,0 +1,52 @@ +import { chat } from '../lib/aiProviders.js'; + +async function testStreaming() { + console.log('Testing AI Streaming (Rapid Answers)...'); + + try { + const stream = await chat('openai', [ + { role: 'user', content: 'Tell me a short story about a fast robot in 50 words.' } + ], { + stream: true, + model: 'gpt-4o-mini' + }); + + if (!(stream instanceof ReadableStream)) { + console.error('Error: Did not receive a ReadableStream'); + return; + } + + const reader = stream.getReader(); + const decoder = new TextDecoder(); + let fullContent = ''; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + // OpenAI streaming format is Data: { ... 
}
+      const lines = chunk.split('\n').filter(line => line.trim() !== '');
+
+      for (const line of lines) {
+        if (line === 'data: [DONE]') continue;
+        if (line.startsWith('data: ')) {
+          try {
+            const data = JSON.parse(line.substring(6));
+            const content = data.choices?.[0]?.delta?.content || '';
+            process.stdout.write(content);
+            fullContent += content;
+          } catch (e) {
+            // Ignore incomplete JSON chunks
+          }
+        }
+      }
+    }
+
+    console.log('\n\nโœ… Streaming complete!');
+  } catch (err) {
+    console.error('Streaming test failed:', err.message);
+  }
+}
+
+testStreaming();
diff --git a/tests/test-device-registration.js b/tests/test-device-registration.js
new file mode 100644
index 0000000..fa7770c
--- /dev/null
+++ b/tests/test-device-registration.js
@@ -0,0 +1,32 @@
+import fetch from 'node-fetch';
+
+const BASE = process.env.BASE || 'http://localhost:3001';
+
+async function run() {
+  console.log('Testing POST /api/devices/register against', BASE);
+  const payload = {
+    hardwareId: 'TEST-HW-1234',
+    model: 'NB-Test-Model-1',
+    firmwareVersion: '0.0.1-test',
+    location: 'lab-1',
+    initialTelemetry: { battery: 98 }
+  };
+
+  const res = await fetch(`${BASE}/api/devices/register`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify(payload)
+  });
+
+  const json = await res.json();
+  console.log('Status', res.status);
+  console.log(JSON.stringify(json, null, 2));
+
+  if (res.status !== 202) {
+    process.exit(1);
+  }
+
+  console.log('โœ“ Registration accepted and queued.');
+}
+
+run().catch(e => { console.error(e); process.exit(2); });
diff --git a/tests/unit/test-device-status-transitions.js b/tests/unit/test-device-status-transitions.js
new file mode 100644
index 0000000..ac7f018
--- /dev/null
+++ b/tests/unit/test-device-status-transitions.js
@@ -0,0 +1,61 @@
+import { saveRegistration, transitionStatus, getRegistration } from '../lib/deviceStore.js';
+import { enqueue, dequeue } from '../lib/messageQueue.js';
+
+async function
testStatusTransitions() {
+  console.log('Testing status transitions...');
+
+  // Create a test registration
+  const reg = saveRegistration({
+    deviceId: 'test-device-123',
+    hardwareId: 'HW123',
+    model: 'TestModel'
+  });
+  console.log('Created registration:', reg);
+
+  // Test transitions
+  try {
+    transitionStatus('test-device-123', 'queued');
+    console.log('โœ“ Transitioned to queued');
+
+    transitionStatus('test-device-123', 'processing');
+    console.log('โœ“ Transitioned to processing');
+
+    transitionStatus('test-device-123', 'acknowledged');
+    console.log('โœ“ Transitioned to acknowledged');
+
+    const final = getRegistration('test-device-123');
+    console.log('Final status:', final.status);
+
+    // Test invalid transition
+    try {
+      transitionStatus('test-device-123', 'queued'); // Should fail
+      console.log('โœ— Invalid transition allowed');
+    } catch (e) {
+      console.log('โœ“ Invalid transition blocked:', e.message);
+    }
+
+  } catch (e) {
+    console.error('Transition test failed:', e);
+  }
+}
+
+async function testQueue() {
+  console.log('Testing queue operations...');
+
+  const msg = await enqueue('device-registrations.v1', {
+    deviceId: 'test-device-123',
+    model: 'TestModel'
+  });
+  console.log('Enqueued message:', msg);
+
+  const deq = await dequeue('device-registrations.v1');
+  console.log('Dequeued message:', deq);
+}
+
+async function run() {
+  await testStatusTransitions();
+  await testQueue();
+  console.log('All tests passed!');
+}
+
+run().catch(e => { console.error('Test failed:', e); process.exit(1); });
\ No newline at end of file
diff --git a/thumbnails.bat b/thumbnails.bat
new file mode 100644
index 0000000..10f1b20
--- /dev/null
+++ b/thumbnails.bat
@@ -0,0 +1,15 @@
+@echo off
+REM Extract network thumbnails
+
+cd /d "%~dp0"
+
+echo Extracting thumbnails...
+
+call .venv\Scripts\activate.bat
+python extract_thumbnails.py
+
+echo.
+echo Opening gallery...
+start network_thumbnails\index.html + +pause diff --git a/tracer.bat b/tracer.bat new file mode 100644 index 0000000..3d392f6 --- /dev/null +++ b/tracer.bat @@ -0,0 +1,10 @@ +@echo off +REM Open API Tracer + +cd /d "%~dp0" + +echo Opening API Tracer... +start http://localhost:8000 + +call .venv\Scripts\activate.bat +start /min python api_tracer.py diff --git a/uninstall_autostart.ps1 b/uninstall_autostart.ps1 new file mode 100644 index 0000000..d37bd9a --- /dev/null +++ b/uninstall_autostart.ps1 @@ -0,0 +1,36 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + Uninstall NetworkBuster Auto-Start +#> + +Write-Host "`nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Red +Write-Host "โ•‘ NetworkBuster Auto-Start Uninstaller โ•‘" -ForegroundColor Red +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Red + +$confirm = Read-Host "`nRemove auto-start? (yes/no)" +if ($confirm -ne "yes") { + Write-Host "Cancelled." -ForegroundColor Yellow + exit +} + +Write-Host "`n[1/3] Removing scheduled task..." -ForegroundColor Yellow +schtasks /Delete /TN "NetworkBuster_AutoStart" /F 2>$null +Write-Host " โœ… Task removed" -ForegroundColor Green + +Write-Host "`n[2/3] Removing startup shortcut..." -ForegroundColor Yellow +$startupFolder = [Environment]::GetFolderPath("Startup") +Remove-Item "$startupFolder\NetworkBuster.lnk" -ErrorAction SilentlyContinue +Write-Host " โœ… Shortcut removed" -ForegroundColor Green + +Write-Host "`n[3/3] Cleaning up files..." 
-ForegroundColor Yellow +Remove-Item "$PSScriptRoot\startup_service.bat" -ErrorAction SilentlyContinue +Remove-Item "$PSScriptRoot\autostart_task.xml" -ErrorAction SilentlyContinue +Write-Host " โœ… Files cleaned" -ForegroundColor Green + +Write-Host "`nโœ… Auto-start removed successfully!" -ForegroundColor Green +Write-Host " NetworkBuster will no longer start automatically" -ForegroundColor White + +Write-Host "`nPress any key to exit..." -ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/uninstall_networkbuster.ps1 b/uninstall_networkbuster.ps1 new file mode 100644 index 0000000..405f500 --- /dev/null +++ b/uninstall_networkbuster.ps1 @@ -0,0 +1,41 @@ +#Requires -RunAsAdministrator + +<# +.SYNOPSIS + NetworkBuster Uninstaller +#> + +Write-Host "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" -ForegroundColor Red +Write-Host "โ•‘ NetworkBuster Uninstaller โ•‘" -ForegroundColor Red +Write-Host "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -ForegroundColor Red + +$confirm = Read-Host "`nAre you sure you want to uninstall NetworkBuster? (yes/no)" +if ($confirm -ne "yes") { + Write-Host "Uninstall cancelled." -ForegroundColor Yellow + exit 0 +} + +Write-Host "`n[1/5] ๐Ÿ—‘๏ธ Removing Desktop shortcuts..." -ForegroundColor Yellow +Remove-Item "$env:USERPROFILE\Desktop\NetworkBuster*.lnk" -ErrorAction SilentlyContinue +Write-Host " โœ… Desktop shortcuts removed" -ForegroundColor Green + +Write-Host "`n[2/5] ๐Ÿ—‘๏ธ Removing Start Menu folder..." 
-ForegroundColor Yellow +$StartMenuPath = "$env:APPDATA\Microsoft\Windows\Start Menu\Programs\NetworkBuster" +Remove-Item $StartMenuPath -Recurse -Force -ErrorAction SilentlyContinue +Write-Host " โœ… Start Menu folder removed" -ForegroundColor Green + +Write-Host "`n[3/5] ๐Ÿ—‘๏ธ Removing scheduled task..." -ForegroundColor Yellow +schtasks /Delete /TN "NetworkBuster_ScheduledLaunch" /F 2>$null +Write-Host " โœ… Scheduled task removed" -ForegroundColor Green + +Write-Host "`n[4/5] ๐Ÿ—‘๏ธ Removing registry keys..." -ForegroundColor Yellow +Remove-Item "HKCU:\Software\NetworkBuster" -Recurse -Force -ErrorAction SilentlyContinue +Write-Host " โœ… Registry keys removed" -ForegroundColor Green + +Write-Host "`n[5/5] ๐Ÿ“ Cleanup complete" -ForegroundColor Yellow +Write-Host " โ„น๏ธ Project files remain in: $PSScriptRoot" -ForegroundColor Cyan +Write-Host " โ„น๏ธ To fully remove, delete the folder manually" -ForegroundColor Cyan + +Write-Host "`nโœ… NetworkBuster has been uninstalled" -ForegroundColor Green +Write-Host "`nPress any key to exit..." 
-ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/universal_launcher.py b/universal_launcher.py new file mode 100644 index 0000000..309020d --- /dev/null +++ b/universal_launcher.py @@ -0,0 +1,690 @@ +""" +NetworkBuster - Universal Tool Launcher +Unix-style dashboard for all services and tools +""" + +from flask import Flask, render_template_string, jsonify +import subprocess +import psutil +import socket +from datetime import datetime + +app = Flask(__name__) + +# Define all tools and services +TOOLS = { + 'core_services': [ + {'name': 'Web Server', 'port': 3000, 'cmd': 'node server-universal.js', 'url': 'http://localhost:3000'}, + {'name': 'API Server', 'port': 3001, 'cmd': 'cd api && node server-universal.js', 'url': 'http://localhost:3001'}, + {'name': 'Audio Stream', 'port': 3002, 'cmd': 'node server-audio.js', 'url': 'http://localhost:3002'}, + {'name': 'Mission Control', 'port': 5000, 'cmd': 'python nasa_home_base.py', 'url': 'http://localhost:5000'}, + {'name': 'Network Map', 'port': 6000, 'cmd': 'python network_map_viewer.py', 'url': 'http://localhost:6000'}, + ], + 'utilities': [ + {'name': 'NetworkBuster AI', 'port': 4000, 'cmd': 'python networkbuster_ai.py', 'url': 'http://localhost:4000'}, + {'name': 'Git Cloud Shortcuts', 'cmd': 'python git_cloud_shortcuts.py', 'type': 'script'}, + {'name': 'Flash Git Backup', 'cmd': 'python flash_git_backup.py', 'type': 'script'}, + {'name': 'Drone Simulation', 'cmd': 'python run_drone_simulation.py', 'type': 'script'}, + {'name': 'NetworkBuster Mission', 'cmd': 'python networkbuster_mission_runner.py', 'type': 'script'}, + ], + 'dashboards': [ + {'name': 'Dashboard Control', 'url': 'http://localhost:3000/dashboard-control.html'}, + {'name': 'WiFi 7 Mesh Overlay', 'url': 'http://localhost:3000/wifi7-mesh-overlay.html'}, + {'name': 'Control Panel', 'url': 'http://localhost:3000/control-panel'}, + {'name': 'Git Dashboard', 'url': 
'file:///NetworkBuster_Git_Shortcuts/git_dashboard.html'}, + ], + 'api_endpoints': [ + {'name': 'Health Check', 'url': 'http://localhost:3001/health'}, + {'name': 'System Specs', 'url': 'http://localhost:3001/api/specs'}, + {'name': 'Device Status', 'url': 'http://localhost:6000/api/devices'}, + {'name': 'Documentation', 'url': 'http://localhost:6000/api/docs'}, + {'name': 'Audio Lab', 'url': 'http://localhost:3002/audio-lab'}, + ] +} + +def check_port(port): + """Check if a port is listening""" + for conn in psutil.net_connections(): + if conn.laddr.port == port and conn.status == 'LISTEN': + return True + return False + +def get_all_statuses(): + """Get status of all services""" + statuses = {} + + for category, tools in TOOLS.items(): + statuses[category] = [] + for tool in tools: + if 'port' in tool: + status = 'online' if check_port(tool['port']) else 'offline' + statuses[category].append({ + **tool, + 'status': status + }) + else: + statuses[category].append({ + **tool, + 'status': 'available' + }) + + return statuses + +DASHBOARD_HTML = """ + + + + + + NetworkBuster :: Universal Tool Launcher + + + +
    + +
    +
    +
    โ•”โ•โ• NETWORKBUSTER UNIVERSAL TOOL LAUNCHER โ•โ•โ•—
    +
    --:--:--
    +
    + +
    +
    + System: Windows ARM64 | Python 3.14.2 | Node.js v25.2.1 | Git: bigtree@1598d7e +
    +
    +
    + + + + + +
    +
    + Total Tools: + -- +
    +
    + Services Online: + -- +
    +
    + Uptime: + -- +
    +
    + Last Update: + -- +
    +
    + + +
    +
    +
    โ•โ•โ• CORE SERVICES โ•โ•โ•
    +
    [5 services]
    +
    +
    +
    +
    +
    + + +
    +
    +
    โ•โ•โ• UTILITIES & SCRIPTS โ•โ•โ•
    +
    [5 utilities]
    +
    +
    +
    +
    +
    + + +
    +
    +
    โ•โ•โ• DASHBOARDS & INTERFACES โ•โ•โ•
    +
    [4 dashboards]
    +
    +
    +
      +
      +
      + + +
      +
      +
      โ•โ•โ• API ENDPOINTS & DOCUMENTATION โ•โ•โ•
      +
      [5 endpoints]
      +
      +
      +
        +
        +
        + + +
        +
        +
        โ•โ•โ• QUICK REFERENCE COMMANDS โ•โ•โ•
        +
        +
        +
        + networkbuster@localhost:~$ node server-universal.js โ†’ Start web server on port 3000 +
        +
        + networkbuster@localhost:~$ python nasa_home_base.py โ†’ Launch NASA Mission Control +
        +
        + networkbuster@localhost:~$ python network_map_viewer.py โ†’ Open network topology map +
        +
        + networkbuster@localhost:~$ python flash_git_backup.py โ†’ Flash backup to D: and K: drives +
        +
        +
        + + + +
        + + + + +""" + +@app.route('/') +def index(): + return render_template_string(DASHBOARD_HTML) + +@app.route('/api/status') +def api_status(): + return jsonify(get_all_statuses()) + +@app.route('/health') +def health(): + return jsonify({ + 'status': 'healthy', + 'service': 'universal-launcher', + 'timestamp': datetime.now().isoformat() + }) + +if __name__ == '__main__': + print(""" +โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— +โ•‘ NetworkBuster - Universal Tool Launcher โ•‘ +โ•‘ Unix-style dashboard for all services โ•‘ +โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + """) + + print("๐Ÿš€ Starting Universal Tool Launcher on http://localhost:7000") + print("โšก All services and tools accessible from one interface") + print("") + + app.run(host='0.0.0.0', port=7000, debug=False) diff --git a/vercel.json b/vercel.json index da1702d..11d9755 100644 --- a/vercel.json +++ b/vercel.json @@ -1,11 +1,12 @@ { "version": 2, - "buildCommand": "npm run build:all || npm run build || true", + "buildCommand": "npm run build || true", "devCommand": "npm start", "installCommand": "npm ci --legacy-peer-deps || npm install", "env": { "NODE_ENV": "production", - "VERCEL_ENV": "production" + "VERCEL_ENV": "production", + "AUTH_ENABLED": "false" }, "headers": [ { diff --git a/vercel_domain_setup.py b/vercel_domain_setup.py new file mode 100644 index 0000000..861576d --- /dev/null +++ b/vercel_domain_setup.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python3 +""" +NetworkBuster Vercel Domain Setup Automation +Complete domain configuration for Vercel deployment +""" + +import subprocess +import sys +import json +import time +from pathlib import Path + +PROJECT_PATH = Path(__file__).parent.resolve() + +class 
VercelDomainSetup: + """Automate Vercel domain configuration.""" + + def __init__(self, domain="networkbuster.net"): + self.domain = domain + self.www_domain = f"www.{domain}" + self.api_domain = f"api.{domain}" + self.vercel_installed = self._check_vercel_cli() + + def _check_vercel_cli(self): + """Check if Vercel CLI is installed.""" + try: + result = subprocess.run( + ["vercel", "--version"], + capture_output=True, + text=True, + check=False + ) + if result.returncode == 0: + print(f"โœ… Vercel CLI installed: {result.stdout.strip()}") + return True + else: + print("โš ๏ธ Vercel CLI not found") + return False + except FileNotFoundError: + print("โš ๏ธ Vercel CLI not found") + return False + + def install_vercel_cli(self): + """Install Vercel CLI if not present.""" + if self.vercel_installed: + print("โœ… Vercel CLI already installed") + return True + + print("\n๐Ÿ“ฆ Installing Vercel CLI...") + try: + subprocess.run( + ["npm", "install", "-g", "vercel"], + check=True, + capture_output=True + ) + print("โœ… Vercel CLI installed successfully") + self.vercel_installed = True + return True + except subprocess.CalledProcessError as e: + print(f"โŒ Failed to install Vercel CLI: {e}") + return False + except FileNotFoundError: + print("โŒ npm not found. 
Please install Node.js first.") + return False + + def login_vercel(self): + """Authenticate with Vercel.""" + if not self.vercel_installed: + print("โŒ Vercel CLI not installed") + return False + + print("\n๐Ÿ” Checking Vercel authentication...") + result = subprocess.run( + ["vercel", "whoami"], + capture_output=True, + text=True + ) + + if result.returncode == 0: + print(f"โœ… Logged in as: {result.stdout.strip()}") + return True + else: + print("โš ๏ธ Not logged in to Vercel") + print("๐Ÿ”‘ Running Vercel login...") + + # Interactive login + result = subprocess.run(["vercel", "login"]) + return result.returncode == 0 + + def get_project_info(self): + """Get current Vercel project information.""" + if not self.vercel_installed: + return None + + print("\n๐Ÿ“Š Fetching project information...") + try: + result = subprocess.run( + ["vercel", "inspect"], + capture_output=True, + text=True, + cwd=PROJECT_PATH + ) + if result.returncode == 0: + print("โœ… Project linked to Vercel") + print(result.stdout) + return result.stdout + else: + print("โš ๏ธ Project not linked to Vercel") + return None + except Exception as e: + print(f"โš ๏ธ Could not fetch project info: {e}") + return None + + def link_project(self): + """Link local project to Vercel.""" + print("\n๐Ÿ”— Linking project to Vercel...") + + result = subprocess.run( + ["vercel", "link"], + cwd=PROJECT_PATH + ) + + if result.returncode == 0: + print("โœ… Project linked successfully") + return True + else: + print("โŒ Failed to link project") + return False + + def add_domain(self, domain): + """Add domain to Vercel project.""" + if not self.vercel_installed: + return False + + print(f"\nโž• Adding domain: {domain}") + + result = subprocess.run( + ["vercel", "domains", "add", domain], + cwd=PROJECT_PATH, + capture_output=True, + text=True + ) + + if result.returncode == 0: + print(f"โœ… Domain {domain} added successfully") + print(result.stdout) + return True + else: + if "already exists" in 
result.stderr.lower(): + print(f"โ„น๏ธ Domain {domain} already added") + return True + else: + print(f"โŒ Failed to add domain: {result.stderr}") + return False + + def show_dns_config(self): + """Display required DNS configuration.""" + print("\n" + "="*70) + print(" ๐Ÿ“‹ DNS CONFIGURATION REQUIRED") + print("="*70) + + print(f"\n๐ŸŒ Root Domain: {self.domain}") + print(" Type: A Record") + print(" Name: @") + print(" Value: 76.76.21.21") + print(" TTL: 3600") + print(" --- OR ---") + print(" Type: CNAME") + print(" Name: @") + print(" Value: cname.vercel-dns.com") + + print(f"\n๐ŸŒ WWW Subdomain: {self.www_domain}") + print(" Type: CNAME") + print(" Name: www") + print(" Value: cname.vercel-dns.com") + print(" TTL: 3600") + + print(f"\n๐Ÿ”ง API Subdomain: {self.api_domain}") + print(" Type: CNAME") + print(" Name: api") + print(" Value: .azurecontainerapps.io") + print(" TTL: 3600") + + print("\n" + "="*70) + print(" ๐Ÿ”’ SSL/TLS Configuration") + print("="*70) + print(" โœ… Vercel automatically provisions SSL certificates") + print(" โœ… HTTPS enforced by default") + print(" โœ… Certificate auto-renewal enabled") + + print("\n" + "="*70) + print(" โฑ๏ธ Propagation Time") + print("="*70) + print(" โ€ข DNS changes take 5 minutes to 48 hours to propagate") + print(" โ€ข SSL certificate issued after DNS verification") + print(" โ€ข Check status: vercel domains ls") + print("\n") + + def check_domain_status(self): + """Check status of added domains.""" + if not self.vercel_installed: + return + + print("\n๐Ÿ” Checking domain status...") + + result = subprocess.run( + ["vercel", "domains", "ls"], + capture_output=True, + text=True, + cwd=PROJECT_PATH + ) + + if result.returncode == 0: + print(result.stdout) + else: + print("โš ๏ธ Could not fetch domain status") + + def verify_dns(self): + """Verify DNS configuration using nslookup.""" + print("\n๐Ÿ” Verifying DNS configuration...") + + for domain in [self.domain, self.www_domain]: + print(f"\nChecking 
{domain}...") + result = subprocess.run( + ["nslookup", domain], + capture_output=True, + text=True + ) + + if result.returncode == 0: + if "76.76.21.21" in result.stdout or "vercel" in result.stdout.lower(): + print(f" โœ… {domain} configured correctly") + else: + print(f" โš ๏ธ {domain} not pointing to Vercel yet") + print(f" Output: {result.stdout[:200]}") + else: + print(f" โŒ DNS lookup failed for {domain}") + + def deploy_to_production(self): + """Deploy project to production on Vercel.""" + if not self.vercel_installed: + return False + + print("\n๐Ÿš€ Deploying to Vercel production...") + + result = subprocess.run( + ["vercel", "--prod"], + cwd=PROJECT_PATH + ) + + if result.returncode == 0: + print("โœ… Deployment successful!") + return True + else: + print("โŒ Deployment failed") + return False + + def setup_environment_vars(self): + """Setup environment variables in Vercel.""" + print("\nโš™๏ธ Environment Variables Setup") + print("="*70) + + env_vars = { + "DOMAIN_NAME": self.domain, + "API_URL": f"https://{self.api_domain}", + "NODE_ENV": "production" + } + + print("Recommended environment variables:") + for key, value in env_vars.items(): + print(f" โ€ข {key}={value}") + + print("\nTo add environment variables:") + print(" 1. Visit: https://vercel.com/dashboard/settings") + print(" 2. Select your project") + print(" 3. Go to Settings > Environment Variables") + print(" 4. 
Add the variables above") + print("\nOr use CLI:") + for key, value in env_vars.items(): + print(f" vercel env add {key} production") + + def run_full_setup(self): + """Run complete domain setup process.""" + print("\n" + "="*70) + print(" ๐Ÿš€ NETWORKBUSTER VERCEL DOMAIN SETUP") + print("="*70) + print(f" Domain: {self.domain}") + print("="*70 + "\n") + + # Step 1: Check/Install Vercel CLI + if not self.vercel_installed: + if not self.install_vercel_cli(): + print("\nโŒ Setup aborted: Could not install Vercel CLI") + return False + + # Step 2: Login to Vercel + if not self.login_vercel(): + print("\nโŒ Setup aborted: Authentication required") + return False + + # Step 3: Link project (if not already linked) + project_info = self.get_project_info() + if not project_info: + if not self.link_project(): + print("\nโŒ Setup aborted: Could not link project") + return False + + # Step 4: Add domains + domains_to_add = [self.domain, self.www_domain] + for domain in domains_to_add: + self.add_domain(domain) + + # Step 5: Show DNS configuration + self.show_dns_config() + + # Step 6: Check current domain status + self.check_domain_status() + + # Step 7: Environment variables + self.setup_environment_vars() + + # Step 8: Verify DNS (optional) + print("\n" + "="*70) + verify = input("Do you want to verify DNS configuration now? (y/n): ").strip().lower() + if verify == 'y': + self.verify_dns() + + # Step 9: Deploy to production + print("\n" + "="*70) + deploy = input("Deploy to production now? (y/n): ").strip().lower() + if deploy == 'y': + self.deploy_to_production() + + print("\n" + "="*70) + print(" โœ… VERCEL DOMAIN SETUP COMPLETE") + print("="*70) + print("\n๐Ÿ“ Next Steps:") + print(" 1. Configure DNS records at your domain registrar") + print(" 2. Wait for DNS propagation (5 min - 48 hours)") + print(" 3. Monitor domain status: vercel domains ls") + print(" 4. 
Verify SSL certificate: https://{self.domain}") + print("\n") + + return True + +def main(): + """Main setup function.""" + + # Check if custom domain provided + if len(sys.argv) > 1: + domain = sys.argv[1] + else: + domain = input("Enter your domain (default: networkbuster.net): ").strip() + if not domain: + domain = "networkbuster.net" + + setup = VercelDomainSetup(domain) + setup.run_full_setup() + +if __name__ == "__main__": + main() diff --git a/web-app/dashboard-control.html b/web-app/dashboard-control.html new file mode 100644 index 0000000..01b6461 --- /dev/null +++ b/web-app/dashboard-control.html @@ -0,0 +1,727 @@ + + + + + + NetworkBuster Master Dashboard Control + + + +
        +
        +

        ๐ŸŽ›๏ธ NetworkBuster Master Dashboard Control

        +

        Real-time connection management and system monitoring

        +
        + + +
        +
        + ๐Ÿ“Š + System Statistics +
        +
        +
        +
        Total Connections
        +
        0
        +
        +
        +
        Active
        +
        0
        +
        +
        +
        Inactive
        +
        0
        +
        +
        +
        Uptime
        +
        0s
        +
        +
        +
        Requests
        +
        0
        +
        +
        +
        + +
        + +
        +
        + ๐Ÿ”Œ + Active Connections +
        +
        + +
        +
        + + +
        +
        + โž• + Add Connection +
        +
        +
        + + +
        +
        + + +
        +
        + + +
        + +
        +
        + + +
        +
        + โšก + Quick Actions +
        +
        + + + + +
        +
        +
        + + +
        +
        + ๐ŸŒ + Network Topology +
        +
        + +
        +
        + + +
        +
        + ๐Ÿ“ + Activity Log +
        +
        + +
        +
        +
        + + + + diff --git a/web-app/navigation.js b/web-app/navigation.js index 3ed8f33..9bb0b05 100644 --- a/web-app/navigation.js +++ b/web-app/navigation.js @@ -83,6 +83,20 @@ const BUTTONS = { } }; +// Feature flag for auth links (set via build or environment) +const AUTH_ENABLED = (typeof process !== 'undefined' && process.env && process.env.AUTH_ENABLED === 'true') || false; + +// Remove auth routes/links when disabled +if (!AUTH_ENABLED) { + delete NAVIGATION.apps.authUI; + delete NAVIGATION.api.authLogin; + delete NAVIGATION.api.authSignup; + delete NAVIGATION.api.authDocs; + delete BUTTONS.primary.login; + delete BUTTONS.primary.signup; + console.log('Auth links removed from navigation (AUTH_ENABLED != "true")'); +} + // Generate full URL function getFullUrl(route, useLocal = false) { const base = useLocal ? SITE_CONFIG.localUrl : SITE_CONFIG.baseUrl; @@ -90,7 +104,7 @@ function getFullUrl(route, useLocal = false) { return `http://localhost:${route.port}${route.path}`; } return `${base}${route.path}`; -} +} // Generate navigation HTML function generateNavHTML(category = 'main') { diff --git a/web-app/recycle.html b/web-app/recycle.html new file mode 100644 index 0000000..b9e8f2f --- /dev/null +++ b/web-app/recycle.html @@ -0,0 +1,24 @@ + + + + + NetworkBuster โ€” Recycling Assistant + + + + +

        Personalized Recycling Assistant

        +

        Enter items (one per line) and get recommendations.

        + + + + + + + + +

        Recommendations

        +
        + + + diff --git a/web-app/recycle.js b/web-app/recycle.js new file mode 100644 index 0000000..8060300 --- /dev/null +++ b/web-app/recycle.js @@ -0,0 +1,33 @@ +document.addEventListener('DOMContentLoaded', () => { + const btn = document.getElementById('go'); + const itemsEl = document.getElementById('items'); + const locEl = document.getElementById('location'); + const userEl = document.getElementById('userId'); + const results = document.getElementById('results'); + + btn.addEventListener('click', async () => { + const raw = itemsEl.value.trim(); + if (!raw) return alert('Please add items'); + const items = raw.split(/\r?\n/).map(s => ({ name: s.trim() })); + const payload = { items, location: locEl.value.trim() || undefined, userId: userEl.value.trim() || undefined }; + results.innerHTML = 'Fetching recommendations...'; + try { + const r = await fetch('/api/recycle/recommend', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload) }); + const j = await r.json(); + if (!j.ok) { results.innerHTML = `
        Error: ${j.error || 'unknown'}
        `; return } + const list = j.recommendations || []; + results.innerHTML = ''; + for (let i = 0; i < list.length; i++) { + const li = list[i]; + const el = document.createElement('div'); + el.innerHTML = `${li.action} โ€” ${li.reason} (confidence ${Math.round((li.confidence||0)*100)}%)`; + const fbYes = document.createElement('button'); fbYes.textContent = '๐Ÿ‘ Good'; fbYes.style.marginLeft='8px'; + fbYes.onclick = async () => { await fetch('/api/recycle/feedback', {method:'POST',headers:{'Content-Type':'application/json'}, body: JSON.stringify({userId:payload.userId,item:items[i],action:li.action,rating:5})}); alert('Thanks for the feedback!') } + el.appendChild(fbYes); + results.appendChild(el); + } + } catch (err) { + results.innerHTML = `
        ${err.message}
        `; + } + }); +}); diff --git a/web-app/wifi7-mesh-overlay.html b/web-app/wifi7-mesh-overlay.html new file mode 100644 index 0000000..d454474 --- /dev/null +++ b/web-app/wifi7-mesh-overlay.html @@ -0,0 +1,719 @@ + + + + + + WiFi 7 Mesh - Encrypted Port Overlay + + + + + +
        WiFi 7 MESH NETWORK
        + +
        + +
        +
        +
        + WPA3-Enterprise +
        +
        +
        + AES-256-GCM +
        +
        +
        + 5.8 Gbps +
        +
        +
        + +
        + + + + +
        + +
        +
        ๐Ÿ”’ Encrypted Ports
        +
        + +
        +
        + +
        +
        ๐Ÿ“ก WiFi 7 Mesh Nodes
        +
        + +
        +
        +
        +
        +
        + +
        +
        +
        0
        +
        Active Ports
        +
        +
        +
        0
        +
        Encrypted
        +
        +
        +
        0 Gbps
        +
        Throughput
        +
        +
        +
        0 ms
        +
        Latency
        +
        +
        +
        ONLINE
        +
        Mesh Status
        +
        +
        + + + + diff --git a/workers/deviceConsumer.js b/workers/deviceConsumer.js new file mode 100644 index 0000000..40eba51 --- /dev/null +++ b/workers/deviceConsumer.js @@ -0,0 +1,87 @@ +import { dequeue } from '../lib/messageQueue.js'; +import fetch from 'node-fetch'; + +const TOPIC = 'device-registrations.v1'; +const INGESTION_ENDPOINT = process.env.INGESTION_ENDPOINT || 'http://localhost:3001/api/ingestion/mock'; + +import { dequeue } from '../lib/messageQueue.js'; +import { transitionStatus } from '../lib/deviceStore.js'; +import fetch from 'node-fetch'; + +const TOPIC = 'device-registrations.v1'; +const INGESTION_ENDPOINT = process.env.INGESTION_ENDPOINT || 'http://localhost:3001/api/ingestion/mock'; +const MAX_RETRIES = 3; + +async function processMessage(msg, retryCount = 0) { + const deviceId = msg.payload.deviceId; + console.log(`Processing message ${msg.id} for device ${deviceId} (attempt ${retryCount + 1})`); + + try { + // Mark as processing + transitionStatus(deviceId, 'processing', { processingStartedAt: new Date().toISOString() }); + + // Forward to ingestion endpoint + const res = await fetch(INGESTION_ENDPOINT, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(msg.payload) + }); + + const result = await res.json(); + + if (res.ok && result.status === 'acknowledged') { + // Success + transitionStatus(deviceId, 'acknowledged', { + acknowledgedAt: new Date().toISOString(), + ingestionResult: result, + processingAttempts: retryCount + 1 + }); + console.log(`โœ“ Message ${msg.id} acknowledged for ${deviceId}`); + return { success: true, result }; + } else { + // Ingestion failed + throw new Error(`Ingestion failed: ${res.status} - ${result.error || 'Unknown error'}`); + } + } catch (err) { + console.error(`โœ— Processing failed for ${deviceId}:`, err.message); + + if (retryCount < MAX_RETRIES - 1) { + // Retry after delay + const delay = Math.pow(2, retryCount) * 1000; // Exponential backoff + 
console.log(`Retrying ${deviceId} in ${delay}ms...`); + await new Promise(r => setTimeout(r, delay)); + return processMessage(msg, retryCount + 1); + } else { + // Max retries exceeded + transitionStatus(deviceId, 'failed', { + failedAt: new Date().toISOString(), + error: err.message, + processingAttempts: retryCount + 1 + }); + console.error(`โœ— Max retries exceeded for ${deviceId}`); + return { success: false, error: err.message }; + } + } +} + +async function runConsumer() { + console.log('Device registration consumer started. Polling for messages...'); + console.log(`Ingestion endpoint: ${INGESTION_ENDPOINT}`); + + setInterval(async () => { + try { + const msg = await dequeue(TOPIC); + if (msg) { + await processMessage(msg); + } + } catch (e) { + console.error('Consumer error:', e); + } + }, 2000); // Poll every 2 seconds +} + +if (import.meta.url === `file://${process.cwd()}/workers/deviceConsumer.js`) { + runConsumer(); +} + +export { processMessage, runConsumer }; \ No newline at end of file diff --git a/workers/ingestWorker.js b/workers/ingestWorker.js new file mode 100644 index 0000000..474447d --- /dev/null +++ b/workers/ingestWorker.js @@ -0,0 +1,46 @@ +import { dequeue } from '../lib/messageQueue.js'; +import { transitionStatus } from '../lib/deviceStore.js'; + +const TOPIC = 'device-registrations.v1'; + +async function processNext() { + const msg = await dequeue(TOPIC); + if (!msg) return false; + console.log(`Processing message ${msg.id} for device ${msg.payload.deviceId}`); + + // Simulate processing (e.g., call model ingestion endpoint) + try { + // Simulated processing delay + await new Promise(r => setTimeout(r, 500)); + + // Update status to processed/acknowledged + transitionStatus(msg.payload.deviceId, 'acknowledged', { processedAt: new Date().toISOString(), processedBy: 'ingestWorker' }); + console.log(`Message ${msg.id} processed for ${msg.payload.deviceId}`); + return true; + } catch (err) { + console.error('Processing failed for', msg.id, 
err); + transitionStatus(msg.payload.deviceId, 'failed', { error: String(err), failedAt: new Date().toISOString() }); + return false; + } +} + +async function runLoop() { + console.log('Ingest worker started. Polling for messages...'); + // simple polling loop + setInterval(async () => { + try { + const handled = await processNext(); + if (!handled) { + // nothing to do + } + } catch (e) { + console.error('Worker error:', e); + } + }, 1000); +} + +if (import.meta.url === `file://${process.cwd()}/workers/ingestWorker.js`) { + runLoop(); +} + +export { processNext, runLoop };