diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..77d13fe --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,121 @@ +name: Build additional barge modules + +on: + workflow_dispatch: + push: + branches: + - 'main' + tags: + - 'v*.*.*' + pull_request: + branches: + - 'main' + +env: + DOCKERHUB_IMAGE: ${{ 'oceanprotocol/barge' }} + GHCR_IMAGE: ${{ 'ghcr.io/oceanprotocol/barge' }} + +jobs: + build: + runs-on: ubuntu-latest + # Only run when not from dependabot and when the Dockerfile for this matrix component exists + if: ${{ github.actor != 'dependabot[bot]'}} + strategy: + fail-fast: false + matrix: + # we keep this just in case we need to change + platform: ${{ github.event_name == 'pull_request' && fromJSON('["linux/amd64"]') || fromJSON('["linux/amd64"]') }} + component: + - folder: "./additional-modules/storage/" + tag: "-storage" + steps: + - name: Checkout + uses: actions/checkout@v6 + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: ${{ matrix.platform }} + #image: tonistiigi/binfmt:qemu-v8.0.4 + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + platforms: ${{ matrix.platform }} + - name: Login to Docker Hub + id: dockerhub_login + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_PUSH_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKER_PUSH_TOKEN }} + if: env.DOCKERHUB_USERNAME != '' && env.DOCKERHUB_TOKEN != '' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_PUSH_USERNAME }} + password: ${{ secrets.DOCKER_PUSH_TOKEN }} + - name: Login to GitHub Container Registry + id: ghcr_login + env: + GHCR_PUSH_TOKEN: ${{ secrets.GHCR_PUSH_TOKEN }} + if: env.GHCR_PUSH_TOKEN != '' + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ 
secrets.GHCR_PUSH_TOKEN }} + - name: Process tag + id: process_tag + run: | + TAG="${{ matrix.component.tag }}" + PROCESSED_TAG="${TAG:1}" + echo "processed_tag=${PROCESSED_TAG}" >> $GITHUB_OUTPUT + - name: Set Docker metadata + id: ocean_node_meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.DOCKERHUB_IMAGE }} + ${{ env.GHCR_IMAGE }} + # generate Docker tags based on the following events/attributes + # we only build main branch and PRs + tags: | + type=ref,event=pr,suffix=${{ matrix.component.tag }} + type=raw,value=${{ steps.process_tag.outputs.processed_tag }} + + # type=semver,pattern={{major}}.{{minor}} + # type=semver,pattern={{major}} + # type=sha + - name: Prepare image tags + id: image_tags + run: | + TAGS="${{ steps.ocean_node_meta.outputs.tags }}" + echo "dockerhub<<EOF" >> $GITHUB_OUTPUT + echo "$TAGS" | grep "^${{ env.DOCKERHUB_IMAGE }}:" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + echo "ghcr<<EOF" >> $GITHUB_OUTPUT + echo "$TAGS" | grep "^${{ env.GHCR_IMAGE }}:" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + - name: Build and push to Docker Hub + if: steps.dockerhub_login.outcome == 'success' + id: build_dockerhub + uses: docker/build-push-action@v5 + with: + builder: ${{ steps.buildx.outputs.name }} + context: ${{ matrix.component.folder }} + platforms: ${{ matrix.platform }} + push: true + tags: ${{ steps.image_tags.outputs.dockerhub }} + labels: ${{ steps.ocean_node_meta.outputs.labels }} + - name: Build and push to GHCR + if: steps.ghcr_login.outcome == 'success' + id: build_ghcr + uses: docker/build-push-action@v5 + with: + builder: ${{ steps.buildx.outputs.name }} + context: ${{ matrix.component.folder }} + platforms: ${{ matrix.platform }} + push: true + tags: ${{ steps.image_tags.outputs.ghcr }} + labels: ${{ steps.ocean_node_meta.outputs.labels }} \ No newline at end of file diff --git a/additional-modules/storage/Dockerfile b/additional-modules/storage/Dockerfile new file mode 100644 index 0000000..9870c23 --- /dev/null +++ 
b/additional-modules/storage/Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:22.04 AS ceph +ENV DEBIAN_FRONTEND=noninteractive +ENV TZ=Etc/UTC + +RUN apt -y update && apt -y install \ + apache2 \ + vsftpd \ + lsb-release \ + wget \ + curl \ + pgp \ + tzdata \ + vim \ + dnsutils \ + iputils-ping \ + iproute2 \ + jq +# ---------------------------------------------------- +# Apache config +# ---------------------------------------------------- +RUN a2enmod rewrite +# ---------------------------------------------------- +# FTP config +# ---------------------------------------------------- +RUN sed -i 's/anonymous_enable=NO/anonymous_enable=YES/' /etc/vsftpd.conf && \ + sed -i 's/#write_enable=YES/write_enable=YES/' /etc/vsftpd.conf +RUN wget \ + -q \ + -O- https://download.ceph.com/keys/release.asc | \ + gpg --dearmor > /etc/apt/trusted.gpg.d/ceph.gpg && \ + echo "deb https://download.ceph.com/debian-reef/ $(lsb_release -sc) main" \ + > /etc/apt/sources.list.d/ceph.list && \ + apt -y update && \ + apt install -y ceph radosgw +RUN apt clean && \ + apt autoremove -y && \ + rm -rf /var/lib/apt/lists/* + +FROM ceph AS radosgw +ENV TZ=Etc/UTC +ENV MGR_USERNAME="admin" +ENV MAIN="none" +# ACCESS_KEY, SECRET_KEY, MGR_PASSWORD: pass at runtime (e.g. 
compose environment) to avoid embedding in image + +EXPOSE 7480 + +COPY ./entrypoint.sh /entrypoint +ENTRYPOINT ["/entrypoint"] \ No newline at end of file diff --git a/additional-modules/storage/entrypoint.sh b/additional-modules/storage/entrypoint.sh new file mode 100755 index 0000000..a7258ae --- /dev/null +++ b/additional-modules/storage/entrypoint.sh @@ -0,0 +1,310 @@ +#!/usr/bin/env bash + +set -eux +set -o pipefail + +# Defaults when not set at runtime (avoid ENV in Dockerfile for secrets) +ACCESS_KEY="${ACCESS_KEY:-ocean123}" +SECRET_KEY="${SECRET_KEY:-ocean123secret}" +MGR_PASSWORD="${MGR_PASSWORD:-admin}" + +# In Docker, hostname -d is often empty; use defaults so MAIN=none single-node works +ZONE="$(hostname -s | grep -oP '^[a-z]+[0-9]+' || echo 'a')" +ZONE_GROUP="$(hostname -d | grep -oP '^[a-z0-9]+' || echo 'default')" +REALM="$(hostname -d | grep -oP '^[a-z0-9]+' || echo 'default')" +DOMAIN="$(hostname -d || echo "$(hostname -s).local")" + +## +# create ceph.conf +## +echo "create ceph.conf" + +cat <<- EOF > /etc/ceph/ceph.conf +[global] +fsid = $(uuidgen) +mon_host = $(hostname -i) +auth_allow_insecure_global_id_reclaim = false +mon_warn_on_pool_no_redundancy = false +mon_osd_down_out_interval = 60 +mon_osd_report_timeout = 300 +mon_osd_down_out_subtree_limit = host +mon_osd_reporter_subtree_level = rack +osd_scrub_auto_repair = true +osd_pool_default_size = 1 +osd_pool_default_min_size = 1 +osd_pool_default_pg_num = 1 +osd_crush_chooseleaf_type = 0 +osd_objectstore = memstore +EOF + +## +# create mon +## +echo "create ceph mon" + +ceph-authtool \ + --create-keyring /tmp/ceph.mon.keyring \ + --gen-key -n mon. 
\ + --cap mon 'allow *' +ceph-authtool \ + --create-keyring /etc/ceph/ceph.client.admin.keyring \ + --gen-key -n client.admin \ + --cap mon 'allow *' \ + --cap osd 'allow *' \ + --cap mds 'allow *' \ + --cap mgr 'allow *' +ceph-authtool /tmp/ceph.mon.keyring \ + --import-keyring /etc/ceph/ceph.client.admin.keyring + +monmaptool \ + --create \ + --add "$(hostname -s)" "$(hostname -i)" \ + --fsid "$(grep -oP '(?<=^fsid = )[0-9a-z-]*' /etc/ceph/ceph.conf)" \ + --set-min-mon-release pacific \ + --enable-all-features \ + --clobber \ + /tmp/monmap + +mkdir -p "/var/lib/ceph/mon/ceph-$(hostname -s)" +rm -rf "/var/lib/ceph/mon/ceph-$(hostname -s)"/* +ceph-mon --mkfs -i "$(hostname -s)" --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring +chown -R ceph:ceph /var/lib/ceph/mon/ +ceph-mon --cluster ceph --id "$(hostname -s)" --setuser ceph --setgroup ceph +ceph config set global auth_allow_insecure_global_id_reclaim false + +## +# create mgr +## +echo "create ceph mgr" + +mkdir -p "/var/lib/ceph/mgr/ceph-$(hostname -s)" +ceph auth get-or-create "mgr.$(hostname -s)" mon 'allow profile mgr' osd 'allow *' mds 'allow *' \ + > "/var/lib/ceph/mgr/ceph-$(hostname -s)/keyring" +chown -R ceph:ceph /var/lib/ceph/mgr/ +ceph-mgr --cluster ceph --id "$(hostname -s)" --setuser ceph --setgroup ceph + +## +# create osd +## +OSD=$(ceph osd create) +echo "create ceph osd.${OSD}" + +mkdir -p "/osd/osd.${OSD}/data" +ceph auth get-or-create "osd.${OSD}" mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' \ + > "/osd/osd.${OSD}/data/keyring" +ceph-osd -i "${OSD}" --mkfs --osd-data "/osd/osd.${OSD}/data" +chown -R ceph:ceph "/osd/osd.${OSD}/data" +ceph-osd -i "${OSD}" --osd-data "/osd/osd.${OSD}/data" --keyring "/osd/osd.${OSD}/data/keyring" + +## +# create rgw +## +echo "create ceph rgw" + +mkdir -p "/var/lib/ceph/radosgw/ceph-rgw.$(hostname -s)" +ceph auth get-or-create "client.rgw.$(hostname -s)" osd 'allow rwx' mon 'allow rw' \ + -o "/var/lib/ceph/radosgw/ceph-rgw.$(hostname 
-s)/keyring" +touch "/var/lib/ceph/radosgw/ceph-rgw.$(hostname -s)/done" +chown -R ceph:ceph /var/lib/ceph/radosgw + +if [ "${MAIN}" == "none" ]; then + echo "create admin-user" + radosgw-admin user create \ + --uid=".admin" \ + --display-name="admin" \ + --system \ + --key-type="s3" \ + --access-key="${ACCESS_KEY}" \ + --secret-key="${SECRET_KEY}" + + ceph config set global rgw_enable_usage_log true + ceph config set global rgw_dns_name "$(hostname -s)" + + radosgw --cluster ceph --rgw-zone "default" --name "client.rgw.$(hostname -s)" --setuser ceph --setgroup ceph +fi + +if [ "${MAIN}" == "yes" ]; then + echo "create realm ${REALM}" + radosgw-admin realm create \ + --rgw-realm="${REALM}" \ + --default + + echo "create zonegroup ${ZONE_GROUP}" + radosgw-admin zonegroup create \ + --rgw-realm="${REALM}" \ + --rgw-zonegroup="${ZONE_GROUP}" \ + --endpoints="http://${DOMAIN}:7480" \ + --master \ + --default + + radosgw-admin zonegroup get --rgw-zonegroup="${ZONE_GROUP}" | \ + jq \ + --arg domain "${DOMAIN}" \ + --arg zone1 "dev1-${DOMAIN}" \ + --arg zone2 "dev2-${DOMAIN}" \ + '.hostnames |= [$domain, $zone1, $zone2]' | \ + radosgw-admin zonegroup set --rgw-zonegroup="${ZONE_GROUP}" -i - + + echo "create zone ${ZONE}" + radosgw-admin zone create \ + --rgw-zonegroup="${ZONE_GROUP}" \ + --rgw-zone="${ZONE}" \ + --endpoints="http://${ZONE}-${DOMAIN}:7480" \ + --master \ + --default + + echo "create placement PREMIUM" + radosgw-admin zonegroup placement add \ + --rgw-zonegroup="${ZONE_GROUP}" \ + --placement-id="default-placement" \ + --storage-class="PREMIUM" + + echo "create placement ARCHIVE" + radosgw-admin zonegroup placement add \ + --rgw-zonegroup="${ZONE_GROUP}" \ + --placement-id="default-placement" \ + --storage-class="ARCHIVE" + + echo "create synchronization-user" + radosgw-admin user create \ + --uid=".synchronization" \ + --display-name="synchronization-user" \ + --system \ + --key-type="s3" \ + --access-key="${ACCESS_KEY}" \ + --secret-key="${SECRET_KEY}" + 
+ echo "add synchronization-user to zone ${ZONE}" + radosgw-admin zone modify \ + --rgw-zone="${ZONE}" \ + --access-key="${ACCESS_KEY}" \ + --secret-key="${SECRET_KEY}" + + ## + # disable the defaut sync of buckets between zones, + # but allow specific ones to replicate + ## + radosgw-admin sync group create \ + --group-id=group-main \ + --status=allowed + radosgw-admin sync group flow create \ + --group-id=group-main \ + --flow-id=flow-main \ + --flow-type=symmetrical \ + --zones=dev1,dev2 + radosgw-admin sync group pipe create \ + --group-id=group-main \ + --pipe-id=pipe-main \ + --source-zones='*' \ + --source-bucket='*' \ + --dest-zones='*' \ + --dest-bucket='*' + + ## + # enable mirroring for a specific bucket between zones + ## + # radosgw-admin sync group create \ + # --bucket=test1 \ + # --group-id=group-test1 \ + # --status=enabled + # radosgw-admin sync group pipe create \ + # --bucket=test1 \ + # --group-id=group-test1 \ + # --pipe-id=pipe-test1 \ + # --source-zones='*' \ + # --source-bucket='*' \ + # --dest-zones='*' \ + # --dest-bucket='*' + + echo "create objstorage-admin user" + radosgw-admin user create \ + --uid=".objstorage-admin" \ + --display-name=".objstorage-admin" \ + --system \ + --admin + + radosgw-admin period update \ + --commit +fi + +if [ "${MAIN}" == "no" ]; then + echo "get realm http://${DOMAIN}:7480" + while ! 
radosgw-admin realm pull \ + --url="http://${DOMAIN}:7480" \ + --access-key="${ACCESS_KEY}" \ + --secret="${SECRET_KEY}"; do sleep 0.5; done + + echo "set default realm to ${REALM}" + radosgw-admin realm default \ + --rgw-realm="${REALM}" + + echo "create zone ${ZONE}" + radosgw-admin zone create \ + --rgw-zonegroup="${ZONE_GROUP}" \ + --rgw-zone="${ZONE}" \ + --access-key="${ACCESS_KEY}" \ + --secret-key="${SECRET_KEY}" \ + --endpoints="http://${ZONE}-${DOMAIN}:7480" \ + --default +fi + +if [ "${MAIN}" == "yes" ] || [ "${MAIN}" == "no" ]; then + echo "create placement PREMIUM for ${ZONE}" + radosgw-admin zone placement add \ + --rgw-zone="${ZONE}" \ + --placement-id="default-placement" \ + --storage-class="PREMIUM" \ + --data-pool "${ZONE}.rgw.buckets.premium.data" + + echo "create placement STANDARD for ${ZONE}" + radosgw-admin zone placement add \ + --rgw-zone="${ZONE}" \ + --placement-id="default-placement" \ + --storage-class="STANDARD" \ + --data-pool "${ZONE}.rgw.buckets.standard.data" + + echo "create placement ARCHIVE for ${ZONE}" + radosgw-admin zone placement add \ + --rgw-zone="${ZONE}" \ + --placement-id="default-placement" \ + --storage-class="ARCHIVE" \ + --data-pool "${ZONE}.rgw.buckets.archive.data" \ + --compression lz4 + + radosgw-admin period update --commit + + ceph config set global rgw_enable_usage_log true + radosgw --cluster ceph --rgw-zone "${ZONE}" --name "client.rgw.$(hostname -s)" --setuser ceph --setgroup ceph +fi + +# Configure Cluster +ceph mgr module enable dashboard --force +ceph mgr module enable prometheus --force +ceph mgr module enable diskprediction_local --force +ceph mgr module enable stats --force +ceph mgr module disable nfs +ceph config set mgr mgr/dashboard/ssl false --force +ceph dashboard feature disable rbd cephfs nfs iscsi mirroring +echo "${MGR_PASSWORD}" | ceph dashboard ac-user-create "${MGR_USERNAME}" -i - administrator --force-password +echo "${ACCESS_KEY}" | ceph dashboard set-rgw-api-access-key -i - 
+echo "${SECRET_KEY}" | ceph dashboard set-rgw-api-secret-key -i - +ceph dashboard set-rgw-api-ssl-verify False + +# Test API +curl -X 'POST' \ + 'http://127.0.0.1:8080/api/auth' \ + -H 'accept: application/vnd.ceph.api.v1.0+json' \ + -H 'Content-Type: application/json' \ + -d "{ + \"username\": \"${MGR_USERNAME}\", + \"password\": \"${MGR_PASSWORD}\" +}" + +## +# log output in forground +## +while ! tail -F /var/log/ceph/ceph* ; do + sleep 0.1 +done + +echo "Container terminated ..." \ No newline at end of file diff --git a/cleanup.sh b/cleanup.sh index bc68647..4719ec2 100755 --- a/cleanup.sh +++ b/cleanup.sh @@ -8,6 +8,7 @@ docker container stop ocean-ganache-1 docker container stop ocean-faucet-1 docker container stop ocean-dashboard-1 docker container stop docker-registry +docker container stop ocean-storage-1 docker container rm ocean-node-1 docker container rm ocean-ipfs-1 @@ -18,6 +19,7 @@ docker container rm ocean-ganache-1 docker container rm ocean-faucet-1 docker container rm ocean-dashboard-1 docker container rm docker-registry +docker container rm ocean-storage-1 docker volume rm ocean-graphipfs docker volume rm ocean-graphpgsql @@ -25,4 +27,3 @@ docker volume rm ocean-provider1db docker volume rm ocean-provider2db docker network rm ocean_backend -docker volume rm $(docker volume ls -q) diff --git a/compose-files/storage.yml b/compose-files/storage.yml new file mode 100644 index 0000000..597578f --- /dev/null +++ b/compose-files/storage.yml @@ -0,0 +1,22 @@ +version: '3' +services: + storage: + image: ghcr.io/oceanprotocol/barge:storage + ports: + - "80:80" + - "443:443" + - "20-21:20-21" + - "7480:7480" # Ceph RGW (S3 + Swift) + - "8080:8080" # Ceph RGW (S3 + Swift) + environment: + MAIN: "none" + ACCESS_KEY: "ocean123" + SECRET_KEY: "ocean123secret" + MGR_USERNAME: "admin" + MGR_PASSWORD: "admin" + networks: + ocean_backend: + ipv4_address: 172.15.0.7 + volumes: + - ${OCEAN_WWW_FOLDER}:/var/www/html + - ${OCEAN_FTP_FOLDER}:/srv/ftp diff --git 
a/start_ocean.sh b/start_ocean.sh index 01b96a0..5973f1d 100755 --- a/start_ocean.sh +++ b/start_ocean.sh @@ -73,6 +73,13 @@ export OCEAN_CERTS_FOLDER="${OCEAN_HOME}/ocean-certs/" mkdir -p ${OCEAN_CERTS_FOLDER} # copy certs cp -r ./certs/* ${OCEAN_CERTS_FOLDER} + +#www folder +export OCEAN_WWW_FOLDER="${OCEAN_HOME}/storage-www/" +mkdir -p ${OCEAN_WWW_FOLDER} +#ftp folder +export OCEAN_FTP_FOLDER="${OCEAN_HOME}/storage-ftp/" +mkdir -p ${OCEAN_FTP_FOLDER} # Specify which ethereum client to run or connect to: development export CONTRACTS_NETWORK_NAME="development" @@ -161,6 +168,7 @@ COMPOSE_FILES+=" -f ${COMPOSE_DIR}/ipfs.yml" COMPOSE_FILES+=" -f ${COMPOSE_DIR}/ganache.yml" COMPOSE_FILES+=" -f ${COMPOSE_DIR}/ocean_contracts.yml" COMPOSE_FILES+=" -f ${COMPOSE_DIR}/node.yml" +COMPOSE_FILES+=" -f ${COMPOSE_DIR}/storage.yml" DOCKER_COMPOSE_EXTRA_OPTS="${DOCKER_COMPOSE_EXTRA_OPTS:-}" @@ -202,6 +210,10 @@ while :; do COMPOSE_FILES="${COMPOSE_FILES/ -f ${COMPOSE_DIR}\/ipfs.yml/}" printf $COLOR_Y'Starting without IPFS...\n\n'$COLOR_RESET ;; + --no-storage) + COMPOSE_FILES="${COMPOSE_FILES/ -f ${COMPOSE_DIR}\/storage.yml/}" + printf $COLOR_Y'Starting without Storage...\n\n'$COLOR_RESET + ;; --no-elasticsearch) COMPOSE_FILES="${COMPOSE_FILES/ -f ${COMPOSE_DIR}\/elasticsearch.yml/}" printf $COLOR_Y'Starting without Elastic search...\n\n'$COLOR_RESET