diff --git a/.github/workflows/deploy-to-gcp.yml b/.github/workflows/deploy-to-gcp.yml new file mode 100644 index 00000000..13f9d838 --- /dev/null +++ b/.github/workflows/deploy-to-gcp.yml @@ -0,0 +1,57 @@ +name: Deploy to GCP Compute VM + +on: + push: + branches: + - pre-production + - production + +jobs: + deploy: + name: Deploy to GCP VM + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set environment + run: | + if [[ "${GITHUB_REF_NAME}" == "production" ]]; then + echo "SSH_HOST=${{ secrets.GCP_PROD_SSH_HOST }}" >> "$GITHUB_ENV" + echo "COMPOSE_FILE=docker-compose.production.yml" >> "$GITHUB_ENV" + else + echo "SSH_HOST=${{ secrets.GCP_PREPROD_SSH_HOST }}" >> "$GITHUB_ENV" + echo "COMPOSE_FILE=docker-compose.development.yml" >> "$GITHUB_ENV" + fi + + - name: Show CI key fingerprint + shell: bash + env: + KEY: ${{ github.ref_name == 'production' && secrets.GCP_PROD_SSH_PRIVATE_KEY || secrets.GCP_PREPROD_SSH_PRIVATE_KEY }} + run: | + set -euo pipefail + printf '%s\n' "$KEY" > key.pem + chmod 600 key.pem + echo -n "CI key fingerprint: " + ssh-keygen -yf key.pem | ssh-keygen -lf - + + # Use a single SSH step; choose the key by branch + - name: Deploy to VM via SSH + uses: appleboy/ssh-action@v1.0.3 + with: + host: ${{ env.SSH_HOST }} + username: ${{ secrets.GCP_SSH_USER }} + key: ${{ github.ref_name == 'production' && secrets.GCP_PROD_SSH_PRIVATE_KEY || secrets.GCP_PREPROD_SSH_PRIVATE_KEY }} + script: | + set -e + cd ${{ secrets.DEPLOYMENT_PATH }} + + git fetch origin --prune + git checkout "${{ github.ref_name }}" + git pull --ff-only origin "${{ github.ref_name }}" + + docker compose -f "${{ env.COMPOSE_FILE }}" down + docker compose -f "${{ env.COMPOSE_FILE }}" up -d --build + + docker image prune -f diff --git a/.github/workflows/pull_requests.yml b/.github/workflows/pull_requests.yml new file mode 100644 index 00000000..825d3309 --- /dev/null +++ b/.github/workflows/pull_requests.yml @@ -0,0 +1,35 @@ +name: 
PRs Docker Build Test + +on: + pull_request: + +jobs: + docker-build: + name: Test Production Docker Builds + runs-on: ubuntu-latest + + services: + docker: + image: docker:24.0.5 + options: --privileged + ports: + - 5173:80 + - 8000:8000 + - 8080:8080 + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Build Frontend Production Image + - name: Build Frontend Docker Image + run: | + docker build -t frontend-prod -f frontend/Dockerfile ./frontend + + # Build Backend Production Image + - name: Build API Docker Image + run: | + docker build -t api-prod -f api/Dockerfile ./api diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..ea05d4e8 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,63 @@ +# Changelog + +All notable changes to **Meter Manager** will be documented in this file. + +| Version | Changes | +|-----------|---------| +| v0.2.0 | Parts-used report functional with PDF download | +| v0.1.52 | Deploy chlorides for admin testing | +| v0.1.51.1 | Increased frontend signout to 300 minutes | +| v0.1.51 | Improved monitoring well page | +| v0.1.50 | Fixed wells map bug and update register if part used | +| v0.1.49 | Added outside recorder wells to monitoring page | +| v0.1.48 | Changed well owner to be meter water users | +| v0.1.47 | Add TRSS grids to meter map and fixed meter register save bug | +| v0.1.46 | Change how data is displayed in Wells table | +| v0.1.45 | Color code meter markers on map by last PM | +| v0.1.44 | Fix bug in continuous monitoring well data and added data to OSE endpoint | +| v0.1.43 | Fix navigation from work orders to activity, add OSE endpoint for "data issues" | +| v0.1.42 | Fix pagination, add 'uninstall and hold' | +| v0.1.41 | Add UI for water source on wells and some other minor changes | +| v0.1.40 | Add register to UI on meter details | +| v0.1.39 | Default share ose when workorder, OSE access to register information 
| +| v0.1.38 | Change logout time to 8 hours, show work order count in navigation | +| v0.1.37.1 | Fix various work order bugs | +| v0.1.37 | Update OSE API to include ose_request_id and new endpoint | +| v0.1.36 | Improved work orders, testing still needed | +| v0.1.35.1 | Fix bug with well search failing on certain inputs | +| v0.1.35 | Update continuous data stream IDs for monitoring wells | +| v0.1.34 | Work orders ready for alpha testing, reordered monitoring wells | +| v0.1.33 | Add Meter Status Filter to Meters Table | +| v0.1.32 | Fix Monitoring Wells so that table updates after change | +| v0.1.31 | Added note "verified register ratio" and made it appear by default | +| v0.1.30 | Admin can edit monitoring well data (note that monitoring well table still not updating automatically) | +| v0.1.29 | Fixed bug preventing meter type change | +| v0.1.28 | Full admin UI on meter page | +| v0.1.27 | Give admin ability to add out of order activities, fix zoom on map, other minor changes | +| v0.1.26 | Add functional merge button for admin | +| v0.1.25 | Fix datesort on meter history, give techs limited well management | +| v0.1.24 | Add non-functional merge button for initial testing | +| v0.1.23 | Prevent duplicate activities from being added | +| v0.1.22 | Change ownership so there is now water_users and meter_owner | +| v0.1.21 | Implement Degrees Minutes Seconds (DMS) for lat/long | +| v0.1.20 | Fix monitoring wells sort | +| v0.1.19 | Updated OSE endpoint to have activity_id, reorganized data returned | +| v0.1.18 | Only require well on install activity, display OSE tag | +| v0.1.17 | Restructure security code to prevent database connection problems | +| v0.1.16 | Fixed bug where status is changed when clearing well from meter | +| v0.1.15 | Updated backend to use SQLAlchemy 2 (resolve connection issue?) 
| +| v0.1.14 | Display RA number instead of well name, well distance is now observation, new default observations | +| v0.1.13 | Add checkbox for sharing activities with OSE | +| v0.1.12 | Change lat/long to DMS, reorder observation inputs, block out of order activities | +| v0.1.11 | Remove all async code to see if it fixes deadlock issue | +| v0.1.10 | Fix owners and osetag on Wells page | +| v0.1.9 | Add owners to Meters table, fix various bugs | +| v0.1.8 | Fix bug in meter selection autocomplete | +| v0.1.7 | Fixed bugs in Add Meter | +| v0.1.6 | Various fixes and meter search via map UI | +| v0.1.5 | Various minor bug fixes | +| v0.1.4 | Updated "current installation" section of activities to match Meters page | +| v0.1.3 | Added user admin, improved appearance, fixed OSE endpoint scope | +| v0.1.2 | Added an initial parts inventory and minor meter installation UI improvements | +| v0.1.1 | Initial version with new clean database | +| v0.0.0 | Initial minimum viable product | diff --git a/LICENSE.md b/LICENSE similarity index 100% rename from LICENSE.md rename to LICENSE diff --git a/README.md b/README.md index 9c13e614..35bacfb3 100644 --- a/README.md +++ b/README.md @@ -1,80 +1,31 @@ -# WaterManagerDB +# Meter Manager -[](https://github.com/NMWDI/WaterManagerDB/actions/workflows/testing.yml) -[](https://github.com/NMWDI/WaterManagerDB/actions/workflows/format_code.yml) +### Purpose -## Versions -- V0.1.52 - Deploy chlorides for admin testing -- V0.1.51.1 - Increased frontend signout to 300 minutes -- V0.1.51 - Improved monitoring well page -- V0.1.50 - Fixed wells map bug and update register if part used -- V0.1.49 - Added outside recorder wells to monitoring page -- V0.1.48 - Changed well owner to be meter water users -- V0.1.47 - Add TRSS grids to meter map and fixed meter register save bug -- V0.1.46 - Change how data is displayed in Wells table -- V0.1.45 - Color code meter markers on map by last PM -- V0.1.44 - Fix bug in continuous monitoring well 
data and added data to OSE endpoint -- V0.1.43 - Fix navigation from work orders to activity, add OSE endpoint for "data issues" -- V0.1.42 - Fix pagination, add 'uninstall and hold' -- V0.1.41 - Add UI for water source on wells and some other minor changes -- V0.1.40 - Add register to UI on meter details -- V0.1.39 - Default share ose when workorder, OSE access to register information -- V0.1.38 - Change logout time to 8 hours, show work order count in navigation -- V0.1.37.1 - Fix various work order bugs -- V0.1.37 - Update OSE API to include ose_request_id and new endpoint -- V0.1.36 - Improved work orders, testing still needed -- V0.1.35.1 - Fix bug with well search failing on certain inputs -- V0.1.35 - Update continuous data stream IDs for monitoring wells -- V0.1.34 - Work orders ready for alpha testing, reordered monitoring wells -- V0.1.33 - Add Meter Status Filter to Meters Table -- V0.1.32 - Fix Monitoring Wells so that table updates after change -- V0.1.31 - Added note "verified register ratio" and made it appear by default -- V0.1.30 - Admin can edit monitoring well data (note that monitoring well table still not updating automatically) -- V0.1.29 - Fixed bug preventing meter type change -- V0.1.28 - Full admin UI on meter page -- V0.1.27 - Give admin ability to add out of order activities, fix zoom on map, other minor changes -- V0.1.26 - Add functional merge button for admin -- V0.1.25 - Fix datesort on meter history, give techs limited well management -- V0.1.24 - Add non-functional merge button for initial testing -- V0.1.23 - Prevent duplicate activities from being added -- V0.1.22 - Change ownership so there is now water_users and meter_owner -- V0.1.21 - Implement Degrees Minutes Seconds (DMS) for lat/long -- V0.1.20 - Fix monitoring wells sort -- V0.1.19 - Updated OSE endpoint to have activity_id, reorganized data returned -- V0.1.18 - Only require well on install activity, display OSE tag -- V0.1.17 - Restructure security code to prevent 
database connection problems -- V0.1.16 - Fixed bug where status is changed when clearing well from meter -- V0.1.15 - Updated backend to use SQLAlchemy 2 (resolve connection issue?) -- V0.1.14 - Display RA number instead of well name, well distance is now observation, new default observations -- V0.1.13 - Add checkbox for sharing activities with OSE. -- V0.1.12 - Change lat/long to DMS, reorder observation inputs, block out of order activities -- V0.1.11 - Remove all async code to see if it fixes deadlock issue -- V0.1.10 - Fix owners and osetag on Wells page -- V0.1.9 - Add owners to Meters table, fix various bugs -- V0.1.8 - Fix bug in meter selection autocomplete -- V0.1.7 - Fixed bugs in Add Meter -- V0.1.6 - Various fixes and meter search via map UI -- V0.1.5 - Various minor bug fixes -- V0.1.4 - Updated "current installation" section of activities to match Meters page -- V0.1.3 - Added user admin, improved appearance, fixed OSE endpoint scope. -- V0.1.2 - Added an initial parts inventory and minor meter installation UI improvements -- V0.1.1 - Initial version with new clean database -- V0.0 - Initial minimum viable product +**Meter Manager** is a web application designed to help **PVACD** manage its water data. It provides tools for spatial visualization, maintenance tracking, and regulatory reporting. -## Purpose -This web app facilitates reporting of water management operations to other organizations. The initial goal is to help water conservation districts communicate with local and state governments. However, the interface may eventually be developed to be more general. +--- -## Installation -The app is built with the following components: -* PostgreSQL database with PostGIS extension -* Python FastAPI backend for interfacing with database -* React based frontend +### Features -App components are organized into Docker containers, but it can also be run locally. 
+- πΊοΈ Interactive map UI for meters and wells +- π§ Meter activities, maintenance history, and preventive maintenance (PM) tracking +- π¦ Inventory and part usage tracking +- π Work order and technician assignment system +- π OSE-compliant reporting endpoint +- π οΈ Admin features for editing, merging, and managing records +- π₯ Role-based access control (techs, admins, etc.) +- π°οΈ TRSS grid overlays for spatial reference +- π§ Continuous monitoring support for observation wells -To run the app, clone the repository and use Docker Compose to run: -``` -/watermanagerdb >> docker compose -f docker-compose.dev.yml --build -``` +--- -The API component will need several environmental variables that should be specified in the file 'api/.env'. See api/.env_example for an example. The PostgreSQL environmental variables should match the database settings. +### Tech Stack + +| Layer | Technology | +|---------------|----------------------| +| **Frontend** | React + TypeScript | +| **Backend** | FastAPI (Python) | +| **Database** | PostgreSQL + PostGIS | +| **Container** | Docker Compose | +| **CI/CD** | GitHub Actions | diff --git a/api/Dockerfile b/api/Dockerfile index cacf5106..9c3e543e 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,17 +1,31 @@ -# -FROM python:3.10 +FROM python:3.12.11 -# -WORKDIR . 
+WORKDIR /app -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 -# +# Install system dependencies (trixie) + JDK (default is 21) + PostgreSQL client +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + libpango-1.0-0 \ + libcairo2 \ + libgdk-pixbuf-2.0-0 \ + libffi-dev \ + default-jdk-headless \ + postgresql-client \ + && rm -rf /var/lib/apt/lists/* + +# Make Java headers discoverable by builds +ENV JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64 +ENV JDK_HOME=$JAVA_HOME +ENV PATH="${JAVA_HOME}/bin:${PATH}" +ENV PYTHONPATH=/app + +# Now install the rest of the requirements COPY ./requirements.txt . +RUN pip install --no-cache-dir --upgrade pip setuptools wheel \ + && pip install --no-cache-dir -r requirements.txt -# -RUN pip install --no-cache-dir --upgrade -r requirements.txt +COPY . /app/api -# -COPY . . \ No newline at end of file diff --git a/api/Dockerfile_dev b/api/Dockerfile_dev deleted file mode 100644 index 6408ed13..00000000 --- a/api/Dockerfile_dev +++ /dev/null @@ -1,19 +0,0 @@ -## -#FROM python:3.9 -# -## -#WORKDIR . -# -#ENV PYTHONDONTWRITEBYTECODE 1 -#ENV PYTHONUNBUFFERED 1 -# -#RUN apt update -#RUN apt install libsqlite3-mod-spatialite -y -## -#COPY ./requirements.txt . -# -## -#RUN pip install --no-cache-dir --upgrade -r requirements.txt -# -## dont need to copy because volume mapped. -##COPY . . 
\ No newline at end of file diff --git a/api/main.py b/api/main.py index 8066b3d9..99ecf150 100644 --- a/api/main.py +++ b/api/main.py @@ -23,12 +23,14 @@ from api.schemas import security_schemas from api.models.main_models import Users -from api.routes.meters import meter_router -from api.routes.well_measurements import well_measurement_router from api.routes.activities import activity_router +from api.routes.admin import admin_router +from api.routes.chlorides import chlorides_router +from api.routes.maintenance import maintenance_router +from api.routes.meters import meter_router from api.routes.OSE import ose_router from api.routes.parts import part_router -from api.routes.admin import admin_router +from api.routes.well_measurements import well_measurement_router from api.routes.wells import well_router from api.security import ( @@ -126,11 +128,13 @@ def login_for_access_token( # ======================================= -authenticated_router.include_router(meter_router) authenticated_router.include_router(activity_router) -authenticated_router.include_router(well_measurement_router) -authenticated_router.include_router(part_router) authenticated_router.include_router(admin_router) +authenticated_router.include_router(chlorides_router) +authenticated_router.include_router(maintenance_router) +authenticated_router.include_router(meter_router) +authenticated_router.include_router(part_router) +authenticated_router.include_router(well_measurement_router) authenticated_router.include_router(well_router) add_pagination(app) diff --git a/api/models/main_models.py b/api/models/main_models.py index 5fc7a208..fcd56376 100644 --- a/api/models/main_models.py +++ b/api/models/main_models.py @@ -69,6 +69,7 @@ class Parts(Base): note: Mapped[Optional[str]] in_use: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) commonly_used: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) + price: Mapped[Optional[float]] = mapped_column(Float) 
part_type_id: Mapped[int] = mapped_column( Integer, ForeignKey("PartTypeLU.id"), nullable=False @@ -580,6 +581,7 @@ class meterRegisters(Base): __tablename__ = "meter_registers" brand: Mapped[str] = mapped_column(String, nullable=False) meter_size: Mapped[float] = mapped_column(Float, nullable=False) + part_id: Mapped[int] = mapped_column(Integer, ForeignKey("Parts.id")) ratio: Mapped[str] = mapped_column(String) dial_units_id: Mapped[int] = mapped_column(Integer, ForeignKey("Units.id"), nullable=False) totalizer_units_id: Mapped[int] = mapped_column(Integer, ForeignKey("Units.id"), nullable=False) diff --git a/api/requirements.txt b/api/requirements.txt index a94165bf..4e25f042 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -1,40 +1,72 @@ -annotated-types==0.6.0 -anyio==3.7.1 -bcrypt==4.0.1 -cffi==1.16.0 -click==8.1.7 -colorama==0.4.6 -cryptography==41.0.7 -dnspython==2.4.2 -ecdsa==0.18.0 -email-validator==2.1.0.post1 -exceptiongroup==1.2.0 -fastapi==0.105.0 -fastapi-pagination==0.12.14 -GeoAlchemy2==0.14.2 -greenlet==3.0.2 -h11==0.14.0 -httptools==0.6.1 -idna==3.6 -packaging==23.2 +Cython>=3.1 +pyjnius>=1.6.1 + +annotated-types==0.7.0 +anyio==4.9.0 +attr==0.3.2 +bcrypt==4.3.0 +brotli==1.1.0 +cffi==1.17.1 +click==8.2.1 +ConfigParser==7.2.0 +contourpy==1.3.2 +cryptography==45.0.4 +cssselect2==0.8.0 +cycler==0.12.1 +docutils==0.21.2 +dotenv==0.9.9 +ecdsa==0.19.1 +fastapi==0.116.1 +fastapi-pagination==0.13.3 +filelock==3.18.0 +fonttools==4.58.5 +GeoAlchemy2==0.17.1 +h11==0.16.0 +HTMLParser==0.0.2 +idna==3.10 +ipython==8.12.3 +ipywidgets==8.1.7 +Jinja2==3.1.6 +keyring==25.6.0 +kiwisolver==1.4.8 +MarkupSafe==3.0.2 +matplotlib==3.10.3 +numpy==2.3.1 +packaging==25.0 passlib==1.7.4 -psycopg==3.1.16 -psycopg-binary==3.1.16 -pyasn1==0.5.1 -pycparser==2.21 -pydantic==2.5.2 -pydantic_core==2.14.5 -python-dotenv==1.0.0 -python-jose==3.3.0 -python-multipart==0.0.6 -PyYAML==6.0.1 -rsa==4.9 -six==1.16.0 -sniffio==1.3.0 -SQLAlchemy==2.0.23 -starlette==0.27.0 
-typing_extensions==4.9.0 -tzdata==2023.3 -uvicorn==0.25.0 -watchfiles==0.21.0 -websockets==12.0 +pillow==11.3.0 +protobuf==6.31.1 +psycopg==3.2.9 +pyasn1==0.6.1 +pycparser==2.22 +pydantic==2.11.7 +pydantic_core==2.33.2 +pydyf==0.11.0 +pyOpenSSL==25.1.0 +pyparsing==3.2.3 +pyphen==0.17.2 +pytest==8.4.1 +python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 +python-jose==3.5.0 +python-multipart==0.0.20 +redis==6.2.0 +rsa==4.9.1 +ruff==0.12.3 +six==1.17.0 +sniffio==1.3.1 +Sphinx==8.2.3 +SQLAlchemy==2.0.41 +starlette==0.47.1 +thread==2.0.5 +tinycss2==1.4.0 +tinyhtml5==2.0.0 +typing-inspection==0.4.1 +typing_extensions==4.14.1 +urllib3_secure_extra==0.1.0 +uvicorn==0.35.0 +weasyprint==65.1 +webencodings==0.5.1 +xlsxwriter==3.2.5 +zopfli==0.2.3.post1 +google-cloud-storage==3.3.0 diff --git a/api/routes/activities.py b/api/routes/activities.py index 7b09661b..068063d8 100644 --- a/api/routes/activities.py +++ b/api/routes/activities.py @@ -28,7 +28,6 @@ from api.session import get_db from api.security import get_current_user from api.enums import ScopedUser, WorkOrderStatus -from api.route_util import _patch activity_router = APIRouter() @@ -123,7 +122,7 @@ def post_activity( try: db.add(meter_activity) db.commit() - except IntegrityError as e: + except IntegrityError as _e: raise HTTPException( status_code=409, detail="Activity overlaps with existing activity." 
) @@ -466,63 +465,73 @@ def get_service_types(db: Session = Depends(get_db)): def get_note_types(db: Session = Depends(get_db)): return db.scalars(select(NoteTypeLU)).all() -# Get work orders endpoint @activity_router.get( "/work_orders", dependencies=[Depends(ScopedUser.Read)], - response_model=List[meter_schemas.WorkOrder], tags=["Work Orders"], ) def get_work_orders( - filter_by_status: list[WorkOrderStatus] = Query('Open'), + filter_by_status: list[WorkOrderStatus] = Query(['Open']), start_date: datetime = Query(datetime.strptime('2024-06-01', '%Y-%m-%d')), - db: Session = Depends(get_db) - ): + db: Session = Depends(get_db), +): query_stmt = ( select(workOrders) .options( joinedload(workOrders.status), joinedload(workOrders.meter), - joinedload(workOrders.assigned_user) + joinedload(workOrders.assigned_user), ) .join(workOrderStatusLU) .where(workOrderStatusLU.name.in_(filter_by_status)) .where(workOrders.date_created >= start_date) ) - work_orders: list[workOrders] = db.scalars(query_stmt).all() + work_orders = db.scalars(query_stmt).all() - # I was unable to get associated_activities to work with joinedload, so I'm doing it manually here - relevant_activities = db.scalars(select(MeterActivities).where(MeterActivities.work_order_id.in_([wo.id for wo in work_orders]))).all() - - # Create a dictionary where the key is the work order ID and the value is a list of associated activities - associated_activities = {} - for activity in relevant_activities: - if activity.work_order_id in associated_activities: - associated_activities[activity.work_order_id].append(activity) - else: - associated_activities[activity.work_order_id] = [activity] - - # Create a WorkOrder schema for each work order returned - output_work_orders = [] - for wo in work_orders: - work_order_schema = meter_schemas.WorkOrder( - work_order_id = wo.id, - ose_request_id=wo.ose_request_id, - date_created = wo.date_created, - creator = wo.creator, - meter_id = wo.meter.id, - meter_serial = 
wo.meter.serial_number, - title = wo.title, - description = wo.description, - status = wo.status.name, - notes = wo.notes, - assigned_user_id = wo.assigned_user_id, - assigned_user= wo.assigned_user.username if wo.assigned_user else None, - associated_activities=associated_activities[wo.id] if wo.id in associated_activities else [] - ) - output_work_orders.append(work_order_schema) + # grab activities separately + relevant_activities = db.scalars( + select(MeterActivities) + .options(joinedload(MeterActivities.location)) + .where(MeterActivities.work_order_id.in_([wo.id for wo in work_orders])) + ).all() - return output_work_orders + # group activities by work_order_id + activities_by_wo = {} + for act in relevant_activities: + activities_by_wo.setdefault(act.work_order_id, []).append({ + "id": act.id, + "timestamp_start": act.timestamp_start, + "timestamp_end": act.timestamp_end, + "description": act.description, + "submitting_user_id": act.submitting_user_id, + "meter_id": act.meter_id, + "activity_type_id": act.activity_type_id, + "location_id": act.location_id, + "location_name": act.location.name if act.location else None, + "ose_share": act.ose_share, + "water_users": act.water_users, + }) + + # build output + output = [] + for wo in work_orders: + output.append({ + "work_order_id": wo.id, + "ose_request_id": wo.ose_request_id, + "date_created": wo.date_created, + "creator": wo.creator, + "meter_id": wo.meter.id, + "meter_serial": wo.meter.serial_number, + "title": wo.title, + "description": wo.description, + "status": wo.status.name, + "notes": wo.notes, + "assigned_user_id": wo.assigned_user_id, + "assigned_user": wo.assigned_user.username if wo.assigned_user else None, + "associated_activities": activities_by_wo.get(wo.id, []), + }) + + return output # Create work order endpoint @activity_router.post( @@ -564,7 +573,7 @@ def create_work_order(new_work_order: meter_schemas.CreateWorkOrder, db: Session try: db.add(work_order) db.commit() - except 
IntegrityError as e: + except IntegrityError as _e: raise HTTPException( status_code=409, detail="Title empty or already exists for this meter." @@ -661,7 +670,7 @@ def patch_work_order(patch_work_order_form: meter_schemas.PatchWorkOrder, user: # Database should block empty title and non-unique (date, title, meter_id) combinations try: db.commit() - except IntegrityError as e: + except IntegrityError as _e: raise HTTPException( status_code=409, detail="Title already exists for this meter." diff --git a/api/routes/admin.py b/api/routes/admin.py index 0d0a8b18..432d4946 100644 --- a/api/routes/admin.py +++ b/api/routes/admin.py @@ -11,10 +11,23 @@ from api.route_util import _patch from api.enums import ScopedUser +from pathlib import Path +from google.cloud import storage +from dotenv import load_dotenv + +import os +import subprocess +import datetime + admin_router = APIRouter() pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") - +BUCKET_NAME = os.getenv("GCP_BUCKET_NAME", "") +BACKUP_PREFIX = os.getenv("GCP_BACKUP_PREFIX", "") +BACKUP_RETENTION_DAYS = int(os.getenv("BACKUP_RETENTION_DAYS", "30")) +load_dotenv(os.getenv("APPDB_ENV", ".env")) +DATABASE_URL = os.getenv("DATABASE_URL", "") + # define response models @admin_router.post( "/users/update_password", @@ -195,3 +208,52 @@ def update_role(updated_role: security_schemas.UserRole, db: Session = Depends(g .where(UserRoles.id == updated_role.id) .options(joinedload(UserRoles.security_scopes)) ).first() + + +@admin_router.api_route( + "/backup-db/", + methods=["BACKUP"], + tags=["Admin"], + dependencies=[Depends(ScopedUser.Admin)] +) +def backup_and_send(): + if not BUCKET_NAME: + raise ValueError("GCP_BUCKET_NAME environment variable is not set") + if not DATABASE_URL: + raise ValueError("DATABASE_URL environment variable is not set") + + # Use UTC-aware timestamp + timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d-%H%M%S") + filename = f"backup-{timestamp}.dump" + 
local_path = Path(f"/tmp/{filename}") + + subprocess.run( + ["pg_dump", "-Fc", DATABASE_URL, "-f", str(local_path)], + check=True + ) + + client = storage.Client() + bucket = client.bucket(BUCKET_NAME) + + blob_name = f"{BACKUP_PREFIX}/{filename}" if BACKUP_PREFIX else filename + blob = bucket.blob(blob_name) + blob.upload_from_filename(local_path) + + print(f"Backup uploaded to gs://{BUCKET_NAME}/{blob_name}") + + local_path.unlink(missing_ok=True) + + # Delete old backups (> BACKUP_RETENTION_DAYS) using UTC-aware cutoff + cutoff_date = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=BACKUP_RETENTION_DAYS) + blobs = client.list_blobs(BUCKET_NAME, prefix=BACKUP_PREFIX) + + deleted = [] + for old_blob in blobs: + if old_blob.time_created < cutoff_date: + old_blob.delete() + deleted.append(old_blob.name) + + return { + "status": f"Database backup uploaded to gs://{BUCKET_NAME}/{blob_name}", + "deleted_old_backups": deleted + } diff --git a/api/routes/chlorides.py b/api/routes/chlorides.py new file mode 100644 index 00000000..add44419 --- /dev/null +++ b/api/routes/chlorides.py @@ -0,0 +1,366 @@ +from typing import Optional, List +from datetime import datetime +import calendar +from fastapi.responses import StreamingResponse +from weasyprint import HTML +from io import BytesIO +from fastapi import APIRouter, Depends, HTTPException, Query +from pydantic import BaseModel +from sqlalchemy import and_, select +from sqlalchemy.orm import Session, joinedload + +from api.schemas import well_schemas +from api.models.main_models import WellMeasurements, Wells, Locations, WellUseLU +from api.session import get_db +from api.enums import ScopedUser, SortDirection + +from pathlib import Path +from jinja2 import Environment, FileSystemLoader, select_autoescape + +import matplotlib +matplotlib.use("Agg") # Force non-GUI backend + +TEMPLATES_DIR = Path(__file__).resolve().parent.parent / "templates" + +templates = Environment( + 
loader=FileSystemLoader(TEMPLATES_DIR), + autoescape=select_autoescape(["html", "xml"]) +) + +chlorides_router = APIRouter() + +@chlorides_router.get( + "/chlorides", + dependencies=[Depends(ScopedUser.Read)], + response_model=List[well_schemas.WellMeasurementDTO], + tags=["Chlorides"], +) +def read_chlorides( + chloride_group_id: int = Query(..., description="Chloride group ID to filter by"), + db: Session = Depends(get_db) +): + chloride_concentration_group_id = 5 + + return db.scalars( + select(WellMeasurements) + .options( + joinedload(WellMeasurements.submitting_user), + joinedload(WellMeasurements.well) + ) + .join(Wells, Wells.id == WellMeasurements.well_id) + .where( + and_( + WellMeasurements.observed_property_id == chloride_concentration_group_id, + Wells.chloride_group_id == chloride_group_id + ) + ) + ).all() + + +@chlorides_router.get( + "/chloride_groups", + dependencies=[Depends(ScopedUser.Read)], + response_model=List[well_schemas.ChlorideGroupResponse], + tags=["Chlorides"], +) +def get_chloride_groups( + sort_direction: SortDirection = SortDirection.Ascending, + db: Session = Depends(get_db), +): + query = ( + select(Wells) + .options(joinedload(Wells.location), joinedload(Wells.use_type)) + .join(Locations, isouter=True) + .join(WellUseLU, isouter=True) + .where(Wells.chloride_group_id.isnot(None)) + ) + + if sort_direction == SortDirection.Ascending: + query = query.order_by(Wells.chloride_group_id.asc()) + else: + query = query.order_by(Wells.chloride_group_id.desc()) + + wells = db.scalars(query).all() + + groups = {} + for well in wells: + group_id = well.chloride_group_id + if group_id not in groups: + groups[group_id] = [] + if well.ra_number: + groups[group_id].append(well.ra_number) + + return [ + {"id": group_id, "names": sorted(names)} + for group_id, names in groups.items() + ] + +class MinMaxAvg(BaseModel): + min: Optional[float] = None + max: Optional[float] = None + avg: Optional[float] = None + + +class 
ChlorideReportNums(BaseModel): + north: MinMaxAvg + south: MinMaxAvg + east: MinMaxAvg + west: MinMaxAvg + + +@chlorides_router.get( + "/chlorides/report", + dependencies=[Depends(ScopedUser.Read)], + response_model=ChlorideReportNums, + tags=["Chlorides"], +) +def get_chlorides_report( + from_month: Optional[str] = Query( + None, + description="Month start, 'YYYY-MM'", + pattern=r"^$|^\d{4}-\d{2}$", + ), + to_month: Optional[str] = Query( + None, + description="Month end, 'YYYY-MM'", + pattern=r"^$|^\d{4}-\d{2}$", + ), + db: Session = Depends(get_db), +): + """ + Returns min/max/avg for north/south/east/west halves **within the SE quadrant of New Mexico**, + over the specified [from_month, to_month] inclusive range, for chloride wells in the given group. + """ + + CHLORIDE_OBSERVED_PROPERTY_ID = 5 + + # Parse months + start_dt = _parse_month(from_month) if from_month else None + end_dt = _parse_month(to_month) if to_month else None + if start_dt and not end_dt: + end_dt = start_dt + if end_dt: + end_dt = _month_end(end_dt) + + stmt = ( + select( + WellMeasurements.value, + Locations.latitude, + Locations.longitude, + ) + .join(Wells, Wells.id == WellMeasurements.well_id) + .join(Locations, Locations.id == Wells.location_id) + .where( + and_( + WellMeasurements.observed_property_id == CHLORIDE_OBSERVED_PROPERTY_ID, + Locations.latitude.is_not(None), + Locations.longitude.is_not(None), + # Restrict to NM bbox first + Locations.latitude >= NM_LAT_MIN, + Locations.latitude <= NM_LAT_MAX, + Locations.longitude >= NM_LON_MIN, + Locations.longitude <= NM_LON_MAX, + # Time range (optional) + *( [WellMeasurements.timestamp >= start_dt] if start_dt else [] ), + *( [WellMeasurements.timestamp <= end_dt] if end_dt else [] ), + ) + ) + ) + + rows = db.execute(stmt).all() + + se_rows = [ + (val, lat, lon) + for (val, lat, lon) in rows + if (lat is not None and lon is not None + and SE_MIN_LAT <= float(lat) <= SE_MAX_LAT + and SE_MIN_LON <= float(lon) <= SE_MAX_LON) + ] + + 
north_vals: List[float] = [] + south_vals: List[float] = [] + east_vals: List[float] = [] + west_vals: List[float] = [] + + for val, lat, lon in se_rows: + # North vs South halves within the SE quadrant + if float(lat) >= SE_MID_LAT: + north_vals.append(float(val)) + else: + south_vals.append(float(val)) + + # East vs West halves within the SE quadrant + if float(lon) >= SE_MID_LON: + east_vals.append(float(val)) + else: + west_vals.append(float(val)) + + return ChlorideReportNums( + north=_stats(north_vals), + south=_stats(south_vals), + east=_stats(east_vals), + west=_stats(west_vals), + ) + + +@chlorides_router.get( + "/chlorides/report/pdf", + dependencies=[Depends(ScopedUser.Read)], + tags=["Chlorides"], +) +def download_chlorides_report_pdf( + from_month: Optional[str] = Query( + None, + description="Month start, 'YYYY-MM'", + pattern=r"^$|^\d{4}-\d{2}$", + ), + to_month: Optional[str] = Query( + None, + description="Month end, 'YYYY-MM'", + pattern=r"^$|^\d{4}-\d{2}$", + ), + db: Session = Depends(get_db), +): + """ + Generate a PDF chloride report (north/south/east/west stats) + for the SE quadrant of New Mexico. 
+ """ + # Re-use your existing logic by calling the data endpoint's function + report = get_chlorides_report(from_month=from_month, to_month=to_month, db=db) + + # Render HTML using a template + template = templates.get_template("chlorides_report.html") + html_content = template.render( + report=report, + from_month=from_month, + to_month=to_month, + ) + + # Convert to PDF + pdf_io = BytesIO() + HTML(string=html_content).write_pdf(pdf_io) + pdf_io.seek(0) + + return StreamingResponse( + pdf_io, + media_type="application/pdf", + headers={ + "Content-Disposition": "attachment; filename=chlorides_report.pdf" + }, + ) + +@chlorides_router.post( + "/chlorides", + dependencies=[Depends(ScopedUser.WellMeasurementWrite)], + response_model=well_schemas.ChlorideMeasurement, + tags=["Chlorides"], +) +def add_chloride_measurement( + chloride_measurement: well_schemas.WellMeasurement, + db: Session = Depends(get_db), +): + # Create a new chloride measurement as a WellMeasurement + well_measurement = WellMeasurements( + timestamp = chloride_measurement.timestamp, + value = chloride_measurement.value, + observed_property_id = 5, # Chloride Concentration + submitting_user_id = chloride_measurement.submitting_user_id, + unit_id = chloride_measurement.unit_id, + well_id = chloride_measurement.well_id + ) + + db.add(well_measurement) + db.commit() + + return well_measurement + +@chlorides_router.patch( + "/chlorides", + dependencies=[Depends(ScopedUser.WellMeasurementWrite)], + response_model=well_schemas.WellMeasurement, + tags=["Chlorides"], +) +def patch_chloride_measurement( + chloride_measurement_patch: well_schemas.PatchChlorideMeasurement, + db: Session = Depends(get_db), +): + # Find the measurement + well_measurement = ( + db.scalars(select(WellMeasurements).where(WellMeasurements.id == chloride_measurement_patch.id)).first() + ) + + # Update the fields, all are mandatory + well_measurement.submitting_user_id = chloride_measurement_patch.submitting_user_id + 
well_measurement.timestamp = chloride_measurement_patch.timestamp + well_measurement.value = chloride_measurement_patch.value + well_measurement.unit_id = chloride_measurement_patch.unit_id + well_measurement.well_id = chloride_measurement_patch.well_id + + db.commit() + + return well_measurement + +@chlorides_router.delete( + "/chlorides", + dependencies=[Depends(ScopedUser.Admin)], + tags=["Chlorides"], +) +def delete_chloride_measurement(chloride_measurement_id: int, db: Session = Depends(get_db)): + # Find the measurement + well_measurement = ( + db.scalars(select(WellMeasurements).where(WellMeasurements.id == chloride_measurement_id)).first() + ) + + db.delete(well_measurement) + db.commit() + + return True + + +def _parse_month(m: Optional[str]) -> Optional[datetime]: + """ + Accepts 'YYYY-MM' or 'YYYY MM'. Returns the first day of month at 00:00:00. + """ + if not m: + return None + m = m.strip() + # Try 'YYYY-MM' + for fmt in ("%Y-%m", "%Y %m"): + try: + dt = datetime.strptime(m, fmt) + return dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + except ValueError: + continue + raise HTTPException(status_code=400, detail="Invalid month format. 
Use 'YYYY-MM' or 'YYYY MM'.") + +def _month_end(dt: datetime) -> datetime: + last_day = calendar.monthrange(dt.year, dt.month)[1] + return dt.replace(day=last_day, hour=23, minute=59, second=59, microsecond=999999) + +def _stats(values: List[float]) -> MinMaxAvg: + if not values: + return MinMaxAvg() + return MinMaxAvg( + min=min(values), + max=max(values), + avg=(sum(values) / len(values)) + ) + +# Approx NM bounding box (degrees) +NM_LAT_MIN = 33.12500 +NM_LAT_MAX = 34.12500 +NM_LON_MIN = -105.25000 +NM_LON_MAX = -104.25000 + +# Precompute midlines for quadrants +NM_MID_LAT = (NM_LAT_MIN + NM_LAT_MAX) / 2.0 +NM_MID_LON = (NM_LON_MIN + NM_LON_MAX) / 2.0 + +# Southeast quadrant bounds +SE_MIN_LAT = NM_LAT_MIN +SE_MAX_LAT = NM_MID_LAT +SE_MIN_LON = NM_MID_LON +SE_MAX_LON = NM_LON_MAX + +SE_MID_LAT = (SE_MIN_LAT + SE_MAX_LAT) / 2.0 +SE_MID_LON = (SE_MIN_LON + SE_MAX_LON) / 2.0 diff --git a/api/routes/maintenance.py b/api/routes/maintenance.py new file mode 100644 index 00000000..104fb77e --- /dev/null +++ b/api/routes/maintenance.py @@ -0,0 +1,329 @@ +from fastapi import Depends, APIRouter, HTTPException, Query +from sqlalchemy.orm import Session +from pydantic import BaseModel +from typing import List +from datetime import datetime +import calendar +from fastapi.responses import StreamingResponse +from weasyprint import HTML +from io import BytesIO +from collections import defaultdict +from matplotlib.pyplot import figure, close +from base64 import b64encode +from api.models.main_models import ( + Users, + Meters, + MeterActivities, + ActivityTypeLU, + Locations, +) +from api.session import get_db +from api.enums import ScopedUser +from pathlib import Path +from jinja2 import Environment, FileSystemLoader, select_autoescape + +import matplotlib +matplotlib.use("Agg") # Force non-GUI backend + + +TEMPLATES_DIR = Path(__file__).resolve().parent.parent / "templates" + +templates = Environment( + loader=FileSystemLoader(TEMPLATES_DIR), + 
autoescape=select_autoescape(["html", "xml"]) +) + +maintenance_router = APIRouter() + + +class MeterSummary(BaseModel): + meter: str + count: int + + +class MaintenanceRow(BaseModel): + date_time: datetime + technician: str + meter: str + trss: str + number_of_repairs: int + number_of_pms: int + + +class MaintenanceSummaryResponse(BaseModel): + repairs_by_meter: List[MeterSummary] + pms_by_meter: List[MeterSummary] + table_rows: List[MaintenanceRow] + + +@maintenance_router.get( + "/maintenance", + tags=["Maintenance"], + response_model=MaintenanceSummaryResponse, + dependencies=[Depends(ScopedUser.Read)], +) +def get_maintenance_summary( + from_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + to_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + trss: str = Query(...), + technicians: List[int] = Query(...), + db: Session = Depends(get_db), +): + # Parse from/to month into datetime range + try: + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + to_dt = datetime.strptime(to_month, "%Y-%m") + year, month = to_dt.year, to_dt.month + today = datetime.now() + + if year == today.year and month == today.month: + to_date = today + else: + last_day = calendar.monthrange(year, month)[1] + to_date = to_dt.replace(day=last_day, hour=23, minute=59, second=59) + except ValueError: + raise HTTPException( + status_code=400, + detail="Invalid date format. Use YYYY-MM." 
+ ) + + # Filter by technicians if -1 is not present + filter_techs = -1 not in technicians + + # Optional TRSS-based meter filtering + matching_meter_ids = None + if trss: + try: + # normalize input (strip spaces) + trss_str = trss.strip() + + location_ids = ( + db.query(Locations.id) + .filter(Locations.trss.like(f"{trss_str}%")) + .all() + ) + location_ids = [loc_id for (loc_id,) in location_ids] + + if location_ids: + meter_subq = ( + db.query(Meters.id) + .filter(Meters.location_id.in_(location_ids)) + ) + matching_meter_ids = [m_id for (m_id,) in meter_subq.all()] + except Exception: + pass # Ignore invalid TRSS input silently + + # Base query + query = ( + db.query( + MeterActivities.timestamp_start.label("date_time"), + Users.full_name.label("technician"), + Meters.serial_number.label("meter"), + ActivityTypeLU.name.label("activity_type"), + Locations.trss.label("trss") + ) + .join(Users, Users.id == MeterActivities.submitting_user_id) + .join(Meters, Meters.id == MeterActivities.meter_id) + .join( + ActivityTypeLU, + ActivityTypeLU.id == MeterActivities.activity_type_id + ) + .join(Locations, Locations.id == Meters.location_id, isouter=True) + .filter(MeterActivities.timestamp_start >= from_date) + .filter(MeterActivities.timestamp_start <= to_date) + ) + + if filter_techs: + query = query.filter( + MeterActivities.submitting_user_id.in_(technicians) + ) + + if matching_meter_ids is not None: + if not matching_meter_ids: + # TRSS valid but no meters matched -> return empty results + return { + "repairs_by_meter": [], + "pms_by_meter": [], + "table_rows": [], + } + query = query.filter(MeterActivities.meter_id.in_(matching_meter_ids)) + + base_query = query.order_by(MeterActivities.timestamp_start).all() + + # Aggregate repairs and PMs + repairs_by_meter = defaultdict(int) + pms_by_meter = defaultdict(int) + grouped_rows = defaultdict(lambda: {"number_of_repairs": 0, "number_of_pms": 0}) + + for row in base_query: + key = (row.date_time, row.technician, 
row.meter, row.trss) + if row.activity_type == "Repair": + repairs_by_meter[row.meter] += 1 + grouped_rows[key]["number_of_repairs"] += 1 + elif row.activity_type == "Preventative Maintenance": + pms_by_meter[row.meter] += 1 + grouped_rows[key]["number_of_pms"] += 1 + + repairs_result = [{"meter": meter, "count": count} for meter, count in repairs_by_meter.items()] + pms_result = [{"meter": meter, "count": count} for meter, count in pms_by_meter.items()] + + table_rows = [] + for (date_time, technician, meter, trss), counts in grouped_rows.items(): + table_rows.append({ + "date_time": date_time, + "technician": technician, + "meter": meter, + "trss": trss or "", + "number_of_repairs": counts["number_of_repairs"], + "number_of_pms": counts["number_of_pms"], + }) + + return { + "repairs_by_meter": repairs_result, + "pms_by_meter": pms_result, + "table_rows": table_rows, + } + + +@maintenance_router.get( + "/maintenance/pdf", + tags=["Maintenance"], + dependencies=[Depends(ScopedUser.Read)], +) +def download_parts_used_pdf( + from_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + to_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + trss: str = Query(...), + technicians: List[int] = Query(...), + db: Session = Depends(get_db), +): + try: + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + to_dt = datetime.strptime(to_month, "%Y-%m") + year, month = to_dt.year, to_dt.month + today = datetime.now() + if year == today.year and month == today.month: + to_date = today + else: + last_day = calendar.monthrange(year, month)[1] + to_date = to_dt.replace(day=last_day, hour=23, minute=59, second=59) + except ValueError: + raise HTTPException(status_code=400, detail="Invalid date format. 
Use YYYY-MM.") + + filter_techs = -1 not in technicians + + # Optional TRSS filtering via Locations -> Meters + matching_meter_ids = None + if trss: + try: + parts = list(map(int, trss.strip().split("."))) + if len(parts) >= 4: + township, range_, section, quarter = parts[:4] + + location_ids = [ + loc_id for (loc_id,) in db.query(Locations.id).filter( + Locations.township == township, + Locations.range == range_, + Locations.section == section, + Locations.quarter == quarter, + ).all() + ] + + if location_ids: + matching_meter_ids = [ + meter_id for (meter_id,) in db.query(Meters.id).filter( + Meters.location_id.in_(location_ids) + ).all() + ] + except Exception: + pass # Silently skip TRSS filtering if malformed + + query = ( + db.query( + MeterActivities.timestamp_start.label("date_time"), + Users.full_name.label("technician"), + Meters.serial_number.label("meter"), + ActivityTypeLU.name.label("activity_type") + ) + .join(Users, Users.id == MeterActivities.submitting_user_id) + .join(Meters, Meters.id == MeterActivities.meter_id) + .join( + ActivityTypeLU, + ActivityTypeLU.id == MeterActivities.activity_type_id + ) + .filter(MeterActivities.timestamp_start >= from_date) + .filter(MeterActivities.timestamp_start <= to_date) + ) + + if filter_techs: + query = query.filter(MeterActivities.submitting_user_id.in_(technicians)) + + if matching_meter_ids is not None: + if not matching_meter_ids: + return StreamingResponse(BytesIO(), media_type="application/pdf") # Empty PDF + query = query.filter(MeterActivities.meter_id.in_(matching_meter_ids)) + + base_query = query.order_by(MeterActivities.timestamp_start).all() + + repairs_by_meter = defaultdict(int) + pms_by_meter = defaultdict(int) + grouped_rows = defaultdict(lambda: {"number_of_repairs": 0, "number_of_pms": 0}) + + for row in base_query: + key = (row.date_time, row.technician, row.meter) + if row.activity_type == "Repair": + repairs_by_meter[row.meter] += 1 + grouped_rows[key]["number_of_repairs"] += 1 + elif 
row.activity_type == "Preventative Maintenance": + pms_by_meter[row.meter] += 1 + grouped_rows[key]["number_of_pms"] += 1 + + table_rows = [] + for (date_time, technician, meter), counts in grouped_rows.items(): + table_rows.append({ + "date_time": date_time.strftime("%Y-%m-%d %H:%M"), + "technician": technician, + "meter": meter, + "number_of_repairs": counts["number_of_repairs"], + "number_of_pms": counts["number_of_pms"], + }) + + # Generate pie charts as base64 PNGs + def make_pie_chart(data: dict, title: str): + if not data: + return "" + fig = figure(figsize=(5, 5)) + ax = fig.add_subplot(111) + ax.pie( + data.values(), + labels=data.keys(), + autopct="%1.1f%%", + startangle=140, + ) + ax.set_title(title) + buf = BytesIO() + fig.savefig(buf, format="png", bbox_inches="tight") + close(fig) + return b64encode(buf.getvalue()).decode("utf-8") + + repair_chart_b64 = make_pie_chart(repairs_by_meter, "Repairs by Meter") + pm_chart_b64 = make_pie_chart(pms_by_meter, "Preventative Maintenances by Meter") + + template = templates.get_template("maintenance_summary.html") + html = template.render( + from_month=from_month, + to_month=to_month, + repair_chart=repair_chart_b64, + pm_chart=pm_chart_b64, + table_rows=table_rows, + ) + + pdf_io = BytesIO() + HTML(string=html).write_pdf(pdf_io) + pdf_io.seek(0) + + return StreamingResponse( + pdf_io, + media_type="application/pdf", + headers={"Content-Disposition": "attachment; filename=maintenance_summary.pdf"}, + ) diff --git a/api/routes/meters.py b/api/routes/meters.py index 445299b3..c94357ea 100644 --- a/api/routes/meters.py +++ b/api/routes/meters.py @@ -172,56 +172,78 @@ def get_meters_locations( search_string: str = None, db: Session = Depends(get_db), ): - # Build the query statement based on query params - # joinedload loads relationships, outer joins on relationship tables makes them search/sortable query_statement = ( - select(Meters).join(Wells, isouter=True).join(Locations, isouter=True) - ) - - # Ensure there 
are coordinates and meter is installed - query_statement = query_statement.where( - and_( - Locations.latitude.is_not(None), - Locations.longitude.is_not(None), - Meters.status_id == 1, + select( + Meters.id, + Meters.serial_number, + Wells.id.label("well_id"), + Wells.ra_number, + Wells.name, + Locations.id.label("location_id"), + Locations.latitude, + Locations.longitude, + Locations.trss, + ) + .select_from(Meters) + .join(Wells, Meters.well_id == Wells.id, isouter=True) + .join(Locations, Wells.location_id == Locations.id, isouter=True) + .where( + and_( + Locations.latitude.is_not(None), + Locations.longitude.is_not(None), + Meters.status_id == 1, # Only installed meters + ) ) ) if search_string: + ilike_term = f"%{search_string}%" query_statement = query_statement.where( or_( - Meters.serial_number.ilike(f"%{search_string}%"), - Wells.ra_number.ilike(f"%{search_string}%"), - Locations.trss.ilike(f"%{search_string}%"), + Meters.serial_number.ilike(ilike_term), + Wells.ra_number.ilike(ilike_term), + Locations.trss.ilike(ilike_term), ) ) - - meters = db.scalars(query_statement).all() - meter_ids = [meter.id for meter in meters] - - # Get the date of the last PM for each meter - pm_query = text('select max(timestamp_start) ' - 'as last_pm, meter_id from "MeterActivities" ' - 'where activity_type_id=4 and meter_id = ANY(:mids) ' - 'group by meter_id') - - pm_years = db.execute(pm_query,{'mids':meter_ids}).fetchall() - # Create a dictionary of meter_id to last_pm - pm_dict = {pm[1]: pm[0] for pm in pm_years} + result = db.execute(query_statement).fetchall() + meter_ids = [row.id for row in result] + + if not meter_ids: + return [] # Short-circuit if nothing matched + + # Query latest PMs for those meters + pm_query = text( + """ + SELECT MAX(timestamp_start) AS last_pm, meter_id + FROM "MeterActivities" + WHERE activity_type_id = 4 + AND meter_id = ANY(:mids) + GROUP BY meter_id + """ + ) + pm_years = db.execute(pm_query, {"mids": meter_ids}).fetchall() + pm_dict = 
{row.meter_id: row.last_pm for row in pm_years} - # Create a list of MeterMapDTO objects + # Map to DTOs manually for added performance meter_map_list = [] - for meter in meters: - # Find the last PM year for the meter - last_pm = pm_dict.get(meter.id, None) + for row in result: meter_map_list.append( meter_schemas.MeterMapDTO( - id=meter.id, - serial_number=meter.serial_number, - well=meter.well, - location=meter.well.location, - last_pm=last_pm + id=row.id, + serial_number=row.serial_number, + well={ + "id": row.well_id, + "ra_number": row.ra_number, + "name": row.name, + }, + location={ + "id": row.location_id, + "latitude": row.latitude, + "longitude": row.longitude, + "trss": row.trss, + }, + last_pm=pm_dict.get(row.id) ) ) diff --git a/api/routes/parts.py b/api/routes/parts.py index b66d8a4c..3eb9325c 100644 --- a/api/routes/parts.py +++ b/api/routes/parts.py @@ -1,20 +1,36 @@ -from fastapi import Depends, APIRouter, HTTPException +from fastapi import Depends, APIRouter, HTTPException, Query from sqlalchemy.orm import Session, joinedload -from sqlalchemy import select -from typing import List - +from sqlalchemy import select, func +from typing import List, Union, Optional +from datetime import datetime +import calendar +from fastapi.responses import StreamingResponse +from weasyprint import HTML +from io import BytesIO from api.models.main_models import ( Parts, + PartsUsed, PartAssociation, PartTypeLU, Meters, MeterTypeLU, + meterRegisters, + MeterActivities, ) from api.schemas import part_schemas from api.session import get_db -from api.route_util import _get, _patch +from api.route_util import _get from api.enums import ScopedUser from sqlalchemy.exc import IntegrityError +from pathlib import Path +from jinja2 import Environment, FileSystemLoader, select_autoescape + +TEMPLATES_DIR = Path(__file__).resolve().parent.parent / "templates" + +templates = Environment( + loader=FileSystemLoader(TEMPLATES_DIR), + autoescape=select_autoescape(["html", "xml"]) +) 
part_router = APIRouter() @@ -25,8 +41,201 @@ dependencies=[Depends(ScopedUser.Read)], tags=["Parts"], ) -def get_parts(db: Session = Depends(get_db)): - return db.scalars(select(Parts).options(joinedload(Parts.part_type))).all() +def get_parts( + db: Session = Depends(get_db), + in_use: Optional[bool] = Query( + None, + description="Filter by in_use status" + ), +): + stmt = select(Parts).options(joinedload(Parts.part_type)) + + if in_use is not None: + stmt = stmt.where(Parts.in_use == in_use) + + return db.scalars(stmt).all() + + +@part_router.get( + "/parts/used", + tags=["Parts"], + dependencies=[Depends(ScopedUser.Read)], +) +def get_parts_used_summary( + from_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + to_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + parts: List[int] = Query(...), + db: Session = Depends(get_db), +): + try: + # Parse and normalize start of "from" month + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + + # Determine end of "to" month + to_dt = datetime.strptime(to_month, "%Y-%m") + year, month = to_dt.year, to_dt.month + today = datetime.now() + + if year == today.year and month == today.month: + to_date = today + else: + last_day = calendar.monthrange(year, month)[1] + to_date = to_dt.replace( + day=last_day, + hour=23, + minute=59, + second=59 + ) + except ValueError: + raise HTTPException( + status_code=400, + detail="Invalid date format. Use YYYY-MM." 
+ ) + + usage_subq = ( + db.query( + PartsUsed.c.part_id.label("used_part_id"), + func.count(PartsUsed.c.part_id).label("quantity") + ) + .join( + MeterActivities, + MeterActivities.id == PartsUsed.c.meter_activity_id + ) + .filter( + MeterActivities.timestamp_start >= from_date, + MeterActivities.timestamp_start <= to_date, + PartsUsed.c.part_id.in_(parts), + ) + .group_by(PartsUsed.c.part_id) + .subquery() + ) + + query = ( + db.query( + Parts.id.label("id"), + Parts.part_number, + Parts.description, + Parts.price, + func.coalesce(usage_subq.c.quantity, 0).label("quantity") + ) + .outerjoin(usage_subq, Parts.id == usage_subq.c.used_part_id) + .filter(Parts.id.in_(parts)) + .order_by(Parts.part_number) + ) + + results = [] + for row in query.all(): + price = row.price or 0 + total = price * row.quantity + results.append({ + "id": row.id, + "part_number": row.part_number, + "description": row.description, + "price": price, + "quantity": row.quantity, + "total": total, + }) + + return results + + +@part_router.get( + "/parts/used/pdf", + tags=["Parts"], + dependencies=[Depends(ScopedUser.Read)], +) +def download_parts_used_pdf( + from_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + to_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + parts: List[int] = Query(...), + db: Session = Depends(get_db), +): + try: + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + to_dt = datetime.strptime(to_month, "%Y-%m") + year, month = to_dt.year, to_dt.month + today = datetime.now() + + if year == today.year and month == today.month: + to_date = today + else: + last_day = calendar.monthrange(year, month)[1] + to_date = to_dt.replace( + day=last_day, + hour=23, + minute=59, + second=59 + ) + except ValueError: + raise HTTPException( + status_code=400, + detail="Invalid date format. Use YYYY-MM." 
+ ) + + usage_subq = ( + db.query( + PartsUsed.c.part_id.label("used_part_id"), + func.count(PartsUsed.c.part_id).label("quantity") + ) + .join( + MeterActivities, + MeterActivities.id == PartsUsed.c.meter_activity_id + ) + .filter( + MeterActivities.timestamp_start >= from_date, + MeterActivities.timestamp_start <= to_date, + PartsUsed.c.part_id.in_(parts), + ) + .group_by(PartsUsed.c.part_id) + .subquery() + ) + + query = ( + db.query( + Parts.id.label("id"), + Parts.part_number, + Parts.description, + Parts.price, + func.coalesce(usage_subq.c.quantity, 0).label("quantity") + ) + .outerjoin(usage_subq, Parts.id == usage_subq.c.used_part_id) + .filter(Parts.id.in_(parts)) + .order_by(Parts.part_number) + ) + + results = [] + running_total = 0.0 + for row in query.all(): + price = row.price or 0 + quantity = row.quantity or 0 + total = price * quantity + running_total += total + results.append({ + "part_number": row.part_number, + "description": row.description, + "price": price, + "quantity": quantity, + "total": total, + "running_total": running_total, + }) + + template = templates.get_template("parts_used_report.html") + html_content = template.render( + rows=results, + from_month=from_month, + to_month=to_month + ) + pdf_io = BytesIO() + HTML(string=html_content).write_pdf(pdf_io) + pdf_io.seek(0) + + return StreamingResponse( + pdf_io, + media_type="application/pdf", + headers={ + "Content-Disposition": "attachment; filename=parts_used_report.pdf" + }, + ) @part_router.get( @@ -41,12 +250,12 @@ def get_part_types(db: Session = Depends(get_db)): @part_router.get( "/part", - response_model=part_schemas.Part, + response_model=Union[part_schemas.Part, part_schemas.Register], dependencies=[Depends(ScopedUser.Read)], tags=["Parts"], ) def get_part(part_id: int, db: Session = Depends(get_db)): - return db.scalars( + selected_part = db.scalars( select(Parts) .where(Parts.id == part_id) .options( @@ -55,6 +264,27 @@ def get_part(part_id: int, db: Session = 
Depends(get_db)): ) ).first() + # Create the part_schemas.Part instance + returned_part = part_schemas.Part.model_validate(selected_part) + + # If part_type is a Register, we need to load the register details + if selected_part and selected_part.part_type.name == "Register": + register_details = db.scalars( + select(meterRegisters).where( + meterRegisters.part_id == selected_part.id + ) + ).first() + + register_details = part_schemas.Register.register_details.model_validate(register_details) + + # Update the returned_part to include register details + returned_part = part_schemas.Register( + **returned_part.model_dump(exclude_unset=True), + register_settings=register_details + ) + + return returned_part + @part_router.patch( "/part", @@ -66,6 +296,8 @@ def update_part(updated_part: part_schemas.Part, db: Session = Depends(get_db)): # Update the part (this won't include secondary attributes like associations) part_db = _get(db, Parts, updated_part.id) for k, v in updated_part.model_dump(exclude_unset=True).items(): + if k in ["part_type", "meter_types"]: + continue try: setattr(part_db, k, v) except AttributeError as e: @@ -115,6 +347,7 @@ def create_part(new_part: part_schemas.Part, db: Session = Depends(get_db)): note=new_part.note, in_use=new_part.in_use, commonly_used=new_part.commonly_used, + price=new_part.price, ) try: diff --git a/api/routes/well_measurements.py b/api/routes/well_measurements.py index 7cd465d4..10baa025 100644 --- a/api/routes/well_measurements.py +++ b/api/routes/well_measurements.py @@ -1,15 +1,37 @@ -from typing import List +from typing import List, Optional from datetime import datetime +import calendar +import re -from fastapi import Depends, APIRouter, Query +from fastapi import Depends, APIRouter, Query, HTTPException +from fastapi.responses import StreamingResponse from sqlalchemy.orm import Session, joinedload from sqlalchemy import select, and_ +from weasyprint import HTML +from io import BytesIO +from collections import 
defaultdict +from matplotlib.pyplot import figure, close +from base64 import b64encode + from api.schemas import well_schemas from api.models.main_models import WellMeasurements, ObservedPropertyTypeLU, Units, Wells from api.session import get_db from api.enums import ScopedUser +from pathlib import Path +from jinja2 import Environment, FileSystemLoader, select_autoescape + +import matplotlib +matplotlib.use("Agg") # Force non-GUI backend + +TEMPLATES_DIR = Path(__file__).resolve().parent.parent / "templates" + +templates = Environment( + loader=FileSystemLoader(TEMPLATES_DIR), + autoescape=select_autoescape(["html", "xml"]) +) + well_measurement_router = APIRouter() @@ -50,154 +72,397 @@ def add_waterlevel( response_model=List[well_schemas.WellMeasurementDTO], tags=["WaterLevels"], ) -def read_waterlevels(well_id: int = None, db: Session = Depends(get_db)): - return db.scalars( - select(WellMeasurements) - .options(joinedload(WellMeasurements.submitting_user)) - .join(ObservedPropertyTypeLU) - .where( - and_( - ObservedPropertyTypeLU.name == "Depth to water", - WellMeasurements.well_id == well_id, +def read_waterlevels( + well_ids: List[int] = Query(..., description="One or more well IDs"), + from_month: Optional[str] = Query(None, pattern=r"^$|^\d{4}-\d{2}$"), + to_month: Optional[str] = Query(None, pattern=r"^$|^\d{4}-\d{2}$"), + isAveragingAllWells: bool = Query(False), + isComparingTo1970Average: bool = Query(False), + comparisonYear: Optional[str] = Query(None, pattern=r"^$|^\d{4}$"), + db: Session = Depends(get_db), +): + MONITORING_USE_TYPE_ID = 11 + synthetic_id_counter = -1 + + def group_and_average(measurements, group_by_label: str): + grouped = defaultdict(list) + for m in measurements: + key = m.timestamp.strftime("%Y-%m" if group_by_label == "month" else "%Y-%m-%d") + grouped[key].append(m.value) + + result = [] + for time_str, values in sorted(grouped.items()): + dt = datetime.strptime(time_str, "%Y-%m" if group_by_label == "month" else "%Y-%m-%d") + 
avg_value = sum(values) / len(values) + nonlocal synthetic_id_counter + result.append(well_schemas.WellMeasurementDTO( + id=synthetic_id_counter, + timestamp=dt, + value=avg_value, + submitting_user={"full_name": "System"}, + well={"ra_number": "Average of wells"} + )) + synthetic_id_counter -= 1 + return result + + def get_measurements_by_ids(well_ids, start, end): + stmt = ( + select(WellMeasurements) + .options(joinedload(WellMeasurements.submitting_user), joinedload(WellMeasurements.well)) + .join(ObservedPropertyTypeLU) + .where( + and_( + ObservedPropertyTypeLU.name == "Depth to water", + WellMeasurements.well_id.in_(well_ids), + *( [WellMeasurements.timestamp >= start] if start else [] ), + *( [WellMeasurements.timestamp <= end] if end else [] ), + ) ) + .order_by(WellMeasurements.well_id, WellMeasurements.timestamp) ) - ).all() - -@well_measurement_router.patch( - "/waterlevels", - dependencies=[Depends(ScopedUser.Admin)], - response_model=well_schemas.WellMeasurement, - tags=["WaterLevels"], -) -def patch_waterlevel(waterlevel_patch: well_schemas.PatchWaterLevel, db: Session = Depends(get_db)): - # Find the measurement - well_measurement = ( - db.scalars(select(WellMeasurements).where(WellMeasurements.id == waterlevel_patch.levelmeasurement_id)).first() - ) - - # Update the fields, all are mandatory - well_measurement.submitting_user_id = waterlevel_patch.submitting_user_id - well_measurement.timestamp = waterlevel_patch.timestamp - well_measurement.value = waterlevel_patch.value - - db.commit() - - return well_measurement - -@well_measurement_router.delete( - "/waterlevels", - dependencies=[Depends(ScopedUser.Admin)], - tags=["WaterLevels"], -) -def delete_waterlevel(waterlevel_id: int, db: Session = Depends(get_db)): - # Find the measurement - well_measurement = ( - db.scalars(select(WellMeasurements).where(WellMeasurements.id == waterlevel_id)).first() - ) - - db.delete(well_measurement) - db.commit() - - return True - - -# ----------------- Chloride 
Concentration ----------------- # + return db.scalars(stmt).all() + + # Helper: add a comparison average for any given year (same rules as 1970) + def add_year_average(year: int, label: str): + # Determine comparison window shape based on requested range size + if (to_date - from_date).days >= 365: + start = datetime(year, 1, 1) + end = datetime(year, 12, 31, 23, 59, 59) + else: + start = datetime(year, from_date.month, 1) + last_day = calendar.monthrange(year, to_date.month)[1] + end = datetime(year, to_date.month, last_day, 23, 59, 59) + + monitoring_ids = [ + row[0] for row in db.execute( + select(Wells.id).where(Wells.use_type_id == MONITORING_USE_TYPE_ID) + ).all() + ] + year_measurements = get_measurements_by_ids(monitoring_ids, start, end) + averaged = group_and_average(year_measurements, "month") # Always by month + for dto in averaged: + dto.well.ra_number = label + response_data.extend(averaged) + + # Parse dates + from_date, to_date = None, None + if from_month and to_month: + try: + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + to_dt = datetime.strptime(to_month, "%Y-%m") + today = datetime.now() + if to_dt.year == today.year and to_dt.month == today.month: + to_date = today + else: + last_day = calendar.monthrange(to_dt.year, to_dt.month)[1] + to_date = to_dt.replace(day=last_day, hour=23, minute=59, second=59) + except ValueError: + raise HTTPException(status_code=400, detail="Invalid date format. 
Use YYYY-MM.") + + if not well_ids and not isComparingTo1970Average and not comparisonYear: + return [] + + group_by = None + if from_month and to_month: + group_by = "month" if (to_date - from_date).days >= 365 else "day" + + response_data = [] + + # Averaged selection (if requested) + if isAveragingAllWells and well_ids: + current_measurements = get_measurements_by_ids(well_ids, from_date, to_date) + averaged = group_and_average(current_measurements, group_by) + response_data.extend(averaged) + + # Raw per-well (if not averaging) + if not isAveragingAllWells and well_ids: + response_data.extend(get_measurements_by_ids(well_ids, from_date, to_date)) + + # 1970 comparison (existing behavior) + if isComparingTo1970Average: + add_year_average(1970, "1970 Average") + + # Dynamic comparison year (NEW) + if comparisonYear: + try: + year_int = int(comparisonYear) + except ValueError: + raise HTTPException(status_code=400, detail="comparisonYear must be a 4-digit year") + + current_year = datetime.now().year + if year_int < 1900 or year_int > current_year: + raise HTTPException(status_code=400, detail=f"comparisonYear must be between 1900 and {current_year}") + + # Avoid duplicate if user asked for 1970 both ways + already_added_1970 = isComparingTo1970Average and year_int == 1970 + if not already_added_1970: + add_year_average(year_int, f"{year_int} Average") + + return response_data @well_measurement_router.get( - "/chlorides", + "/waterlevels/pdf", dependencies=[Depends(ScopedUser.Read)], - response_model=List[well_schemas.WellMeasurementDTO], - tags=["Chlorides"], + tags=["WaterLevels"], ) -def read_chlorides( - chloride_group_id: int = Query(..., description="Chloride group ID to filter by"), - db: Session = Depends(get_db) +def download_waterlevels_pdf( + well_ids: List[int] = Query(..., description="One or more well IDs"), + from_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + to_month: str = Query(..., pattern=r"^\d{4}-\d{2}$"), + isAveragingAllWells: bool = 
Query(False), + isComparingTo1970Average: bool = Query(False), + comparisonYear: Optional[str] = Query(None, pattern=r"^$|^\d{4}$"), + db: Session = Depends(get_db), ): - chloride_concentration_group_id = 5 - - return db.scalars( - select(WellMeasurements) - .options( - joinedload(WellMeasurements.submitting_user), - joinedload(WellMeasurements.well) - ) - .join(Wells, Wells.id == WellMeasurements.well_id) - .where( - and_( - WellMeasurements.observed_property_id == chloride_concentration_group_id, - Wells.chloride_group_id == chloride_group_id + MONITORING_USE_TYPE_ID = 11 + synthetic_id_counter = -1 + + def group_and_average(measurements, group_by_label: str, ra_label: str): + from collections import defaultdict + grouped = defaultdict(list) + for m in measurements: + key = m.timestamp.strftime("%Y-%m" if group_by_label == "month" else "%Y-%m-%d") + grouped[key].append(m.value) + + result = [] + for time_str, values in sorted(grouped.items()): + dt = datetime.strptime(time_str, "%Y-%m" if group_by_label == "month" else "%Y-%m-%d") + avg_value = sum(values) / len(values) + nonlocal synthetic_id_counter + result.append({ + "id": synthetic_id_counter, + "timestamp": dt, + "value": avg_value, + "well_ra_number": ra_label, + }) + synthetic_id_counter -= 1 + return result + + def get_measurements_by_ids(well_ids, start, end): + stmt = ( + select(WellMeasurements) + .options(joinedload(WellMeasurements.submitting_user), joinedload(WellMeasurements.well)) + .join(ObservedPropertyTypeLU) + .where( + and_( + ObservedPropertyTypeLU.name == "Depth to water", + WellMeasurements.well_id.in_(well_ids), + WellMeasurements.timestamp >= start, + WellMeasurements.timestamp <= end, + ) ) + .order_by(WellMeasurements.well_id, WellMeasurements.timestamp) + ) + return db.scalars(stmt).all() + + # Parse dates + try: + from_date = datetime.strptime(from_month, "%Y-%m").replace(day=1) + to_dt = datetime.strptime(to_month, "%Y-%m") + today = datetime.now() + if to_dt.year == today.year and 
to_dt.month == today.month: + to_date = today + else: + last_day = calendar.monthrange(to_dt.year, to_dt.month)[1] + to_date = to_dt.replace(day=last_day, hour=23, minute=59, second=59) + except ValueError: + raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM.") + + # treat "" as not provided + comparisonYear = comparisonYear or None + + if not well_ids and not isComparingTo1970Average and not comparisonYear: + raise HTTPException(status_code=400, detail="well_ids is required") + + group_by = "month" if (to_date - from_date).days >= 365 else "day" + results = [] + + # Averaging for selected wells + if isAveragingAllWells and well_ids: + current_measurements = get_measurements_by_ids(well_ids, from_date, to_date) + results.extend(group_and_average(current_measurements, group_by, "Average of wells")) + + # Raw per-well data + if not isAveragingAllWells and well_ids: + raw = get_measurements_by_ids(well_ids, from_date, to_date) + for m in raw: + results.append({ + "id": m.id, + "timestamp": m.timestamp, + "value": m.value, + "well_ra_number": m.well.ra_number if m.well else "Unknown" + }) + + # Helper: add comparison average for any given year (same window rules as 1970) + def add_year_average(year: int, label: str): + if (to_date - from_date).days >= 365: + start = datetime(year, 1, 1) + end = datetime(year, 12, 31, 23, 59, 59) + else: + start = datetime(year, from_date.month, 1) + last_day = calendar.monthrange(year, to_date.month)[1] + end = datetime(year, to_date.month, last_day, 23, 59, 59) + + monitoring_ids = [row[0] for row in db.execute( + select(Wells.id).where(Wells.use_type_id == MONITORING_USE_TYPE_ID) + ).all()] + year_measurements = get_measurements_by_ids(monitoring_ids, start, end) + averaged = group_and_average(year_measurements, "month", label) # Always monthly for comparison + results.extend(averaged) + + # 1970 Comparison + if isComparingTo1970Average: + add_year_average(1970, "1970 Average") + + # Dynamic comparison 
year + if comparisonYear: + try: + year_int = int(comparisonYear) + except ValueError: + raise HTTPException(status_code=400, detail="comparisonYear must be a 4-digit year") + now_year = datetime.now().year + if year_int < 1900 or year_int > now_year: + raise HTTPException(status_code=400, detail=f"comparisonYear must be between 1900 and {now_year}") + + # avoid duplicate series if user chose 1970 in both mechanisms + if not (isComparingTo1970Average and year_int == 1970): + add_year_average(year_int, f"{year_int} Average") + + report_title = "ROSWELL ARTESIAN BASIN" + report_subtext = None + + if isAveragingAllWells: + num_wells = len(well_ids) + well_word = "WELL" if num_wells == 1 else "WELLS" + report_subtext = ( + f"MONTHLY AVERAGE WATER LEVEL WITHIN {num_wells} PVACD RECORDER {well_word}\n" + "AVERAGES TAKEN FROM STEEL TAPE MEASUREMENTS MADE\n" + "ON OR NEAR THE 5TH, 15TH AND 25TH OF EACH MONTH" ) - ).all() - -@well_measurement_router.post( - "/chlorides", - dependencies=[Depends(ScopedUser.WellMeasurementWrite)], - response_model=well_schemas.ChlorideMeasurement, - tags=["Chlorides"], -) -def add_chloride_measurement( - chloride_measurement: well_schemas.WellMeasurement, - db: Session = Depends(get_db), -): - # Create a new chloride measurement as a WellMeasurement - well_measurement = WellMeasurements( - timestamp = chloride_measurement.timestamp, - value = chloride_measurement.value, - observed_property_id = 5, # Chloride Concentration - submitting_user_id = chloride_measurement.submitting_user_id, - unit_id = chloride_measurement.unit_id, - well_id = chloride_measurement.well_id + from_year = from_date.year if from_date else None + + def shift_year_safe(dt, new_year: int): + """Shift dt to new_year, handling Feb 29 / month-end safely.""" + try: + return dt.replace(year=new_year) + except ValueError: + last_day = calendar.monthrange(new_year, dt.month)[1] + return dt.replace(year=new_year, day=min(dt.day, last_day)) + + # Prepare data for table + chart 
(apply timeshift to comparison series) + rows = [] + data_by_well = defaultdict(list) + + # Precompute which series should be shifted (e.g., "1970 Average", "2021 Average") + shift_years = set() + if isComparingTo1970Average: + shift_years.add(1970) + if comparisonYear: + try: + shift_years.add(int(comparisonYear)) + except ValueError: + pass # already validated above; safe guard + + for record in results: + original_ts = record["timestamp"] + value = record["value"] + well_label = record["well_ra_number"] + + # Table rows keep original timestamp + rows.append({ + "timestamp": original_ts.strftime("%Y-%m-%d %H:%M"), + "depth_to_water": value, + "well_ra_number": well_label, + }) + + chart_ts = original_ts + # Detect labels like "1970 Average" or "2021 Average" and shift to from_year + if from_year: + m = re.match(r"^(\d{4}) Average$", well_label) + if m: + yr = int(m.group(1)) + if yr in shift_years: + chart_ts = shift_year_safe(original_ts, from_year) + + data_by_well[well_label].append((chart_ts, value)) + + def make_line_chart(data: dict, title: str): + if not data: + return "" + fig = figure(figsize=(10, 6)) + ax = fig.add_subplot(111) + for ra, measurements in data.items(): + sorted_measurements = sorted(measurements, key=lambda x: x[0]) + timestamps = [ts for ts, _ in sorted_measurements] + values = [val for _, val in sorted_measurements] + ax.plot(timestamps, values, label=ra, marker='o') + ax.set_title(title) + ax.set_xlabel("Time") + ax.set_ylabel("Depth to Water") + ax.invert_yaxis() + ax.legend() + fig.autofmt_xdate() + buf = BytesIO() + fig.savefig(buf, format="png", bbox_inches="tight") + close(fig) + return b64encode(buf.getvalue()).decode("utf-8") + + chart_b64 = make_line_chart(data_by_well, "Depth of Water over Time") + html = templates.get_template("waterlevels_report.html").render( + from_month=from_month, + to_month=to_month, + observation_chart=chart_b64, + rows=rows, + report_title=report_title, + report_subtext=report_subtext, ) - 
db.add(well_measurement) - db.commit() + pdf_io = BytesIO() + HTML(string=html).write_pdf(pdf_io) + pdf_io.seek(0) + + return StreamingResponse( + pdf_io, + media_type="application/pdf", + headers={"Content-Disposition": "attachment; filename=waterlevels_report.pdf"}, + ) - return well_measurement @well_measurement_router.patch( - "/chlorides", - dependencies=[Depends(ScopedUser.WellMeasurementWrite)], + "/waterlevels", + dependencies=[Depends(ScopedUser.Admin)], response_model=well_schemas.WellMeasurement, - tags=["Chlorides"], + tags=["WaterLevels"], ) -def patch_chloride_measurement( - chloride_measurement_patch: well_schemas.PatchChlorideMeasurement, - db: Session = Depends(get_db), -): +def patch_waterlevel(waterlevel_patch: well_schemas.PatchWaterLevel, db: Session = Depends(get_db)): # Find the measurement well_measurement = ( - db.scalars(select(WellMeasurements).where(WellMeasurements.id == chloride_measurement_patch.id)).first() + db.scalars(select(WellMeasurements).where(WellMeasurements.id == waterlevel_patch.levelmeasurement_id)).first() ) # Update the fields, all are mandatory - well_measurement.submitting_user_id = chloride_measurement_patch.submitting_user_id - well_measurement.timestamp = chloride_measurement_patch.timestamp - well_measurement.value = chloride_measurement_patch.value - well_measurement.unit_id = chloride_measurement_patch.unit_id - well_measurement.well_id = chloride_measurement_patch.well_id + well_measurement.submitting_user_id = waterlevel_patch.submitting_user_id + well_measurement.timestamp = waterlevel_patch.timestamp + well_measurement.value = waterlevel_patch.value db.commit() return well_measurement @well_measurement_router.delete( - "/chlorides", + "/waterlevels", dependencies=[Depends(ScopedUser.Admin)], - tags=["Chlorides"], + tags=["WaterLevels"], ) -def delete_chloride_measurement(chloride_measurement_id: int, db: Session = Depends(get_db)): +def delete_waterlevel(waterlevel_id: int, db: Session = Depends(get_db)): # 
Find the measurement well_measurement = ( - db.scalars(select(WellMeasurements).where(WellMeasurements.id == chloride_measurement_id)).first() + db.scalars(select(WellMeasurements).where(WellMeasurements.id == waterlevel_id)).first() ) db.delete(well_measurement) db.commit() return True - - - diff --git a/api/routes/wells.py b/api/routes/wells.py index e7f9ade2..e9be8421 100644 --- a/api/routes/wells.py +++ b/api/routes/wells.py @@ -1,6 +1,6 @@ -from typing import List +from typing import List, Optional -from fastapi import Depends, APIRouter, HTTPException +from fastapi import Depends, APIRouter, HTTPException, Query from sqlalchemy import or_, select, desc, text from sqlalchemy.orm import Session, joinedload from sqlalchemy.exc import IntegrityError @@ -64,6 +64,7 @@ def get_wells( sort_by: WellSortByField = WellSortByField.Name, sort_direction: SortDirection = SortDirection.Ascending, has_chloride_group: bool = None, + chloride_group_id: Optional[str] = Query(None, pattern=r"^$|^\d+$"), db: Session = Depends(get_db), ): def sort_by_field_to_schema_field(name: WellSortByField): @@ -104,6 +105,12 @@ def sort_by_field_to_schema_field(name: WellSortByField): if has_chloride_group is not None: query_statement = query_statement.where(Wells.chloride_group_id.isnot(None)) + if chloride_group_id: + query_statement = query_statement.where( + Wells.chloride_group_id == int(chloride_group_id) + ) + + if sort_by: schema_field_name = sort_by_field_to_schema_field(sort_by) @@ -163,7 +170,7 @@ def update_well( try: db.add(well_to_patch) db.commit() - except IntegrityError as e: + except IntegrityError as _e: raise HTTPException(status_code=409, detail="RA number already exists") # Get updated model with relationships @@ -214,7 +221,7 @@ def create_well(new_well: well_schemas.SubmitWellCreate, db: Session = Depends(g db.commit() db.refresh(new_well_model) - except IntegrityError as e: + except IntegrityError as _e: db.rollback() db.delete(new_location_model) db.commit() @@ 
-230,10 +237,6 @@ def create_well(new_well: well_schemas.SubmitWellCreate, db: Session = Depends(g return new_well_model - -# Get List of well for MapView -# Get search for well similar to /well but no pagination and only for installed well -# Returns all installed well with a location when search is None @well_router.get( "/well_locations", dependencies=[Depends(ScopedUser.Read)], @@ -242,13 +245,20 @@ def create_well(new_well: well_schemas.SubmitWellCreate, db: Session = Depends(g ) def get_wells_locations( search_string: str = None, + has_chloride_group: bool = None, + limit: int = 500, + offset: int = 0, db: Session = Depends(get_db), ): - # Build the query statement based on query params - # joinedload loads relationships, outer joins on relationship tables makes them search/sortable query_statement = ( select(Wells) - .options(joinedload(Wells.location), joinedload(Wells.use_type)) + .options( + joinedload(Wells.location), + joinedload(Wells.use_type), + ) + .where( + Wells.location_id.isnot(None) + ) ) if search_string: @@ -261,11 +271,11 @@ def get_wells_locations( ) ) + if has_chloride_group is not None: + query_statement = query_statement.where(Wells.chloride_group_id.isnot(None)) - return db.scalars(query_statement).all() - + return db.scalars(query_statement.offset(offset).limit(limit)).all() -# End @well_router.get( "/well", @@ -340,40 +350,3 @@ def merge_well(well: well_schemas.SubmitWellMerge, db: Session = Depends(get_db) return True -@well_router.get( - "/chloride_groups", - dependencies=[Depends(ScopedUser.Read)], - response_model=List[well_schemas.ChlorideGroupResponse], - tags=["Chlorides"], -) -def get_chloride_groups( - sort_direction: SortDirection = SortDirection.Ascending, - db: Session = Depends(get_db), -): - query = ( - select(Wells) - .options(joinedload(Wells.location), joinedload(Wells.use_type)) - .join(Locations, isouter=True) - .join(WellUseLU, isouter=True) - .where(Wells.chloride_group_id.isnot(None)) - ) - - if sort_direction == 
SortDirection.Ascending: - query = query.order_by(Wells.chloride_group_id.asc()) - else: - query = query.order_by(Wells.chloride_group_id.desc()) - - wells = db.scalars(query).all() - - groups = {} - for well in wells: - group_id = well.chloride_group_id - if group_id not in groups: - groups[group_id] = [] - if well.ra_number: - groups[group_id].append(well.ra_number) - - return [ - {"id": group_id, "names": sorted(names)} - for group_id, names in groups.items() - ] diff --git a/api/schemas/part_schemas.py b/api/schemas/part_schemas.py index 0243bb04..1f147c1a 100644 --- a/api/schemas/part_schemas.py +++ b/api/schemas/part_schemas.py @@ -15,12 +15,29 @@ class Part(ORMBase): note: str | None = None in_use: bool commonly_used: bool - + price: float | None = None part_type_id: int - part_type: PartTypeLU | None = None + part_type: PartTypeLU | None = None meter_types: list[MeterTypeLU] | None = None +class Register(Part): + ''' + Adds on register specific fields to the Part model. + Note: There is also a MeterRegister schema that is used on the Meters view. I might want + to merge these two in the future, but for now they are separate. + ''' + class register_details(ORMBase): + brand: str + meter_size: float + ratio: str + dial_units_id: int | None = None + totalizer_units_id: int | None = None + number_of_digits: int | None = None + multiplier: float | None = None + + register_settings: register_details + class PartUsed(ORMBase): part_id: int diff --git a/api/templates/chlorides_report.html b/api/templates/chlorides_report.html new file mode 100644 index 00000000..1e15688e --- /dev/null +++ b/api/templates/chlorides_report.html @@ -0,0 +1,73 @@ + + +
+ + + + ++ From: {{ from_month or "All Data" }} + + To: {{ to_month or "All Data" }} +
+| Region | +Min | +Max | +Average | +
|---|---|---|---|
| North | +{{ report.north.min }} | +{{ report.north.max }} | +{{ "%.2f"|format(report.north.avg or 0) }} | +
| South | +{{ report.south.min }} | +{{ report.south.max }} | +{{ "%.2f"|format(report.south.avg or 0) }} | +
| East | +{{ report.east.min }} | +{{ report.east.max }} | +{{ "%.2f"|format(report.east.avg or 0) }} | +
| West | +{{ report.west.min }} | +{{ report.west.max }} | +{{ "%.2f"|format(report.west.avg or 0) }} | +
+ From: {{ from_month }} + To: {{ to_month }} +
+ + {% if repair_chart %} +| Date / Time | +Technician | +Meter | +Number of Repairs | +Number of Preventative Maintenances | +
|---|---|---|---|---|
| {{ row.date_time }} | +{{ row.technician }} | +{{ row.meter }} | +{{ row.number_of_repairs }} | +{{ row.number_of_pms }} | +
+ From: + {{ from_month }} + To: + {{ to_month }} +
+| Part # | +Description | +Price | +Quantity | +Total | +Running Total | +
|---|---|---|---|---|---|
| {{ row.part_number }} | +{{ row.description }} | +${{ "%.2f"|format(row.price) }} | +{{ row.quantity }} | +${{ "%.2f"|format(row.total) }} | +${{ "%.2f"|format(row.running_total) }} | +
+ {{ report_subtext }} +
+ {% endif %} + ++ From: {{ from_month }} + To: {{ to_month }} +
+ + {% if observation_chart %} +| Date / Time | +Depth to Water (ft) | +Well | +
|---|---|---|
| {{ row.timestamp }} | +{{ row.depth_to_water }} | +{{ row.well_ra_number }} | +