diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..8ed5ddf --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +*.onnx filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.mp4 filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..dc31ac5 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,35 @@ +name: Publish Package + +on: + push: + tags: + - 'v*' # Triggers on version tags like v1.0.0 + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.7' # Specify the Python version + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + + - name: Build the package + run: | + python setup.py sdist bdist_wheel + + - name: Publish to PyPI + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + run: | + twine upload dist/* \ No newline at end of file diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000..8cdeb67 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,53 @@ +name: Test Package + +on: + push: + branches: + - '**' + tags-ignore: + - 'v*' + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.11' + + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y libglib2.0-0 libsm6 libxrender1 libxext6 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pytest + + - name: Lint with flake8 + run: | + pip install flake8 + flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics || true + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + + - name: Run tests + run: | + pytest --maxfail=3 --disable-warnings -q || echo "No tests found" + + - name: Build the package + run: | + pip install build + python -m build + + - name: Check the package + run: | + pip install twine + twine check dist/* \ No newline at end of file diff --git a/.gitignore b/.gitignore index 04a19b2..c086726 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,3 @@ -# Virtual environment -env/ - # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -14,7 +11,6 @@ __pycache__/ build/ develop-eggs/ dist/ -downloads/ eggs/ .eggs/ lib/ diff --git a/README.md b/README.md index a9cc2b5..34d7b74 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,56 @@ -# PeSAR: Perception for Search and Rescue -✈️ *AI-powered visual detection system for aerial search operations* -![Alt Text](data/output.gif) +# PiSAR: Pipeline for Aerial Search and Rescue +✈️ *AI-powered visual detection pipeline for aerial search operations* + +PiSAR is an open-source pipeline designed to streamline aerial search and rescue missions using advanced AI-based visual detection. It enables rapid analysis of aerial imagery and video to assist responders in locating people or objects of interest. + +**Try PiSAR online:** +You can test PiSAR directly in your browser via our [PiSAR Space](https://huggingface.co/spaces/eadali/PiSAR). + +![Demo GIF](data/output.gif) ## Installation + ### Prerequisites -- Python 3.8 or higher -- CUDA (optional, for GPU support) +- Python 3.8+ +- pip3 (Python package installer) +- *(Optional)* CUDA-enabled GPU & CUDA Toolkit for GPU acceleration +### Setup -### Steps -1. Clone the repository: -```bash - git clone https://github.com/your-username/aerial-object-detection.git - cd aerial-object-detection -``` +1. 
**Clone the repository** + ```bash + git clone https://github.com/eadali/PiSAR.git + cd PiSAR + ``` -2. Install the required dependencies: -```bash - pip3 install -r requirements.txt -``` +2. **(Recommended) Create a virtual environment** + ```bash + python3 -m venv pisar + source pisar/bin/activate + ``` -3. (Optional) If you want to use GPU acceleration, ensure you have the correct version of PyTorch installed with CUDA support. You can install it using: -```bash - pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu118 -``` +3. **Install dependencies** + - **CPU only:** + ```bash + pip3 install -r requirements.txt + ``` + - **GPU (CUDA) support:** + ```bash + pip3 install -r requirements-cuda.txt + ``` + +4. **(Optional) Install PyTorch with a specific CUDA version** + See [PyTorch's official instructions](https://pytorch.org/get-started/locally/). + +5. **Verify installation** + ```bash + python3 -c "import torch; print(torch.cuda.is_available())" + ``` +*See `requirements.txt` and `requirements-cuda.txt` for details.* + +--- ## Usage ### Running the Script @@ -52,5 +77,8 @@ The script supports the following command-line arguments: | --device | Device to run the model on (cpu or cuda). | cpu | +--- + ## License -This project is licensed under the MIT License. See the LICENSE file for details. + +This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details. 
diff --git a/config/yolo8n-bytetrack-cpu.yaml b/config/yolo8n-bytetrack-cpu.yaml new file mode 100644 index 0000000..c085178 --- /dev/null +++ b/config/yolo8n-bytetrack-cpu.yaml @@ -0,0 +1,14 @@ +# YOLOv8n + ByteTrack Configuration +pipeline: + detector: + model: yolov8n + categories: ['LightVehicle', 'Person', 'Building', 'UPole', 'Boat', 'Bike', 'Container', 'Truck', 'Gastank', 'Digger', 'Solarpanels', 'Bus'] + thresholds: + confidence: 0.6 + iou: 0.4 + slicing: + overlap: 0.2 + device: cpu + + tracker: + algorithm: bytetrack diff --git a/config/yolo8n-bytetrack-cuda.yaml b/config/yolo8n-bytetrack-cuda.yaml new file mode 100644 index 0000000..ef1852f --- /dev/null +++ b/config/yolo8n-bytetrack-cuda.yaml @@ -0,0 +1,14 @@ +# YOLOv8n + ByteTrack Configuration +pipeline: + detector: + model: yolov8n + categories: ['LightVehicle', 'Person', 'Building', 'UPole', 'Boat', 'Bike', 'Container', 'Truck', 'Gastank', 'Digger', 'Solarpanels', 'Bus'] + thresholds: + confidence: 0.6 + iou: 0.4 + slicing: + overlap: 0.2 + device: cuda:0 + + tracker: + algorithm: bytetrack diff --git a/data/WALDO30_yolov8n_640x640.pt b/data/WALDO30_yolov8n_640x640.pt deleted file mode 100644 index e83cf3d..0000000 Binary files a/data/WALDO30_yolov8n_640x640.pt and /dev/null differ diff --git a/data/image_dense_example.png b/data/image_dense_example.png deleted file mode 100644 index 319f920..0000000 --- a/data/image_dense_example.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:01e9312b2e407a5830926b288291b8304e00b49b6ce6ad284cd466965303fec3 -size 398270 diff --git a/data/output.gif b/data/output.gif deleted file mode 100644 index 63904a0..0000000 Binary files a/data/output.gif and /dev/null differ diff --git a/data/video_dense_example.mp4 b/data/video_dense_example.mp4 deleted file mode 100644 index 6473d20..0000000 --- a/data/video_dense_example.mp4 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:5a2f004df630fa1e620927109858ed872d430656d1e5bd94ac21d18a8156fcc1 -size 5182797 diff --git a/demo.py b/demo.py index 3084c31..77152a6 100644 --- a/demo.py +++ b/demo.py @@ -1,81 +1,86 @@ import argparse import cv2 -import tqdm -from models import build_model -from engine import run_on_frame -from visualization import draw_estimations - - -# Constants -WINDOW_NAME = 'Aerial Detections' - - -def get_args_parser(): - parser = argparse.ArgumentParser('Set aerial object detector', add_help=False) - # Input arguments - parser.add_argument('--image-input', help='Path to image file') - parser.add_argument('--video-input', help='Path to video file') - # Detector arguments - parser.add_argument('--detector', default='waldo30', type=str, help='Detector model') - parser.add_argument('--confidence-threshold', default=0.8, type=float, help='Confidence threshold for detections') - parser.add_argument('--overlap-height-ratio', default=0.2, type=float, help='Overlap height ratio') - parser.add_argument('--overlap-width-ratio', default=0.2, type=float, help='Overlap width ratio') - # Tracker arguments - parser.add_argument('--tracker', type=str, help='Tracker type') - # Device arguments - parser.add_argument('--device', default='cpu', type=str, help='Device to run the model on') - return parser - - -def frame_from_video(video): - while video.isOpened(): - success, frame = video.read() - if success: - yield frame - else: +import numpy as np +from tqdm import tqdm +from pipeline import build_pipeline +from util import cfg, load_config, load_onnx_model +import supervision as sv + +WINDOW_NAME = "Aerial Detections" + + +def get_args(): + parser = argparse.ArgumentParser(description="Aerial object detection and tracking") + parser.add_argument("config", type=str, help="Path to config file") + parser.add_argument("--onnx-path", type=str, required=True, help="Path to ONNX model file") + input_group = parser.add_mutually_exclusive_group(required=True) + 
input_group.add_argument("--image", type=str, help="Path to image file") + input_group.add_argument("--video", type=str, help="Path to video file") + input_group.add_argument("--camid", type=int, help="Camera ID for video capture") + return parser.parse_args() + + +def frame_generator(source): + cap = cv2.VideoCapture(source) + while cap.isOpened(): + ret, frame = cap.read() + if not ret: break - - -def process_image(model, image_path): - image = cv2.imread(image_path) - class_id_to_name = model.get_class_mapping() - estimations = run_on_frame(model, image) - vis_image = draw_estimations(image, estimations, class_id_to_name) - cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) - cv2.imshow(WINDOW_NAME, vis_image) - cv2.waitKey(0) - cv2.destroyAllWindows() - - -def process_video(model, video_path): - video = cv2.VideoCapture(video_path) - num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - frame_gen = frame_from_video(video) - class_id_to_name = model.get_class_mapping() - - for frame in tqdm.tqdm(frame_gen, total=num_frames): - estimations = run_on_frame(model, frame) - vis_frame = draw_estimations(frame, estimations, class_id_to_name) - cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) - cv2.imshow(WINDOW_NAME, vis_frame) - if cv2.waitKey(1) == 27: # ESC key to quit - break - - video.release() - cv2.destroyAllWindows() - - -def main(args): - model = build_model(args) - if args.image_input: - process_image(model, args.image_input) - elif args.video_input: - process_video(model, args.video_input) + yield frame + cap.release() + + +def annotate_frame(frame, detections, class_map): + box_annotator = sv.BoxAnnotator(thickness=2) + label_annotator = sv.LabelAnnotator(text_scale=0.5, text_thickness=1, text_padding=1) + labels = [] + for class_id, tracker_id in zip(detections.class_id, detections.tracker_id): + class_name = class_map.get(class_id, "Unknown") + if np.isnan(tracker_id): + labels.append(class_name) + else: + labels.append(f"#{int(tracker_id)} {class_name}") + 
frame = box_annotator.annotate(scene=frame, detections=detections) + frame = label_annotator.annotate(scene=frame, detections=detections, labels=labels) + return frame + + +def show_frame(window, frame): + cv2.namedWindow(window, cv2.WINDOW_NORMAL) + cv2.imshow(window, frame) + key = cv2.waitKey(1) + # Quit on 'q' or ESC + if key in (ord('q'), 27): + return False + return True + + +def main(): + args = get_args() + load_config(cfg, args.config) + pipeline = build_pipeline(cfg.pipeline) + load_onnx_model(pipeline.detector, args.onnx_path) + category_mapping = pipeline.detector.get_category_mapping() + + if args.image: + image = cv2.imread(args.image) + if image is None: + print(f"Error: Unable to load image {args.image}") + return + detections = pipeline(image) + vis = annotate_frame(image, detections, category_mapping) + cv2.imshow(WINDOW_NAME, vis) + cv2.waitKey(0) + cv2.destroyAllWindows() else: - print("Error: No input provided. Use --image-input or --video-input.") + source = args.camid if args.camid is not None else args.video + for frame in tqdm(frame_generator(source), desc="Processing"): + detections = pipeline(frame) + vis = annotate_frame(frame, detections, category_mapping) + if not show_frame(WINDOW_NAME, vis): + break + cv2.destroyAllWindows() -if __name__ == '__main__': - parser = argparse.ArgumentParser('Aerial object detection and tracking inference script', parents=[get_args_parser()]) - args = parser.parse_args() - main(args) +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/downloads/forest.jpg b/downloads/forest.jpg new file mode 100644 index 0000000..81c8304 --- /dev/null +++ b/downloads/forest.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:549e362299f6967f4e152d20d7bd1d7c1e7e8ec2a747a63b57669ae1734c7d33 +size 148388 diff --git a/downloads/forest.mp4 b/downloads/forest.mp4 new file mode 100644 index 0000000..4000cb8 --- /dev/null +++ b/downloads/forest.mp4 @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8a52f979cd37c39b382028c35a52628fe5ef9c9fd4d20896fc416924bcbc6b0f +size 6403647 diff --git a/downloads/yolo8n-416.onnx b/downloads/yolo8n-416.onnx new file mode 100644 index 0000000..6fcef8f --- /dev/null +++ b/downloads/yolo8n-416.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afbf81f217973b0c9e0b256c0b156d2f08b55169b7ac3f9e2e52004a2449fad1 +size 12162747 diff --git a/downloads/yolo8n-640.onnx b/downloads/yolo8n-640.onnx new file mode 100644 index 0000000..e1b49b8 --- /dev/null +++ b/downloads/yolo8n-640.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:346359fb811146dc1793406cbad1ac5f88dddce172276f24beae789e85d5efba +size 12259807 diff --git a/engine.py b/engine.py deleted file mode 100644 index 510311b..0000000 --- a/engine.py +++ /dev/null @@ -1,6 +0,0 @@ -import cv2 - - -def run_on_frame(model, frame): - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - return model(frame) diff --git a/models/__init__.py b/models/__init__.py deleted file mode 100644 index a4e39ba..0000000 --- a/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .model import build as build_model diff --git a/models/bytetrack.py b/models/bytetrack.py deleted file mode 100644 index 66c5ef4..0000000 --- a/models/bytetrack.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -import supervision as sv - - -class ByteTrack: - def __init__(self): - self.tracker = sv.ByteTrack() - self.smoother = sv.DetectionsSmoother() - - @staticmethod - def _convert_pytorch_to_supervision(detections): - """Convert PyTorch detections to Supervision Detections.""" - boxes = detections['boxes'].cpu().numpy() - scores = detections['scores'].cpu().numpy() - labels = detections['labels'].cpu().numpy() - return sv.Detections(xyxy=boxes, confidence=scores, class_id=labels) - - @staticmethod - def _convert_supervision_to_pytorch(detections): - """Convert Supervision Detections to PyTorch detections.""" - return { - 'boxes': 
torch.tensor(detections.xyxy, dtype=torch.float32), - 'labels': torch.tensor(detections.class_id, dtype=torch.int64), - 'scores': torch.tensor(detections.confidence, dtype=torch.float32), - 'ids': torch.tensor(detections.tracker_id, dtype=torch.float32) - } - - def __call__(self, detections): - """Process detections using ByteTrack.""" - supervision_detections = self._convert_pytorch_to_supervision(detections) - tracked_detections = self.tracker.update_with_detections(supervision_detections) - smoothed_detections = self.smoother.update_with_detections(tracked_detections) - return self._convert_supervision_to_pytorch(smoothed_detections) - - -def build(args): - """Factory function to create a ByteTrack instance.""" - return ByteTrack() diff --git a/models/dummytrack.py b/models/dummytrack.py deleted file mode 100644 index e8b97e6..0000000 --- a/models/dummytrack.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch -import numpy as np -from typing import Dict - - -class DummyTrack: - """A placeholder tracker that assigns NaN values as tracker IDs.""" - def __call__(self, detections: Dict[str, torch.Tensor]): - num_boxes = detections['boxes'].shape[0] - detections['ids'] = torch.full((num_boxes,), np.nan, dtype=torch.float32) - return detections - - -def build(args): - """Factory function to create a DummyTrack instance.""" - return DummyTrack() diff --git a/models/model.py b/models/model.py deleted file mode 100644 index 16870f9..0000000 --- a/models/model.py +++ /dev/null @@ -1,43 +0,0 @@ -from .waldo30 import build as build_waldo30 -from .bytetrack import build as build_bytetrack -from .dummytrack import build as build_bypasstrack - - -class Model: - def __init__(self, detector, tracker=None): - self.detector = detector - self.tracker = tracker - - def __call__(self, frame): - detections = self._apply_detector(frame) - detections = self._apply_tracker(detections) - return detections - - def _apply_detector(self, frame): - return self.detector(frame) - - def 
_apply_tracker(self, detections): - return self.tracker(detections) - - def get_class_mapping(self): - return self.detector.get_class_mapping() - - -def _build_detector(args): - if args.detector == 'waldo30': - return build_waldo30(args) - raise ValueError(f'Invalid detector: {args.detector}') - - -def _build_tracker(args): - if args.tracker == 'bytetrack': - return build_bytetrack(args) - if args.tracker is None: - return build_bypasstrack(args) - raise ValueError(f'Invalid tracker: {args.tracker}') - - -def build(args): - detector = _build_detector(args) - tracker = _build_tracker(args) - return Model(detector=detector, tracker=tracker) diff --git a/models/waldo30.py b/models/waldo30.py deleted file mode 100644 index 7cf7dd1..0000000 --- a/models/waldo30.py +++ /dev/null @@ -1,92 +0,0 @@ -from sahi import AutoDetectionModel -from sahi.predict import get_sliced_prediction -import torch - - -class WALDO30: - def __init__( - self, - path, - confidence_threshold=0.8, - device="cpu", - overlap_height_ratio=0.2, - overlap_width_ratio=0.2, - ): - self.overlap_height_ratio = overlap_height_ratio - self.overlap_width_ratio = overlap_width_ratio - self.slice_height = 640 - self.slice_width = 640 - - self.detection_model = AutoDetectionModel.from_pretrained( - model_type="yolov8", - model_path=path, - confidence_threshold=confidence_threshold, - image_size=640, - device=device, - load_at_init=True, - ) - - self.class_id_to_name = { - 0: "Car", - 1: "Person", - 4: "Boat", - 5: "Bike", - 7: "Truck", - 11: "Bus", - } - - # Define allowed class IDs for filtering - self.allowed_class_ids = {0, 1, 4, 5, 7, 11} - - def __call__(self, frame): - result = get_sliced_prediction( - frame, - self.detection_model, - slice_height=self.slice_height, - slice_width=self.slice_width, - overlap_height_ratio=self.overlap_height_ratio, - overlap_width_ratio=self.overlap_width_ratio, - verbose=0 - ) - - if not result.object_prediction_list: - return self._create_empty_detection() - - boxes, 
labels, scores = self._extract_detections(result) - return { - "boxes": torch.tensor(boxes, dtype=torch.float32), - "labels": torch.tensor(labels, dtype=torch.int64), - "scores": torch.tensor(scores, dtype=torch.float32), - } - - def _extract_detections(self, result): - boxes, labels, scores = [], [], [] - - for prediction in result.object_prediction_list: - if prediction.category.id not in self.allowed_class_ids: - continue # Skip unwanted classes - boxes.append(prediction.bbox.to_voc_bbox()) - labels.append(prediction.category.id) - scores.append(prediction.score.value) - - return boxes, labels, scores - - def _create_empty_detection(self): - return { - "boxes": torch.empty((0, 4), dtype=torch.float32), - "labels": torch.empty((0,), dtype=torch.int64), - "scores": torch.empty((0,), dtype=torch.float32), - } - - def get_class_mapping(self): - return self.class_id_to_name - - -def build(args): - return WALDO30( - path="data/WALDO30_yolov8n_640x640.pt", - confidence_threshold=args.confidence_threshold, - device=args.device, - overlap_height_ratio=args.overlap_height_ratio, - overlap_width_ratio=args.overlap_width_ratio, - ) diff --git a/pipeline/__init__.py b/pipeline/__init__.py new file mode 100644 index 0000000..ffcfe15 --- /dev/null +++ b/pipeline/__init__.py @@ -0,0 +1,14 @@ +from .pipeline import Pipeline +from .detectors import build_detector +from .trackers import build_tracker + +def build_pipeline(config): + """ + Build and return a pipeline based on the provided configuration. 
+ """ + # Build detector and tracker using the config + detector = build_detector(config.detector) + tracker = build_tracker(config.tracker) + + # Create and return a Pipeline object with detector and tracker + return Pipeline(detector=detector, tracker=tracker) \ No newline at end of file diff --git a/pipeline/detectors/__init__.py b/pipeline/detectors/__init__.py new file mode 100644 index 0000000..c26a4d6 --- /dev/null +++ b/pipeline/detectors/__init__.py @@ -0,0 +1,14 @@ +from .yolo import YOLO + + +def build_detector(config): + """ + Build the detection model based on the provided configuration. + """ + # Initialize the YOLO object detection model + return YOLO(config.thresholds.confidence, + config.thresholds.iou, + config.slicing.overlap, + config.categories, + config.device) + \ No newline at end of file diff --git a/pipeline/detectors/yolo.py b/pipeline/detectors/yolo.py new file mode 100644 index 0000000..839d521 --- /dev/null +++ b/pipeline/detectors/yolo.py @@ -0,0 +1,80 @@ +import numpy as np +import supervision as sv +from sahi import AutoDetectionModel +from sahi.predict import get_sliced_prediction + +class YOLO: + def __init__(self, confidence_threshold, iou_threshold, slicing_overlap, categories, device): + """ + YOLO detector wrapper using SAHI for sliced prediction. + + Args: + confidence_threshold (float): Minimum confidence for detections. + iou_threshold (float): IoU threshold for NMS (not used directly here). + slicing_overlap (float): Overlap ratio for slicing. + categories (list): List of class names. + device (str): Device to run the model on ('cpu' or 'cuda'). 
+ """ + self.model = None + self.confidence_threshold = confidence_threshold + self.iou_threshold = iou_threshold + self.slicing_overlap = slicing_overlap + self.categories = categories + self.category_mapping = {str(i): category for i, category in enumerate(categories)} + self.device = device + + def load_onnx_model(self, path): + """ + Loads the ONNX model using SAHI's AutoDetectionModel. + """ + self.model = AutoDetectionModel.from_pretrained( + model_type='yolov8onnx', + model_path=path, + confidence_threshold=self.confidence_threshold, + category_mapping=self.category_mapping, + device=self.device + ) + + def __call__(self, frame): + """ + Runs sliced prediction on the input frame and returns a supervision.Detections object. + """ + # Get input shape from ONNX model + input_shape = self.model.model.get_inputs()[0].shape[2] + result = get_sliced_prediction( + frame, + self.model, + slice_height=input_shape, + slice_width=input_shape, + overlap_height_ratio=self.slicing_overlap, + overlap_width_ratio=self.slicing_overlap, + verbose=False, + ) + boxes = [] + confidences = [] + class_ids = [] + for det in result.object_prediction_list: + boxes.append(det.bbox.to_xyxy()) + confidences.append(det.score.value) + class_ids.append(det.category.id) + if boxes: + boxes = np.array(boxes) + confidences = np.array(confidences) + class_ids = np.array(class_ids) + else: + boxes = np.zeros((0, 4)) + confidences = np.zeros((0,)) + class_ids = np.zeros((0,)) + detections = sv.Detections( + xyxy=boxes, + confidence=confidences, + class_id=class_ids, + ) + return detections + + def get_category_mapping(self): + """ + Returns the category mapping. 
+ """ + # Convert string keys to integers + return {int(k): v for k, v in self.category_mapping.items()} \ No newline at end of file diff --git a/pipeline/pipeline.py b/pipeline/pipeline.py new file mode 100644 index 0000000..0fc64a7 --- /dev/null +++ b/pipeline/pipeline.py @@ -0,0 +1,26 @@ +class Pipeline: + def __init__(self, detector, tracker): + """ + Initialize the Pipeline class with a detector and tracker. + + Args: + detector (object): The object detection model. + tracker (object): The object tracking model. + """ + self.detector = detector + self.tracker = tracker + + def load_state_dict(self, onnx_path): + self.detector.load_state_dict(onnx_path) + + def __call__(self, frame): + """ + Run the detection and tracking on the input image. + + Args: + frame (np.ndarray): The input image to process. + + Returns: + supervision.Detections: Detections object after tracking. + """ + return self.tracker(self.detector(frame)) \ No newline at end of file diff --git a/pipeline/trackers/__init__.py b/pipeline/trackers/__init__.py new file mode 100644 index 0000000..15d2217 --- /dev/null +++ b/pipeline/trackers/__init__.py @@ -0,0 +1,8 @@ +from .bytetrack import ByteTrack + +def build_tracker(cfg): + """ + Build the tracking model based on the provided configuration. 
+ """ + # Initialize the tracker + return ByteTrack() \ No newline at end of file diff --git a/pipeline/trackers/bytetrack.py b/pipeline/trackers/bytetrack.py new file mode 100644 index 0000000..420f6b8 --- /dev/null +++ b/pipeline/trackers/bytetrack.py @@ -0,0 +1,14 @@ +import supervision as sv + + +class ByteTrack: + def __init__(self): + self.tracker = sv.ByteTrack() + self.smoother = sv.DetectionsSmoother() + + def __call__(self, detections): + """Process detections using ByteTrack.""" + # supervision_detections = self._convert_pytorch_to_supervision(detections) + tracked_detections = self.tracker.update_with_detections(detections) + smoothed_detections = self.smoother.update_with_detections(tracked_detections) + return smoothed_detections \ No newline at end of file diff --git a/pisar/bin/Activate.ps1 b/pisar/bin/Activate.ps1 new file mode 100644 index 0000000..b49d77b --- /dev/null +++ b/pisar/bin/Activate.ps1 @@ -0,0 +1,247 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. 
+ +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. 
+ +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. + if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. 
+#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. + if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." 
+ $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. 
+$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/pisar/bin/activate b/pisar/bin/activate new file mode 100644 index 0000000..82e36eb --- /dev/null +++ b/pisar/bin/activate @@ -0,0 +1,70 @@ +# This file must be used with "source bin/activate" *from bash* +# You cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # Call hash to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + hash -r 2> /dev/null + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! 
+ unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +# on Windows, a path can contain colons and backslashes and has to be converted: +if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then + # transform D:\path\to\venv to /d/path/to/venv on MSYS + # and to /cygdrive/d/path/to/venv on Cygwin + export VIRTUAL_ENV=$(cygpath /home/eadali/Desktop/PiSAR/pisar) +else + # use the path as-is + export VIRTUAL_ENV=/home/eadali/Desktop/PiSAR/pisar +fi + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/"bin":$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1='(pisar) '"${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT='(pisar) ' + export VIRTUAL_ENV_PROMPT +fi + +# Call hash to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +hash -r 2> /dev/null diff --git a/pisar/bin/activate.csh b/pisar/bin/activate.csh new file mode 100644 index 0000000..ad0dcd3 --- /dev/null +++ b/pisar/bin/activate.csh @@ -0,0 +1,27 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. + +# Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. 
+deactivate nondestructive + +setenv VIRTUAL_ENV /home/eadali/Desktop/PiSAR/pisar + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/"bin":$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = '(pisar) '"$prompt" + setenv VIRTUAL_ENV_PROMPT '(pisar) ' +endif + +alias pydoc python -m pydoc + +rehash diff --git a/pisar/bin/activate.fish b/pisar/bin/activate.fish new file mode 100644 index 0000000..28074b1 --- /dev/null +++ b/pisar/bin/activate.fish @@ -0,0 +1,69 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/). You cannot run it directly. + +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + end + + set -e VIRTUAL_ENV + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV /home/eadali/Desktop/PiSAR/pisar + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/"bin $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. 
+ functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) '(pisar) ' (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT '(pisar) ' +end diff --git a/pisar/bin/coloredlogs b/pisar/bin/coloredlogs new file mode 100755 index 0000000..fbd331e --- /dev/null +++ b/pisar/bin/coloredlogs @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from coloredlogs.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/f2py b/pisar/bin/f2py new file mode 100755 index 0000000..fba27b3 --- /dev/null +++ b/pisar/bin/f2py @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from numpy.f2py.f2py2e import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/fonttools b/pisar/bin/fonttools new file mode 100755 index 0000000..32e92a6 --- /dev/null +++ b/pisar/bin/fonttools @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/humanfriendly b/pisar/bin/humanfriendly new file mode 100755 index 0000000..26b3e4c --- /dev/null +++ b/pisar/bin/humanfriendly @@ -0,0 +1,8 @@ 
+#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from humanfriendly.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/isympy b/pisar/bin/isympy new file mode 100755 index 0000000..07847a3 --- /dev/null +++ b/pisar/bin/isympy @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from isympy import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/normalizer b/pisar/bin/normalizer new file mode 100755 index 0000000..9c2a721 --- /dev/null +++ b/pisar/bin/normalizer @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from charset_normalizer import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli.cli_detect()) diff --git a/pisar/bin/numpy-config b/pisar/bin/numpy-config new file mode 100755 index 0000000..47abe95 --- /dev/null +++ b/pisar/bin/numpy-config @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from numpy._configtool import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/onnxruntime_test b/pisar/bin/onnxruntime_test new file mode 100755 index 0000000..9f677b0 --- /dev/null +++ b/pisar/bin/onnxruntime_test @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from onnxruntime.tools.onnxruntime_test import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/pip b/pisar/bin/pip new file mode 100755 index 0000000..c6619a1 --- /dev/null +++ 
b/pisar/bin/pip @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/pip3 b/pisar/bin/pip3 new file mode 100755 index 0000000..c6619a1 --- /dev/null +++ b/pisar/bin/pip3 @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/pip3.12 b/pisar/bin/pip3.12 new file mode 100755 index 0000000..c6619a1 --- /dev/null +++ b/pisar/bin/pip3.12 @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/proton b/pisar/bin/proton new file mode 100755 index 0000000..b02b542 --- /dev/null +++ b/pisar/bin/proton @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from triton.profiler.proton import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/proton-viewer b/pisar/bin/proton-viewer new file mode 100755 index 0000000..4ca3b6f --- /dev/null +++ b/pisar/bin/proton-viewer @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from triton.profiler.viewer import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/pyftmerge b/pisar/bin/pyftmerge new file mode 100755 index 0000000..23ddaae --- 
/dev/null +++ b/pisar/bin/pyftmerge @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.merge import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/pyftsubset b/pisar/bin/pyftsubset new file mode 100755 index 0000000..d1304db --- /dev/null +++ b/pisar/bin/pyftsubset @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.subset import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/python b/pisar/bin/python new file mode 120000 index 0000000..b8a0adb --- /dev/null +++ b/pisar/bin/python @@ -0,0 +1 @@ +python3 \ No newline at end of file diff --git a/pisar/bin/python3 b/pisar/bin/python3 new file mode 120000 index 0000000..ae65fda --- /dev/null +++ b/pisar/bin/python3 @@ -0,0 +1 @@ +/usr/bin/python3 \ No newline at end of file diff --git a/pisar/bin/python3.12 b/pisar/bin/python3.12 new file mode 120000 index 0000000..b8a0adb --- /dev/null +++ b/pisar/bin/python3.12 @@ -0,0 +1 @@ +python3 \ No newline at end of file diff --git a/pisar/bin/sahi b/pisar/bin/sahi new file mode 100755 index 0000000..215b645 --- /dev/null +++ b/pisar/bin/sahi @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from sahi.cli import app +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(app()) diff --git a/pisar/bin/torchfrtrace b/pisar/bin/torchfrtrace new file mode 100755 index 0000000..a856985 --- /dev/null +++ b/pisar/bin/torchfrtrace @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tools.flight_recorder.fr_trace import main +if __name__ == '__main__': + sys.argv[0] = 
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/torchrun b/pisar/bin/torchrun new file mode 100755 index 0000000..0f2bcf3 --- /dev/null +++ b/pisar/bin/torchrun @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from torch.distributed.run import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/tqdm b/pisar/bin/tqdm new file mode 100755 index 0000000..ede107b --- /dev/null +++ b/pisar/bin/tqdm @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tqdm.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/bin/ttx b/pisar/bin/ttx new file mode 100755 index 0000000..d910b42 --- /dev/null +++ b/pisar/bin/ttx @@ -0,0 +1,8 @@ +#!/home/eadali/Desktop/PiSAR/pisar/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.ttx import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/pisar/lib64 b/pisar/lib64 new file mode 120000 index 0000000..7951405 --- /dev/null +++ b/pisar/lib64 @@ -0,0 +1 @@ +lib \ No newline at end of file diff --git a/pisar/pyvenv.cfg b/pisar/pyvenv.cfg new file mode 100644 index 0000000..668e506 --- /dev/null +++ b/pisar/pyvenv.cfg @@ -0,0 +1,5 @@ +home = /usr/bin +include-system-site-packages = false +version = 3.12.3 +executable = /usr/bin/python3.12 +command = /usr/bin/python3 -m venv /home/eadali/Desktop/PiSAR/pisar diff --git a/pisar/share/man/man1/isympy.1 b/pisar/share/man/man1/isympy.1 new file mode 100644 index 0000000..0ff9661 --- /dev/null +++ b/pisar/share/man/man1/isympy.1 @@ -0,0 +1,188 @@ +'\" -*- coding: us-ascii -*- +.if \n(.g .ds T< \\FC +.if \n(.g .ds T> \\F[\n[.fam]] +.de URL +\\$2 
\(la\\$1\(ra\\$3 +.. +.if \n(.g .mso www.tmac +.TH isympy 1 2007-10-8 "" "" +.SH NAME +isympy \- interactive shell for SymPy +.SH SYNOPSIS +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[\fB-c\fR | \fB--console\fR] [\fB-p\fR ENCODING | \fB--pretty\fR ENCODING] [\fB-t\fR TYPE | \fB--types\fR TYPE] [\fB-o\fR ORDER | \fB--order\fR ORDER] [\fB-q\fR | \fB--quiet\fR] [\fB-d\fR | \fB--doctest\fR] [\fB-C\fR | \fB--no-cache\fR] [\fB-a\fR | \fB--auto\fR] [\fB-D\fR | \fB--debug\fR] [ +-- | PYTHONOPTIONS] +'in \n(.iu-\nxu +.ad b +'hy +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[ +{\fB-h\fR | \fB--help\fR} +| +{\fB-v\fR | \fB--version\fR} +] +'in \n(.iu-\nxu +.ad b +'hy +.SH DESCRIPTION +isympy is a Python shell for SymPy. It is just a normal python shell +(ipython shell if you have the ipython package installed) that executes +the following commands so that you don't have to: +.PP +.nf +\*(T< +>>> from __future__ import division +>>> from sympy import * +>>> x, y, z = symbols("x,y,z") +>>> k, m, n = symbols("k,m,n", integer=True) + \*(T> +.fi +.PP +So starting isympy is equivalent to starting python (or ipython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. For more complicated programs, it is recommended +to write a script and import things explicitly (using the "from sympy +import sin, log, Symbol, ..." idiom). +.SH OPTIONS +.TP +\*(T<\fB\-c \fR\*(T>\fISHELL\fR, \*(T<\fB\-\-console=\fR\*(T>\fISHELL\fR +Use the specified shell (python or ipython) as +console backend instead of the default one (ipython +if present or python otherwise). + +Example: isympy -c python + +\fISHELL\fR could be either +\&'ipython' or 'python' +.TP +\*(T<\fB\-p \fR\*(T>\fIENCODING\fR, \*(T<\fB\-\-pretty=\fR\*(T>\fIENCODING\fR +Setup pretty printing in SymPy. By default, the most pretty, unicode +printing is enabled (if the terminal supports it). 
+You can use less +pretty ASCII printing instead or no pretty printing at all. + +Example: isympy -p no + +\fIENCODING\fR must be one of 'unicode', +\&'ascii' or 'no'. +.TP +\*(T<\fB\-t \fR\*(T>\fITYPE\fR, \*(T<\fB\-\-types=\fR\*(T>\fITYPE\fR +Setup the ground types for the polys. By default, gmpy ground types +are used if gmpy2 or gmpy is installed, otherwise it falls back to python +ground types, which are a little bit slower. You can manually +choose python ground types even if gmpy is installed (e.g., for testing purposes). + +Note that sympy ground types are not supported, and should be used +only for experimental purposes. + +Note that the gmpy1 ground type is primarily intended for testing; it forces the +use of gmpy even if gmpy2 is available. + +This is the same as setting the environment variable +SYMPY_GROUND_TYPES to the given ground type (e.g., +SYMPY_GROUND_TYPES='gmpy') + +The ground types can be determined interactively from the variable +sympy.polys.domains.GROUND_TYPES inside the isympy shell itself. + +Example: isympy -t python + +\fITYPE\fR must be one of 'gmpy', +\&'gmpy1' or 'python'. +.TP +\*(T<\fB\-o \fR\*(T>\fIORDER\fR, \*(T<\fB\-\-order=\fR\*(T>\fIORDER\fR +Setup the ordering of terms for printing. The default is lex, which +orders terms lexicographically (e.g., x**2 + x + 1). You can choose +other orderings, such as rev-lex, which will use reverse +lexicographic ordering (e.g., 1 + x + x**2). + +Note that for very large expressions, ORDER='none' may speed up +printing considerably, with the tradeoff that the order of the terms +in the printed expression will have no canonical order + +Example: isympy -o rev-lex + +\fIORDER\fR must be one of 'lex', 'rev-lex', 'grlex', +\&'rev-grlex', 'grevlex', 'rev-grevlex', 'old', or 'none'. +.TP +\*(T<\fB\-q\fR\*(T>, \*(T<\fB\-\-quiet\fR\*(T> +Print only Python's and SymPy's versions to stdout at startup, and nothing else.
+.TP +\*(T<\fB\-d\fR\*(T>, \*(T<\fB\-\-doctest\fR\*(T> +Use the same format that should be used for doctests. This is +equivalent to '\fIisympy -c python -p no\fR'. +.TP +\*(T<\fB\-C\fR\*(T>, \*(T<\fB\-\-no\-cache\fR\*(T> +Disable the caching mechanism. Disabling the cache may slow certain +operations down considerably. This is useful for testing the cache, +or for benchmarking, as the cache can result in deceptive benchmark timings. + +This is the same as setting the environment variable SYMPY_USE_CACHE +to 'no'. +.TP +\*(T<\fB\-a\fR\*(T>, \*(T<\fB\-\-auto\fR\*(T> +Automatically create missing symbols. Normally, typing a name of a +Symbol that has not been instantiated first would raise NameError, +but with this option enabled, any undefined name will be +automatically created as a Symbol. This only works in IPython 0.11. + +Note that this is intended only for interactive, calculator style +usage. In a script that uses SymPy, Symbols should be instantiated +at the top, so that it's clear what they are. + +This will not override any names that are already defined, which +includes the single character letters represented by the mnemonic +QCOSINE (see the "Gotchas and Pitfalls" document in the +documentation). You can delete existing names by executing "del +name" in the shell itself. You can see if a name is defined by typing +"'name' in globals()". + +The Symbols that are created using this have default assumptions. +If you want to place assumptions on symbols, you should create them +using symbols() or var(). + +Finally, this only works in the top level namespace. So, for +example, if you define a function in isympy with an undefined +Symbol, it will not work. +.TP +\*(T<\fB\-D\fR\*(T>, \*(T<\fB\-\-debug\fR\*(T> +Enable debugging output. This is the same as setting the +environment variable SYMPY_DEBUG to 'True'. The debug status is set +in the variable SYMPY_DEBUG within isympy. 
+.TP +-- \fIPYTHONOPTIONS\fR +These options will be passed on to \fIipython (1)\fR shell. +Only supported when ipython is being used (standard python shell not supported). + +Two dashes (--) are required to separate \fIPYTHONOPTIONS\fR +from the other isympy options. + +For example, to run iSymPy without startup banner and colors: + +isympy -q -c ipython -- --colors=NoColor +.TP +\*(T<\fB\-h\fR\*(T>, \*(T<\fB\-\-help\fR\*(T> +Print help output and exit. +.TP +\*(T<\fB\-v\fR\*(T>, \*(T<\fB\-\-version\fR\*(T> +Print isympy version information and exit. +.SH FILES +.TP +\*(T<\fI${HOME}/.sympy\-history\fR\*(T> +Saves the history of commands when using the python +shell as backend. +.SH BUGS +The upstreams BTS can be found at \(lahttps://github.com/sympy/sympy/issues\(ra +Please report all bugs that you find in there, this will help improve +the overall quality of SymPy. +.SH "SEE ALSO" +\fBipython\fR(1), \fBpython\fR(1) diff --git a/pisar/share/man/man1/ttx.1 b/pisar/share/man/man1/ttx.1 new file mode 100644 index 0000000..bba23b5 --- /dev/null +++ b/pisar/share/man/man1/ttx.1 @@ -0,0 +1,225 @@ +.Dd May 18, 2004 +.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7) +.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to +.\" be used, so I give a zero-width space as its argument. +.Os \& +.\" The "FontTools Manual" argument apparently has no effect in +.\" groff 1.18.1. I think it is a bug in the -mdoc groff package. +.Dt TTX 1 "FontTools Manual" +.Sh NAME +.Nm ttx +.Nd tool for manipulating TrueType and OpenType fonts +.Sh SYNOPSIS +.Nm +.Bk +.Op Ar option ... +.Ek +.Bk +.Ar file ... +.Ek +.Sh DESCRIPTION +.Nm +is a tool for manipulating TrueType and OpenType fonts. It can convert +TrueType and OpenType fonts to and from an +.Tn XML Ns -based format called +.Tn TTX . +.Tn TTX +files have a +.Ql .ttx +extension. 
+.Pp +For each +.Ar file +argument it is given, +.Nm +detects whether it is a +.Ql .ttf , +.Ql .otf +or +.Ql .ttx +file and acts accordingly: if it is a +.Ql .ttf +or +.Ql .otf +file, it generates a +.Ql .ttx +file; if it is a +.Ql .ttx +file, it generates a +.Ql .ttf +or +.Ql .otf +file. +.Pp +By default, every output file is created in the same directory as the +corresponding input file and with the same name except for the +extension, which is substituted appropriately. +.Nm +never overwrites existing files; if necessary, it appends a suffix to +the output file name before the extension, as in +.Pa Arial#1.ttf . +.Ss "General options" +.Bl -tag -width ".Fl t Ar table" +.It Fl h +Display usage information. +.It Fl d Ar dir +Write the output files to directory +.Ar dir +instead of writing every output file to the same directory as the +corresponding input file. +.It Fl o Ar file +Write the output to +.Ar file +instead of writing it to the same directory as the +corresponding input file. +.It Fl v +Be verbose. Write more messages to the standard output describing what +is being done. +.It Fl a +Allow virtual glyphs ID's on compile or decompile. +.El +.Ss "Dump options" +The following options control the process of dumping font files +(TrueType or OpenType) to +.Tn TTX +files. +.Bl -tag -width ".Fl t Ar table" +.It Fl l +List table information. Instead of dumping the font to a +.Tn TTX +file, display minimal information about each table. +.It Fl t Ar table +Dump table +.Ar table . +This option may be given multiple times to dump several tables at +once. When not specified, all tables are dumped. +.It Fl x Ar table +Exclude table +.Ar table +from the list of tables to dump. This option may be given multiple +times to exclude several tables from the dump. The +.Fl t +and +.Fl x +options are mutually exclusive. +.It Fl s +Split tables. 
Dump each table to a separate +.Tn TTX +file and write (under the name that would have been used for the output +file if the +.Fl s +option had not been given) one small +.Tn TTX +file containing references to the individual table dump files. This +file can be used as input to +.Nm +as long as the referenced files can be found in the same directory. +.It Fl i +.\" XXX: I suppose OpenType programs (exist and) are also affected. +Don't disassemble TrueType instructions. When this option is specified, +all TrueType programs (glyph programs, the font program and the +pre-program) are written to the +.Tn TTX +file as hexadecimal data instead of +assembly. This saves some time and results in smaller +.Tn TTX +files. +.It Fl y Ar n +When decompiling a TrueType Collection (TTC) file, +decompile font number +.Ar n , +starting from 0. +.El +.Ss "Compilation options" +The following options control the process of compiling +.Tn TTX +files into font files (TrueType or OpenType): +.Bl -tag -width ".Fl t Ar table" +.It Fl m Ar fontfile +Merge the input +.Tn TTX +file +.Ar file +with +.Ar fontfile . +No more than one +.Ar file +argument can be specified when this option is used. +.It Fl b +Don't recalculate glyph bounding boxes. Use the values in the +.Tn TTX +file as is. +.El +.Sh "THE TTX FILE FORMAT" +You can find some information about the +.Tn TTX +file format in +.Pa documentation.html . +In particular, you will find in that file the list of tables understood by +.Nm +and the relations between TrueType GlyphIDs and the glyph names used in +.Tn TTX +files. +.Sh EXAMPLES +In the following examples, all files are read from and written to the +current directory. Additionally, the name given for the output file +assumes in every case that it did not exist before +.Nm +was invoked. 
+.Pp +Dump the TrueType font contained in +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx FreeSans.ttf +.Pp +Compile +.Pa MyFont.ttx +into a TrueType or OpenType font file: +.Pp +.Dl ttx MyFont.ttx +.Pp +List the tables in +.Pa FreeSans.ttf +along with some information: +.Pp +.Dl ttx -l FreeSans.ttf +.Pp +Dump the +.Sq cmap +table from +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx -t cmap FreeSans.ttf +.Sh NOTES +On MS\-Windows and MacOS, +.Nm +is available as a graphical application to which files can be dropped. +.Sh SEE ALSO +.Pa documentation.html +.Pp +.Xr fontforge 1 , +.Xr ftinfo 1 , +.Xr gfontview 1 , +.Xr xmbdfed 1 , +.Xr Font::TTF 3pm +.Sh AUTHORS +.Nm +was written by +.An -nosplit +.An "Just van Rossum" Aq just@letterror.com . +.Pp +This manual page was written by +.An "Florent Rougon" Aq f.rougon@free.fr +for the Debian GNU/Linux system based on the existing FontTools +documentation. It may be freely used, modified and distributed without +restrictions. +.\" For Emacs: +.\" Local Variables: +.\" fill-column: 72 +.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\| \\| \\)[ \n]*" +.\" sentence-end-double-space: t +.\" End: \ No newline at end of file diff --git a/requirements-cuda.txt b/requirements-cuda.txt new file mode 100644 index 0000000..e721b6b --- /dev/null +++ b/requirements-cuda.txt @@ -0,0 +1,8 @@ +torch --index-url https://download.pytorch.org/whl/cu128 +torchvision --index-url https://download.pytorch.org/whl/cu128 +onnxruntime-gpu +opencv-python-headless +supervision +sahi + + diff --git a/requirements.txt b/requirements.txt index 8befe14..6dc6c05 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,8 @@ -numpy -opencv-python -sahi -torch -torchvision +torch --index-url https://download.pytorch.org/whl/cpu +torchvision --index-url https://download.pytorch.org/whl/cpu +onnxruntime +opencv-python-headless supervision -ultralytics \ No newline at end of file +sahi + + diff --git a/tests/test_basic.py b/tests/test_basic.py 
new file mode 100644 index 0000000..d77217c --- /dev/null +++ b/tests/test_basic.py @@ -0,0 +1,10 @@ +import unittest +from pesar import some_function # Replace with actual function to test + +class TestBasic(unittest.TestCase): + + def test_some_function(self): + self.assertEqual(some_function(args), expected_result) # Replace with actual test case + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/util/__init__.py b/util/__init__.py new file mode 100644 index 0000000..d962f36 --- /dev/null +++ b/util/__init__.py @@ -0,0 +1,6 @@ + +from .config import cfg, load_config +from .model_loader import load_onnx_model + + +__all__ = ["cfg", "load_config", "load_onnx_model"] diff --git a/util/config.py b/util/config.py new file mode 100644 index 0000000..224e712 --- /dev/null +++ b/util/config.py @@ -0,0 +1,25 @@ +from .yacs import CfgNode + +cfg = CfgNode(new_allowed=True) +cfg.save_dir = "./" + +# Pipeline configuration +cfg.pipeline = CfgNode(new_allowed=True) +cfg.pipeline.detector = CfgNode(new_allowed=True) +cfg.pipeline.detector.thresholds = CfgNode(new_allowed=True) +cfg.pipeline.detector.slicing = CfgNode(new_allowed=True) +# Tracker configuration +cfg.pipeline.tracker = CfgNode(new_allowed=True) + + +def load_config(cfg, args_cfg): + cfg.defrost() + cfg.merge_from_file(args_cfg) + cfg.freeze() + + +if __name__ == "__main__": + import sys + + with open(sys.argv[1], "w") as f: + print(cfg, file=f) diff --git a/util/model_loader.py b/util/model_loader.py new file mode 100644 index 0000000..4741691 --- /dev/null +++ b/util/model_loader.py @@ -0,0 +1,2 @@ +def load_onnx_model(detector, path): + detector.load_onnx_model(path) \ No newline at end of file diff --git a/util/yacs.py b/util/yacs.py new file mode 100644 index 0000000..1cbe16c --- /dev/null +++ b/util/yacs.py @@ -0,0 +1,531 @@ +# Copyright (c) 2018-present, Facebook, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################## +"""YACS -- Yet Another Configuration System is designed to be a simple +configuration management system for academic and industrial research +projects. + +See README.md for usage and examples. +""" + +import copy +import io +import logging +import os +import sys +from ast import literal_eval + +import yaml + +# Flag for py2 and py3 compatibility to use when separate code paths are necessary +# When _PY2 is False, we assume Python 3 is in use +_PY2 = sys.version_info.major == 2 + +# Filename extensions for loading configs from files +_YAML_EXTS = {"", ".yaml", ".yml"} +_PY_EXTS = {".py"} + +_FILE_TYPES = (io.IOBase,) + +# CfgNodes can only contain a limited set of valid types +_VALID_TYPES = {tuple, list, str, int, float, bool, type(None)} +# py2 allow for str and unicode +if _PY2: + _VALID_TYPES = _VALID_TYPES.union({unicode}) # noqa: F821 + +# Utilities for importing modules from file paths +if _PY2: + # imp is available in both py2 and py3 for now, but is deprecated in py3 + import imp +else: + import importlib.util + +logger = logging.getLogger(__name__) + + +class CfgNode(dict): + """ + CfgNode represents an internal node in the configuration tree. It's a simple + dict-like container that allows for attribute-based access to keys. 
+ """ + + IMMUTABLE = "__immutable__" + DEPRECATED_KEYS = "__deprecated_keys__" + RENAMED_KEYS = "__renamed_keys__" + NEW_ALLOWED = "__new_allowed__" + + def __init__(self, init_dict=None, key_list=None, new_allowed=False): + """ + Args: + init_dict (dict): the possibly-nested dictionary to initailize the + CfgNode. + key_list (list[str]): a list of names which index this CfgNode from + the root. + Currently only used for logging purposes. + new_allowed (bool): whether adding new key is allowed when merging with + other configs. + """ + # Recursively convert nested dictionaries in init_dict into CfgNodes + init_dict = {} if init_dict is None else init_dict + key_list = [] if key_list is None else key_list + init_dict = self._create_config_tree_from_dict(init_dict, key_list) + super(CfgNode, self).__init__(init_dict) + # Manage if the CfgNode is frozen or not + self.__dict__[CfgNode.IMMUTABLE] = False + # Deprecated options + # If an option is removed from the code and you don't want to break existing + # yaml configs, you can add the full config key as a string to the set below. + self.__dict__[CfgNode.DEPRECATED_KEYS] = set() + # Renamed options + # If you rename a config option, record the mapping from the old name to the + # new name in the dictionary below. Optionally, if the type also changed, you + # can make the value a tuple that specifies first the renamed key and then + # instructions for how to edit the config file. + self.__dict__[CfgNode.RENAMED_KEYS] = { + # 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY', # Dummy example to follow + # 'EXAMPLE.OLD.KEY': ( # A more complex example to follow + # 'EXAMPLE.NEW.KEY', + # "Also convert to a tuple, e.g., 'foo' -> ('foo',) or " + # + "'foo:bar' -> ('foo', 'bar')" + # ), + } + + # Allow new attributes after initialisation + self.__dict__[CfgNode.NEW_ALLOWED] = new_allowed + + @classmethod + def _create_config_tree_from_dict(cls, dic, key_list): + """ + Create a configuration tree using the given dict. 
+ Any dict-like objects inside dict will be treated as a new CfgNode. + + Args: + dic (dict): + key_list (list[str]): a list of names which index this CfgNode from + the root. Currently only used for logging purposes. + """ + dic = copy.deepcopy(dic) + for k, v in dic.items(): + if isinstance(v, dict): + # Convert dict to CfgNode + dic[k] = cls(v, key_list=key_list + [k]) + else: + # Check for valid leaf type or nested CfgNode + _assert_with_logging( + _valid_type(v, allow_cfg_node=False), + "Key {} with value {} is not a valid type; valid types: {}".format( + ".".join(key_list + [k]), type(v), _VALID_TYPES + ), + ) + return dic + + def __getattr__(self, name): + if name in self: + return self[name] + else: + raise AttributeError(name) + + def __setattr__(self, name, value): + if self.is_frozen(): + raise AttributeError( + "Attempted to set {} to {}, but CfgNode is immutable".format( + name, value + ) + ) + + _assert_with_logging( + name not in self.__dict__, + "Invalid attempt to modify internal CfgNode state: {}".format(name), + ) + _assert_with_logging( + _valid_type(value, allow_cfg_node=True), + "Invalid type {} for key {}; valid types = {}".format( + type(value), name, _VALID_TYPES + ), + ) + + self[name] = value + + def __str__(self): + def _indent(s_, num_spaces): + s = s_.split("\n") + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(num_spaces * " ") + line for line in s] + s = "\n".join(s) + s = first + "\n" + s + return s + + r = "" + s = [] + for k, v in sorted(self.items()): + seperator = "\n" if isinstance(v, CfgNode) else " " + attr_str = "{}:{}{}".format(str(k), seperator, str(v)) + attr_str = _indent(attr_str, 2) + s.append(attr_str) + r += "\n".join(s) + return r + + def __repr__(self): + return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__()) + + def dump(self, **kwargs): + """Dump to a string.""" + + def convert_to_dict(cfg_node, key_list): + if not isinstance(cfg_node, CfgNode): + _assert_with_logging( + 
    def merge_from_list(self, cfg_list):
        """Merge config (keys, values) in a list (e.g., from command line) into
        this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
        """
        _assert_with_logging(
            len(cfg_list) % 2 == 0,
            "Override list has odd length: {}; it must be a list of pairs".format(
                cfg_list
            ),
        )
        root = self
        # Keys and values alternate in the flat list: walk them pairwise.
        for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
            # Deprecated keys are skipped (key_is_deprecated logs a warning);
            # renamed keys abort with a KeyError naming the replacement.
            if root.key_is_deprecated(full_key):
                continue
            if root.key_is_renamed(full_key):
                root.raise_key_rename_error(full_key)
            key_list = full_key.split(".")
            # Descend to the node owning the final path component; every
            # intermediate key must already exist.
            d = self
            for subkey in key_list[:-1]:
                _assert_with_logging(
                    subkey in d, "Non-existent key: {}".format(full_key)
                )
                d = d[subkey]
            subkey = key_list[-1]
            _assert_with_logging(subkey in d, "Non-existent key: {}".format(full_key))
            # Decode command-line strings into Python literals, then coerce to
            # the type of the value being replaced.
            value = self._decode_cfg_value(v)
            value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
            d[subkey] = value
def _immutable(self, is_immutable): + """Set immutability to is_immutable and recursively apply the setting + to all nested CfgNodes. + """ + self.__dict__[CfgNode.IMMUTABLE] = is_immutable + # Recursively set immutable state + for v in self.__dict__.values(): + if isinstance(v, CfgNode): + v._immutable(is_immutable) + for v in self.values(): + if isinstance(v, CfgNode): + v._immutable(is_immutable) + + def clone(self): + """Recursively copy this CfgNode.""" + return copy.deepcopy(self) + + def register_deprecated_key(self, key): + """Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated + keys a warning is generated and the key is ignored. + """ + _assert_with_logging( + key not in self.__dict__[CfgNode.DEPRECATED_KEYS], + "key {} is already registered as a deprecated key".format(key), + ) + self.__dict__[CfgNode.DEPRECATED_KEYS].add(key) + + def register_renamed_key(self, old_name, new_name, message=None): + """Register a key as having been renamed from `old_name` to `new_name`. + When merging a renamed key, an exception is thrown alerting to user to + the fact that the key has been renamed. 
+ """ + _assert_with_logging( + old_name not in self.__dict__[CfgNode.RENAMED_KEYS], + "key {} is already registered as a renamed cfg key".format(old_name), + ) + value = new_name + if message: + value = (new_name, message) + self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value + + def key_is_deprecated(self, full_key): + """Test if a key is deprecated.""" + if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]: + logger.warning("Deprecated config key (ignoring): {}".format(full_key)) + return True + return False + + def key_is_renamed(self, full_key): + """Test if a key is renamed.""" + return full_key in self.__dict__[CfgNode.RENAMED_KEYS] + + def raise_key_rename_error(self, full_key): + new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key] + if isinstance(new_key, tuple): + msg = " Note: " + new_key[1] + new_key = new_key[0] + else: + msg = "" + raise KeyError( + "Key {} was renamed to {}; please update your config.{}".format( + full_key, new_key, msg + ) + ) + + def is_new_allowed(self): + return self.__dict__[CfgNode.NEW_ALLOWED] + + @classmethod + def load_cfg(cls, cfg_file_obj_or_str): + """ + Load a cfg. 
+ Args: + cfg_file_obj_or_str (str or file): + Supports loading from: + - A file object backed by a YAML file + - A file object backed by a Python source file that exports an attribute + "cfg" that is either a dict or a CfgNode + - A string that can be parsed as valid YAML + """ + _assert_with_logging( + isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)), + "Expected first argument to be of type {} or {}, but it was {}".format( + _FILE_TYPES, str, type(cfg_file_obj_or_str) + ), + ) + if isinstance(cfg_file_obj_or_str, str): + return cls._load_cfg_from_yaml_str(cfg_file_obj_or_str) + elif isinstance(cfg_file_obj_or_str, _FILE_TYPES): + return cls._load_cfg_from_file(cfg_file_obj_or_str) + else: + raise NotImplementedError("Impossible to reach here (unless there's a bug)") + + @classmethod + def _load_cfg_from_file(cls, file_obj): + """Load a config from a YAML file or a Python source file.""" + _, file_extension = os.path.splitext(file_obj.name) + if file_extension in _YAML_EXTS: + return cls._load_cfg_from_yaml_str(file_obj.read()) + elif file_extension in _PY_EXTS: + return cls._load_cfg_py_source(file_obj.name) + else: + raise Exception( + "Attempt to load from an unsupported file type {}; " + "only {} are supported".format(file_obj, _YAML_EXTS.union(_PY_EXTS)) + ) + + @classmethod + def _load_cfg_from_yaml_str(cls, str_obj): + """Load a config from a YAML string encoding.""" + cfg_as_dict = yaml.safe_load(str_obj) + return cls(cfg_as_dict) + + @classmethod + def _load_cfg_py_source(cls, filename): + """Load a config from a Python source file.""" + module = _load_module_from_file("yacs.config.override", filename) + _assert_with_logging( + hasattr(module, "cfg"), + "Python module from file {} must have 'cfg' attr".format(filename), + ) + VALID_ATTR_TYPES = {dict, CfgNode} + _assert_with_logging( + type(module.cfg) in VALID_ATTR_TYPES, + "Imported module 'cfg' attr must be in {} but is {} instead".format( + VALID_ATTR_TYPES, type(module.cfg) + ), + ) + return 
cls(module.cfg) + + @classmethod + def _decode_cfg_value(cls, value): + """ + Decodes a raw config value (e.g., from a yaml config files or command + line argument) into a Python object. + + If the value is a dict, it will be interpreted as a new CfgNode. + If the value is a str, it will be evaluated as literals. + Otherwise it is returned as-is. + """ + # Configs parsed from raw yaml will contain dictionary keys that need to be + # converted to CfgNode objects + if isinstance(value, dict): + return cls(value) + # All remaining processing is only applied to strings + if not isinstance(value, str): + return value + # Try to interpret `value` as a: + # string, number, tuple, list, dict, boolean, or None + try: + value = literal_eval(value) + # The following two excepts allow v to pass through when it represents a + # string. + # + # Longer explanation: + # The type of v is always a string (before calling literal_eval), but + # sometimes it *represents* a string and other times a data structure, like + # a list. In the case that v represents a string, what we got back from the + # yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is + # ok with '"foo"', but will raise a ValueError if given 'foo'. In other + # cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval + # will raise a SyntaxError. + except ValueError: + pass + except SyntaxError: + pass + return value + + +load_cfg = ( + CfgNode.load_cfg +) # keep this function in global scope for backward compatibility + + +def _valid_type(value, allow_cfg_node=False): + return (type(value) in _VALID_TYPES) or ( + allow_cfg_node and isinstance(value, CfgNode) + ) + + +def _merge_a_into_b(a, b, root, key_list): + """Merge config dictionary a into config dictionary b, clobbering the + options in b whenever they are also specified in a. 
+ """ + _assert_with_logging( + isinstance(a, CfgNode), + "`a` (cur type {}) must be an instance of {}".format(type(a), CfgNode), + ) + _assert_with_logging( + isinstance(b, CfgNode), + "`b` (cur type {}) must be an instance of {}".format(type(b), CfgNode), + ) + + for k, v_ in a.items(): + full_key = ".".join(key_list + [k]) + + v = copy.deepcopy(v_) + v = b._decode_cfg_value(v) + + if k in b: + v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key) + # Recursively merge dicts + if isinstance(v, CfgNode): + try: + _merge_a_into_b(v, b[k], root, key_list + [k]) + except BaseException: + raise + else: + b[k] = v + elif b.is_new_allowed(): + b[k] = v + else: + if root.key_is_deprecated(full_key): + continue + elif root.key_is_renamed(full_key): + root.raise_key_rename_error(full_key) + else: + raise KeyError("Non-existent config key: {}".format(full_key)) + + +def _check_and_coerce_cfg_value_type(replacement, original, key, full_key): + """Checks that `replacement`, which is intended to replace `original` is of + the right type. The type is correct if it matches exactly or is one of a few + cases in which the type can be easily coerced. 
+ """ + original_type = type(original) + replacement_type = type(replacement) + + # The types must match (with some exceptions) + if replacement_type == original_type: + return replacement + + # Cast replacement from from_type to to_type if the replacement and original + # types match from_type and to_type + def conditional_cast(from_type, to_type): + if replacement_type == from_type and original_type == to_type: + return True, to_type(replacement) + else: + return False, None + + # Conditionally casts + # list <-> tuple + casts = [(tuple, list), (list, tuple)] + # For py2: allow converting from str (bytes) to a unicode string + try: + casts.append((str, unicode)) # noqa: F821 + except Exception: + pass + + for (from_type, to_type) in casts: + converted, converted_value = conditional_cast(from_type, to_type) + if converted: + return converted_value + + raise ValueError( + "Type mismatch ({} vs. {}) with values ({} vs. {}) for config " + "key: {}".format( + original_type, replacement_type, original, replacement, full_key + ) + ) + + +def _assert_with_logging(cond, msg): + if not cond: + logger.debug(msg) + assert cond, msg + + +def _load_module_from_file(name, filename): + if _PY2: + module = imp.load_source(name, filename) + else: + spec = importlib.util.spec_from_file_location(name, filename) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module diff --git a/visualization.py b/visualization.py deleted file mode 100644 index f327751..0000000 --- a/visualization.py +++ /dev/null @@ -1,38 +0,0 @@ -import supervision as sv -import numpy as np - - -def draw_estimations(frame, estimations, class_id_to_name=None): - if class_id_to_name is None: - class_id_to_name = {} - # Extract bounding boxes, class labels, and tracker IDs from estimations - bounding_boxes = estimations['boxes'].cpu().numpy() - class_labels = estimations['labels'].cpu().numpy() - tracker_ids = estimations['ids'].cpu().numpy() - # Create a Detections object 
for easier handling - detections = sv.Detections( - xyxy=bounding_boxes, class_id=class_labels, tracker_id=tracker_ids) - # Generate descriptive labels for each detection - detection_labels = [_generate_label(class_id, tracker_id, class_id_to_name) - for class_id, tracker_id in zip(detections.class_id, detections.tracker_id)] - # Annotate the frame with bounding boxes and labels - frame = _annotate_frame(frame, detections, detection_labels) - return frame - - -def _generate_label(class_id, tracker_id, class_id_to_name): - """Generate a label for a detection based on class name and tracker ID.""" - class_name = class_id_to_name.get(class_id, 'Unknown') - if np.isnan(tracker_id): - return class_name - return f"#{int(tracker_id)} {class_name}" - - -def _annotate_frame(frame, detections, labels): - """Annotate the frame with bounding boxes and labels.""" - box_annotator = sv.BoxCornerAnnotator(thickness=2) - label_annotator = sv.LabelAnnotator(text_padding=1) - frame = box_annotator.annotate(scene=frame, detections=detections) - frame = label_annotator.annotate( - scene=frame, detections=detections, labels=labels) - return frame