diff --git a/src/ac_training_lab/a1_cam/.gitignore b/src/ac_training_lab/a1_cam/.gitignore deleted file mode 100644 index f3e5b958..00000000 --- a/src/ac_training_lab/a1_cam/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -nohup.out -my_secrets.py diff --git a/src/ac_training_lab/a1_cam/apriltag_images/img1.png b/src/ac_training_lab/a1_cam/apriltag_images/img1.png new file mode 100644 index 00000000..2d5d82a1 Binary files /dev/null and b/src/ac_training_lab/a1_cam/apriltag_images/img1.png differ diff --git a/src/ac_training_lab/a1_cam/apriltag_images/img1_flipped_vertical.png b/src/ac_training_lab/a1_cam/apriltag_images/img1_flipped_vertical.png new file mode 100644 index 00000000..3751bd7e Binary files /dev/null and b/src/ac_training_lab/a1_cam/apriltag_images/img1_flipped_vertical.png differ diff --git a/src/ac_training_lab/a1_cam/README.md b/src/ac_training_lab/a1_cam/archive/README.md similarity index 100% rename from src/ac_training_lab/a1_cam/README.md rename to src/ac_training_lab/a1_cam/archive/README.md diff --git a/src/ac_training_lab/a1_cam/_scripts/boto3_file_upload.py b/src/ac_training_lab/a1_cam/archive/_scripts/boto3_file_upload.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/boto3_file_upload.py rename to src/ac_training_lab/a1_cam/archive/_scripts/boto3_file_upload.py diff --git a/src/ac_training_lab/a1_cam/_scripts/client.py b/src/ac_training_lab/a1_cam/archive/_scripts/client.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/client.py rename to src/ac_training_lab/a1_cam/archive/_scripts/client.py diff --git a/src/ac_training_lab/a1_cam/_scripts/example_file.txt b/src/ac_training_lab/a1_cam/archive/_scripts/example_file.txt similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/example_file.txt rename to src/ac_training_lab/a1_cam/archive/_scripts/example_file.txt diff --git a/src/ac_training_lab/a1_cam/_scripts/module_4_mwe.py b/src/ac_training_lab/a1_cam/archive/_scripts/module_4_mwe.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/module_4_mwe.py rename to src/ac_training_lab/a1_cam/archive/_scripts/module_4_mwe.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_AUTH_SETUP.md b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_AUTH_SETUP.md similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_AUTH_SETUP.md rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_AUTH_SETUP.md diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_AUTH_TEST_RESULTS.md b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_AUTH_TEST_RESULTS.md similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_AUTH_TEST_RESULTS.md rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_AUTH_TEST_RESULTS.md diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_TEST_RESULTS.md b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_TEST_RESULTS.md similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/MQTT_TEST_RESULTS.md rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/MQTT_TEST_RESULTS.md diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/create_mosquitto_auth.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/create_mosquitto_auth.py old mode 100755 
new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/create_mosquitto_auth.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/create_mosquitto_auth.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_acl b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_acl similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_acl rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_acl diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_acl_example b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_acl_example old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_acl_example rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_acl_example diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_auth.conf b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_auth.conf similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_auth.conf rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_auth.conf diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_auth_example.conf b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_auth_example.conf similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_auth_example.conf rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_auth_example.conf diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_passwd b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_passwd similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/mosquitto_passwd rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/mosquitto_passwd diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_auth_client.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_auth_client.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_auth_client.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_auth_client.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_auth_device.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_auth_device.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_auth_device.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_auth_device.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_client.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_client.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_client.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_client.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_client_auth.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_client_auth.py old mode 100755 new mode 100644 similarity index 100% rename from 
src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_client_auth.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_client_auth.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_device.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_device.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_device.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_device.py diff --git a/src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_device_auth.py b/src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_device_auth.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/mosquitto_tests/test_mqtt_device_auth.py rename to src/ac_training_lab/a1_cam/archive/_scripts/mosquitto_tests/test_mqtt_device_auth.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/copilot-logs.md b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/copilot-logs.md similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/copilot-logs.md rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/copilot-logs.md diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/create_string.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/create_string.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/create_string.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/create_string.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/device_prefect.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/device_prefect.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/device_prefect.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/device_prefect.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/receive_string.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/receive_string.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/receive_string.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/receive_string.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/run_capture_orchestrator.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/run_capture_orchestrator.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/run_capture_orchestrator.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/run_capture_orchestrator.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/run_capture_orchestrator_context7.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/run_capture_orchestrator_context7.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/run_capture_orchestrator_context7.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/run_capture_orchestrator_context7.py diff --git a/src/ac_training_lab/a1_cam/_scripts/prefect/worker_service.py b/src/ac_training_lab/a1_cam/archive/_scripts/prefect/worker_service.py similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/prefect/worker_service.py rename to src/ac_training_lab/a1_cam/archive/_scripts/prefect/worker_service.py diff --git a/src/ac_training_lab/a1_cam/_scripts/publish.py b/src/ac_training_lab/a1_cam/archive/_scripts/publish.py old mode 100755 new mode 100644 similarity index 100% rename from 
src/ac_training_lab/a1_cam/_scripts/publish.py rename to src/ac_training_lab/a1_cam/archive/_scripts/publish.py diff --git a/src/ac_training_lab/a1_cam/_scripts/publish_subscribe.py b/src/ac_training_lab/a1_cam/archive/_scripts/publish_subscribe.py old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/_scripts/publish_subscribe.py rename to src/ac_training_lab/a1_cam/archive/_scripts/publish_subscribe.py diff --git a/src/ac_training_lab/a1_cam/bookworm-tailscale-instructions.txt b/src/ac_training_lab/a1_cam/archive/bookworm-tailscale-instructions.txt similarity index 100% rename from src/ac_training_lab/a1_cam/bookworm-tailscale-instructions.txt rename to src/ac_training_lab/a1_cam/archive/bookworm-tailscale-instructions.txt diff --git a/src/ac_training_lab/a1_cam/device.py b/src/ac_training_lab/a1_cam/archive/device.py similarity index 100% rename from src/ac_training_lab/a1_cam/device.py rename to src/ac_training_lab/a1_cam/archive/device.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/README.md b/src/ac_training_lab/a1_cam/archive/dummy_pkg/README.md similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/README.md rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/README.md diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/pyproject.toml b/src/ac_training_lab/a1_cam/archive/dummy_pkg/pyproject.toml similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/pyproject.toml rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/pyproject.toml diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/requirements.txt b/src/ac_training_lab/a1_cam/archive/dummy_pkg/requirements.txt similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/requirements.txt rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/requirements.txt diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/setup.cfg b/src/ac_training_lab/a1_cam/archive/dummy_pkg/setup.cfg similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/setup.cfg rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/setup.cfg diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/src/libcamera/__init__.py b/src/ac_training_lab/a1_cam/archive/dummy_pkg/src/libcamera/__init__.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/src/libcamera/__init__.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/src/libcamera/__init__.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/src/libcamera/_libcamera.py b/src/ac_training_lab/a1_cam/archive/dummy_pkg/src/libcamera/_libcamera.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/src/libcamera/_libcamera.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/src/libcamera/_libcamera.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/src/picamera2/__init__.py b/src/ac_training_lab/a1_cam/archive/dummy_pkg/src/picamera2/__init__.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/src/picamera2/__init__.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/src/picamera2/__init__.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/src/picamera2/_picamera2.py b/src/ac_training_lab/a1_cam/archive/dummy_pkg/src/picamera2/_picamera2.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/src/picamera2/_picamera2.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/src/picamera2/_picamera2.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/tests/test_dummy_libcamera.py 
b/src/ac_training_lab/a1_cam/archive/dummy_pkg/tests/test_dummy_libcamera.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/tests/test_dummy_libcamera.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/tests/test_dummy_libcamera.py diff --git a/src/ac_training_lab/a1_cam/dummy_pkg/tests/test_dummy_picamera2.py b/src/ac_training_lab/a1_cam/archive/dummy_pkg/tests/test_dummy_picamera2.py similarity index 100% rename from src/ac_training_lab/a1_cam/dummy_pkg/tests/test_dummy_picamera2.py rename to src/ac_training_lab/a1_cam/archive/dummy_pkg/tests/test_dummy_picamera2.py diff --git a/src/ac_training_lab/a1_cam/my_secrets_example.py b/src/ac_training_lab/a1_cam/archive/my_secrets_example.py similarity index 100% rename from src/ac_training_lab/a1_cam/my_secrets_example.py rename to src/ac_training_lab/a1_cam/archive/my_secrets_example.py diff --git a/src/ac_training_lab/a1_cam/setup.sh b/src/ac_training_lab/a1_cam/archive/setup.sh old mode 100755 new mode 100644 similarity index 100% rename from src/ac_training_lab/a1_cam/setup.sh rename to src/ac_training_lab/a1_cam/archive/setup.sh diff --git a/src/ac_training_lab/a1_cam/test_camera.ipynb b/src/ac_training_lab/a1_cam/archive/test_camera.ipynb similarity index 89% rename from src/ac_training_lab/a1_cam/test_camera.ipynb rename to src/ac_training_lab/a1_cam/archive/test_camera.ipynb index ac0241c4..3044b895 100644 --- a/src/ac_training_lab/a1_cam/test_camera.ipynb +++ b/src/ac_training_lab/a1_cam/archive/test_camera.ipynb @@ -33,7 +33,7 @@ "This notebook uses [Colab Secrets](https://x.com/GoogleColab/status/1719798406195867814) to securely store credentials.\n", "\n", "**To set up secrets in Colab:**\n", - "1. Click the key icon \ud83d\udd11 in the left sidebar\n", + "1. Click the key icon šŸ”‘ in the left sidebar\n", "2. 
Add the following secrets:\n", " - `MQTT_HOST` - Your MQTT broker host (e.g., `xxxxx.s1.eu.hivemq.cloud`)\n", " - `MQTT_USERNAME` - Your MQTT username\n", @@ -94,7 +94,7 @@ " client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2, protocol=mqtt.MQTTv5)\n", " \n", " def on_message(client, userdata, msg):\n", - " print(f\"\u2713 Message received on topic: {msg.topic}\")\n", + " print(f\"āœ“ Message received on topic: {msg.topic}\")\n", " print(f\" Payload: {msg.payload}\")\n", " try:\n", " data = json.loads(msg.payload)\n", @@ -102,15 +102,15 @@ " data_queue.put(data)\n", " print(f\" Data added to queue (queue size: {data_queue.qsize()})\")\n", " except json.JSONDecodeError as e:\n", - " print(f\" \u2717 JSON decode error: {e}\")\n", + " print(f\" āœ— JSON decode error: {e}\")\n", " \n", " def on_connect(client, userdata, flags, rc, properties=None):\n", " if rc == 0:\n", - " print(f\"\u2713 Connected successfully to MQTT broker\")\n", + " print(f\"āœ“ Connected successfully to MQTT broker\")\n", " else:\n", - " print(f\"\u2717 Connection failed with result code {rc}\")\n", + " print(f\"āœ— Connection failed with result code {rc}\")\n", " client.subscribe(sensor_data_topic, qos=2)\n", - " print(f\"\u2713 Subscribed to topic: {sensor_data_topic}\")\n", + " print(f\"āœ“ Subscribed to topic: {sensor_data_topic}\")\n", " \n", " def on_disconnect(client, userdata, rc, properties=None):\n", " print(f\"Disconnected with result code {rc}\")\n", @@ -144,12 +144,12 @@ " print(f\"Waiting for response (timeout: {queue_timeout}s)...\")\n", " try:\n", " data = data_queue.get(True, queue_timeout)\n", - " print(f\"\u2713 Response received\")\n", + " print(f\"āœ“ Response received\")\n", " client.loop_stop()\n", " return data\n", " except Empty:\n", " client.loop_stop()\n", - " print(f\"\u2717 Timeout after {queue_timeout}s - no response received\")\n", + " print(f\"āœ— Timeout after {queue_timeout}s - no response received\")\n", " print(\" Possible issues:\")\n", " print(\" - Device is not running or not connected\")\n", " print(\" - Incorrect DEVICE_SERIAL in secrets\")\n", @@ -204,16 +204,16 @@ " data = send_and_receive(client, CAMERA_READ_TOPIC, msg, queue_timeout=30)\n", " \n", " if \"error\" in data:\n", - " print(f\"\\n\u2717 Error from device: {data['error']}\")\n", + " print(f\"\\nāœ— Error from device: {data['error']}\")\n", " else:\n", " image_uri = data[\"image_uri\"]\n", - " print(f\"\\n\u2713 Image URI received: {image_uri}\")\n", + " print(f\"\\nāœ“ Image URI received: {image_uri}\")\n", " \n", " # Download and display image\n", " print(\"Downloading image...\")\n", " response = requests.get(image_uri)\n", " response.raise_for_status()\n", - " print(f\"\u2713 Image downloaded ({len(response.content)} bytes)\")\n", + " print(f\"āœ“ Image downloaded ({len(response.content)} bytes)\")\n", " \n", " img = Image.open(BytesIO(response.content))\n", " \n", @@ -224,13 +224,13 @@ " plt.title('Captured Image from A1 Mini Camera')\n", " plt.show()\n", " \n", - " print(f\"\\n\u2713 Image size: {img.size}\")\n", - " print(\"\u2713 Image captured and displayed successfully!\")\n", + " print(f\"\\nāœ“ Image size: {img.size}\")\n", + " print(\"āœ“ Image captured and displayed successfully!\")\n", " \n", "except TimeoutError as e:\n", - " print(f\"\\n\u2717 {e}\")\n", + " print(f\"\\nāœ— {e}\")\n", "except Exception as e:\n", - " print(f\"\\n\u2717 Unexpected error: {e}\")\n", + " print(f\"\\nāœ— Unexpected error: {e}\")\n", " import traceback\n", " traceback.print_exc()\n", "finally:\n", @@ -271,9 +271,9 @@ 
"name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.12.10" } }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/src/ac_training_lab/a1_cam/calibrate_camera.py b/src/ac_training_lab/a1_cam/calibrate_camera.py new file mode 100644 index 00000000..ae91d4e2 --- /dev/null +++ b/src/ac_training_lab/a1_cam/calibrate_camera.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python3 +""" +Camera Calibration Script for A1 Mini Camera +Based on the apriltag_demo.ipynb approach + +This script performs camera calibration using checkerboard images to generate +accurate camera intrinsics for your working distance (~15cm). + +Instructions: +1. Print a checkerboard pattern (7x10 internal corners, 23mm square size) +2. Take 15-20 photos of the checkerboard at ~15cm distance from different angles +3. Save photos as 'calib_01.jpg', 'calib_02.jpg', etc. in images/ directory +4. Run this script to generate new camera intrinsics + +The script will output: +- New a1_intrinsics.yaml file with corrected camera matrix and distortion coefficients +- Calibration quality metrics (reprojection error) +""" + +import cv2 +import numpy as np +import glob +import yaml +import os +from pathlib import Path + +def calibrate_camera_from_images(image_paths, pattern_size, square_size_m, image_size=None): + """ + Calibrate camera using checkerboard images + + Args: + image_paths: List of paths to calibration images + pattern_size: (cols, rows) of internal checkerboard corners + square_size_m: Size of checkerboard squares in meters + image_size: (width, height) of images, auto-detected if None + + Returns: + ret: Calibration success flag + camera_matrix: 3x3 camera matrix + dist_coeffs: Distortion coefficients + rvecs: Rotation vectors + tvecs: Translation vectors + rms_error: RMS reprojection error + """ + + # Prepare object points - coordinates of corners in checkerboard coordinate system + objp = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32) + objp[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2) + objp *= square_size_m # Scale by actual square size + + # Arrays to store object points and image points from all images + objpoints = [] # 3D points in real world space + imgpoints = [] # 2D points in image plane + + print(f"šŸ” Processing {len(image_paths)} calibration images...") + + successful_images = 0 + for i, image_path in enumerate(image_paths): + print(f" Processing image {i+1}/{len(image_paths)}: {Path(image_path).name}") + + # Read image + img = cv2.imread(image_path) + if img is None: + print(f" āŒ Could not load image: {image_path}") + continue + + # Convert to grayscale + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + + # Auto-detect image size from first successful image + if image_size is None: + image_size = gray.shape[::-1] # (width, height) + + # Find checkerboard corners + ret, corners = cv2.findChessboardCorners(gray, pattern_size, None) + + if ret: + # Refine corner positions for sub-pixel accuracy + corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), + (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) + + objpoints.append(objp) + imgpoints.append(corners) + successful_images += 1 + print(f" āœ… Found checkerboard corners") + else: + print(f" āŒ Could not find checkerboard corners") + + print(f"\nšŸ“Š Successfully processed {successful_images}/{len(image_paths)} images") + + if successful_images < 10: + print("āš ļø Warning: Less than 10 successful images. 
Consider taking more calibration photos.") + + if successful_images < 3: + print("āŒ Error: Not enough successful images for calibration (need at least 3)") + return False, None, None, None, None, None + + print("šŸ”§ Performing camera calibration...") + + # Perform camera calibration + ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera( + objpoints, imgpoints, image_size, None, None, + flags=cv2.CALIB_RATIONAL_MODEL + ) + + # Calculate reprojection error + mean_error = 0 + for i in range(len(objpoints)): + imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], camera_matrix, dist_coeffs) + error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2) + mean_error += error + + rms_error = mean_error / len(objpoints) + + return ret, camera_matrix, dist_coeffs, rvecs, tvecs, rms_error + +def save_calibration_to_yaml(camera_matrix, dist_coeffs, image_size, rms_error, output_path): + """Save calibration results to YAML file compatible with detect_apriltag.py""" + + calib_data = { + 'camera_matrix': camera_matrix.tolist(), + 'distortion_coefficients': dist_coeffs.flatten().tolist(), + 'image_size': list(image_size), + 'rms_reprojection_error_pixels': float(rms_error) + } + + # Create config directory if it doesn't exist + os.makedirs(os.path.dirname(output_path), exist_ok=True) + + with open(output_path, 'w') as f: + yaml.dump(calib_data, f, default_flow_style=False) + + print(f"āœ… Saved calibration to: {output_path}") + +def main(): + print("šŸ“· A1 Mini Camera Calibration") + print("=" * 40) + + # Configuration matching detected pattern + pattern_size = (11, 8) # Internal corners (cols, rows) + square_size_mm = 23.0 # Size of each square in mm (measure your printed checkerboard!) + square_size_m = square_size_mm / 1000.0 # Convert to meters + + print(f"šŸ“‹ Checkerboard settings:") + print(f" Internal corners: {pattern_size[0]} x {pattern_size[1]}") + print(f" Square size: {square_size_mm}mm") + print(f" āš ļø Verify your printed checkerboard matches these dimensions!") + + # Look for calibration images + image_dir = "calibration_images" + if not os.path.exists(image_dir): + print(f"\nāŒ Images directory not found: {image_dir}") + print("Please create the calibration_images directory and add your checkerboard calibration photos.") + print("Expected filenames: calib_01.jpg, calib_02.jpg, etc.") + return + + # Find calibration images + image_patterns = [ + f"{image_dir}/calib_*.jpg", + f"{image_dir}/calib_*.png", + f"{image_dir}/calibration_*.jpg", + f"{image_dir}/calibration_*.png", + f"{image_dir}/*.jpg", + f"{image_dir}/*.png" + ] + + image_paths = [] + for pattern in image_patterns: + image_paths.extend(glob.glob(pattern)) + + # Remove duplicates and sort + image_paths = sorted(list(set(image_paths))) + + if not image_paths: + print(f"\nāŒ No calibration images found in {image_dir}/") + print("Please add calibration images and try again.") + print("Supported formats: .jpg, .png") + return + + print(f"\nšŸ–¼ļø Found {len(image_paths)} calibration images") + + # Perform calibration + ret, camera_matrix, dist_coeffs, rvecs, tvecs, rms_error = calibrate_camera_from_images( + image_paths, pattern_size, square_size_m + ) + + if not ret: + print("āŒ Calibration failed!") + return + + print("\nāœ… Calibration successful!") + print(f"šŸ“Š Results:") + print(f" RMS reprojection error: {rms_error:.4f} pixels") + print(f" Camera matrix:") + print(f" fx: {camera_matrix[0,0]:.2f}") + print(f" fy: {camera_matrix[1,1]:.2f}") + print(f" cx: 
{camera_matrix[0,2]:.2f}") + print(f" cy: {camera_matrix[1,2]:.2f}") + + # Quality assessment + if rms_error < 0.5: + print(" 🟢 Excellent calibration quality") + elif rms_error < 1.0: + print(" 🟔 Good calibration quality") + else: + print(" šŸ”“ Poor calibration quality - consider retaking photos") + + # Save to config directory + output_path = "config/a1_intrinsics_new.yaml" + + # Get image size from first image + first_img = cv2.imread(image_paths[0]) + image_size = (first_img.shape[1], first_img.shape[0]) # (width, height) + + save_calibration_to_yaml(camera_matrix, dist_coeffs, image_size, rms_error, output_path) + + print(f"\nšŸ“ Next steps:") + print(f" 1. Test the new calibration: python detect_apriltag.py images/your_test_image.png") + print(f" 2. If results look good, backup old calibration:") + print(f" mv config/a1_intrinsics.yaml config/a1_intrinsics_backup.yaml") + print(f" 3. Use new calibration:") + print(f" mv config/a1_intrinsics_new.yaml config/a1_intrinsics.yaml") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/ac_training_lab/a1_cam/calibration_images/1.png b/src/ac_training_lab/a1_cam/calibration_images/1.png new file mode 100644 index 00000000..f18cfde3 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/1.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/10.png b/src/ac_training_lab/a1_cam/calibration_images/10.png new file mode 100644 index 00000000..036d75c6 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/10.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/11.png b/src/ac_training_lab/a1_cam/calibration_images/11.png new file mode 100644 index 00000000..56765904 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/11.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/12.png b/src/ac_training_lab/a1_cam/calibration_images/12.png new file mode 100644 index 00000000..3594a6d9 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/12.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/13.png b/src/ac_training_lab/a1_cam/calibration_images/13.png new file mode 100644 index 00000000..5f8af881 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/13.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/14.png b/src/ac_training_lab/a1_cam/calibration_images/14.png new file mode 100644 index 00000000..857bbed0 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/14.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/15.png b/src/ac_training_lab/a1_cam/calibration_images/15.png new file mode 100644 index 00000000..641ec5ad Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/15.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/2.png b/src/ac_training_lab/a1_cam/calibration_images/2.png new file mode 100644 index 00000000..bd917e68 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/2.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/3.png b/src/ac_training_lab/a1_cam/calibration_images/3.png new file mode 100644 index 00000000..c2dbdade Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/3.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/4.png b/src/ac_training_lab/a1_cam/calibration_images/4.png new file mode 100644 index 00000000..e0d62982 Binary files /dev/null and 
b/src/ac_training_lab/a1_cam/calibration_images/4.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/5.png b/src/ac_training_lab/a1_cam/calibration_images/5.png new file mode 100644 index 00000000..68672f04 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/5.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/6.png b/src/ac_training_lab/a1_cam/calibration_images/6.png new file mode 100644 index 00000000..8a0aa2b2 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/6.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/7.png b/src/ac_training_lab/a1_cam/calibration_images/7.png new file mode 100644 index 00000000..3f7727a1 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/7.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/8.png b/src/ac_training_lab/a1_cam/calibration_images/8.png new file mode 100644 index 00000000..a4bfa615 Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/8.png differ diff --git a/src/ac_training_lab/a1_cam/calibration_images/9.png b/src/ac_training_lab/a1_cam/calibration_images/9.png new file mode 100644 index 00000000..955cdc0c Binary files /dev/null and b/src/ac_training_lab/a1_cam/calibration_images/9.png differ diff --git a/src/ac_training_lab/a1_cam/collect_hand_eye_data.py b/src/ac_training_lab/a1_cam/collect_hand_eye_data.py new file mode 100644 index 00000000..01be563a --- /dev/null +++ b/src/ac_training_lab/a1_cam/collect_hand_eye_data.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +""" +Hand-Eye Data Collection Helper +Combines AprilTag detection with robot pose to collect hand-eye calibration data + +Usage: +1. Move robot to a pose +2. Run: python collect_hand_eye_data.py --robot-pose x y z roll pitch yaw +3. Script will detect AprilTag and save the pose pair +4. Repeat for 5-10 different robot poses +5. 
Run hand-eye calibration +""" + +import argparse +import json +import os +import subprocess +import sys +from datetime import datetime + +def get_apriltag_pose(image_path="images/img1_flipped_vertical.png", tag_size=0.03): + """ + Run AprilTag detection and return pose data + + Returns: + dict: AprilTag pose with position and orientation, or None if failed + """ + try: + # Run AprilTag detection + cmd = [ + sys.executable, "detect_apriltag.py", + image_path, + "--tag-size", str(tag_size), + "--save-json", "temp_apriltag_pose.json" + ] + + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0: + print(f"āŒ AprilTag detection failed: {result.stderr}") + return None + + # Load detection results + if not os.path.exists("temp_apriltag_pose.json"): + print("āŒ AprilTag detection output not found") + return None + + with open("temp_apriltag_pose.json", 'r') as f: + data = json.load(f) + + # Clean up temp file + os.remove("temp_apriltag_pose.json") + + if not data['detections']: + print("āŒ No AprilTags detected") + return None + + # Get first detection + detection = data['detections'][0] + + return { + 'position': [ + detection['position']['x'], + detection['position']['y'], + detection['position']['z'] + ], + 'orientation': [ + detection['orientation']['roll'], + detection['orientation']['pitch'], + detection['orientation']['yaw'] + ] + } + + except Exception as e: + print(f"āŒ Error during AprilTag detection: {e}") + return None + +def load_existing_data(data_file): + """Load existing calibration data or create new structure""" + if os.path.exists(data_file): + with open(data_file, 'r') as f: + return json.load(f) + else: + return { + 'calibration_info': { + 'description': 'Hand-eye calibration data for A1 Mini robot arm', + 'date_created': datetime.now().isoformat(), + 'instructions': [ + '1. Move robot to various poses while keeping AprilTag visible', + '2. Run collect_hand_eye_data.py with robot pose for each position', + '3. Collect 5-10 calibration points', + '4. 
Run: python hand_eye_calibration.py calibrate ' + ] + }, + 'calibration_points': [] + } + +def save_calibration_point(data_file, robot_pose, apriltag_pose): + """Add new calibration point to data file""" + data = load_existing_data(data_file) + + point_id = len(data['calibration_points']) + 1 + + new_point = { + 'point_id': point_id, + 'timestamp': datetime.now().isoformat(), + 'robot_pose': { + 'position': robot_pose['position'], + 'orientation': robot_pose['orientation'] + }, + 'apriltag_pose': { + 'position': apriltag_pose['position'], + 'orientation': apriltag_pose['orientation'] + } + } + + data['calibration_points'].append(new_point) + + with open(data_file, 'w') as f: + json.dump(data, f, indent=2) + + print(f"āœ… Saved calibration point {point_id} to {data_file}") + print(f"šŸ“Š Total points: {len(data['calibration_points'])}") + +def show_calibration_status(data_file): + """Show current calibration data status""" + if not os.path.exists(data_file): + print(f"šŸ“ No calibration data found at {data_file}") + print(" Start collecting data with: python collect_hand_eye_data.py --robot-pose x y z roll pitch yaw") + return + + data = load_existing_data(data_file) + points = data['calibration_points'] + + print(f"šŸ“Š Calibration Data Status: {data_file}") + print(f" Points collected: {len(points)}") + print(f" Recommended minimum: 5-10 points") + + if len(points) >= 5: + print(" āœ… Sufficient data for calibration") + print(f" Run: python hand_eye_calibration.py calibrate {data_file}") + else: + print(f" āš ļø Need {5 - len(points)} more points") + + print(f"\nšŸ“‹ Recent points:") + for point in points[-3:]: # Show last 3 points + robot_pos = point['robot_pose']['position'] + tag_pos = point['apriltag_pose']['position'] + print(f" Point {point['point_id']}: Robot {robot_pos} → Tag {tag_pos}") + +def main(): + parser = argparse.ArgumentParser(description='Collect Hand-Eye Calibration Data') + parser.add_argument('--robot-pose', nargs=6, type=float, metavar=('X', 'Y', 'Z', 'ROLL', 'PITCH', 'YAW'), + help='Robot end-effector pose: x y z roll pitch yaw (m, degrees)') + parser.add_argument('--image', default='images/img1_flipped_vertical.png', + help='AprilTag image to use for detection') + parser.add_argument('--tag-size', type=float, default=0.03, + help='AprilTag size in meters (default: 0.03)') + parser.add_argument('--data-file', default='hand_eye_calibration_data.json', + help='Calibration data file') + parser.add_argument('--status', action='store_true', + help='Show calibration data status') + + args = parser.parse_args() + + if args.status: + show_calibration_status(args.data_file) + return + + if not args.robot_pose: + print("āŒ Please provide robot pose with --robot-pose x y z roll pitch yaw") + print(" or use --status to check current calibration data") + return + + # Parse robot pose + robot_pose = { + 'position': args.robot_pose[:3], # x, y, z in meters + 'orientation': args.robot_pose[3:] # roll, pitch, yaw in degrees + } + + print(f"šŸ¤– Robot pose: {robot_pose['position']} m, {robot_pose['orientation']} deg") + + # Capture AprilTag pose + print(f"šŸ·ļø Detecting AprilTag in {args.image}...") + apriltag_pose = get_apriltag_pose(args.image, args.tag_size) + + if apriltag_pose is None: + print("āŒ Failed to detect AprilTag - check image and try again") + return + + print(f"šŸ“· AprilTag pose: {apriltag_pose['position']} m, {apriltag_pose['orientation']} deg") + + # Save calibration point + save_calibration_point(args.data_file, robot_pose, apriltag_pose) + + # Show 
status + show_calibration_status(args.data_file) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/ac_training_lab/a1_cam/config/a1_intrinsics.yaml b/src/ac_training_lab/a1_cam/config/a1_intrinsics.yaml new file mode 100644 index 00000000..de56c6cc --- /dev/null +++ b/src/ac_training_lab/a1_cam/config/a1_intrinsics.yaml @@ -0,0 +1,32 @@ +calibration_date: '2025-08-25T13:48:37.236321' +calibration_quality: Excellent (<0.3px) - High precision poses, sub-millimeter accuracy +calibration_successful: true +camera_matrix: +- - 1465.6099184918514 + - 0.0 + - 932.55811173008 +- - 0.0 + - 1463.3121827070456 + - 512.2671650849411 +- - 0.0 + - 0.0 + - 1.0 +checkerboard_size: +- 7 +- 5 +distortion_coefficients: +- - 0.04988618136899136 + - 0.14442660532044985 + - -0.0046389734519390186 + - -0.009566672323473092 + - -0.47591564714658857 +focal_length_x: 1465.6099184918514 +focal_length_y: 1463.3121827070456 +image_size: +- 1920 +- 1080 +num_images_used: 10 +principal_point_x: 932.55811173008 +principal_point_y: 512.2671650849411 +reprojection_error: 0.03989865591060219 +square_size_mm: 30.0 \ No newline at end of file diff --git a/src/ac_training_lab/a1_cam/config/a1_intrinsics_new.yaml b/src/ac_training_lab/a1_cam/config/a1_intrinsics_new.yaml new file mode 100644 index 00000000..7f8c03fa --- /dev/null +++ b/src/ac_training_lab/a1_cam/config/a1_intrinsics_new.yaml @@ -0,0 +1,29 @@ +camera_matrix: +- - 490.85116885840836 + - 0.0 + - 535.2960404971705 +- - 0.0 + - 507.3850028750929 + - 312.8467770304556 +- - 0.0 + - 0.0 + - 1.0 +distortion_coefficients: +- 140.7714801253207 +- 221.91651299567826 +- 0.0713893073046025 +- 0.01939525484131475 +- 5548.412058989083 +- 119.3613020438373 +- 232.4963506612995 +- 4562.131433603662 +- 0.0 +- 0.0 +- 0.0 +- 0.0 +- 0.0 +- 0.0 +image_size: +- 950 +- 565 +rms_reprojection_error_pixels: 0.27374371960034244 diff --git a/src/ac_training_lab/a1_cam/config/ac_lab_camera_calibration.yaml b/src/ac_training_lab/a1_cam/config/ac_lab_camera_calibration.yaml new file mode 100644 index 00000000..4e62f695 --- /dev/null +++ b/src/ac_training_lab/a1_cam/config/ac_lab_camera_calibration.yaml @@ -0,0 +1,32 @@ +calibration_date: '2025-11-28T15:08:55.591794' +calibration_quality: Excellent (<0.3px) - High precision poses, sub-millimeter accuracy +calibration_successful: true +camera_matrix: +- - 521.7095558979644 + - 0.0 + - 517.8686691848677 +- - 0.0 + - 536.1295390630403 + - 275.52344400047383 +- - 0.0 + - 0.0 + - 1.0 +checkerboard_size: +- 11 +- 8 +distortion_coefficients: +- - 0.4414100104612963 + - -2.3884642553300477 + - 0.04274579321226224 + - 0.009112635642452339 + - 4.83961431427362 +focal_length_x: 521.7095558979644 +focal_length_y: 536.1295390630403 +image_size: +- 950 +- 565 +num_images_used: 15 +principal_point_x: 517.8686691848677 +principal_point_y: 275.52344400047383 +reprojection_error: 0.2705472853371433 +square_size_mm: 17.8 diff --git a/src/ac_training_lab/a1_cam/detect_apriltag.py b/src/ac_training_lab/a1_cam/detect_apriltag.py new file mode 100644 index 00000000..73b06224 --- /dev/null +++ b/src/ac_training_lab/a1_cam/detect_apriltag.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +""" +AprilTag Detection Script for A1 Mini Camera +Detects AprilTags in captured images and computes 6DOF pose using camera intrinsics. 
+ +Usage: + python detect_apriltag.py img1.png + python detect_apriltag.py # Uses img1.png by default +""" + +import cv2 +import numpy as np +import yaml +import argparse +import sys +import json +from datetime import datetime +from pathlib import Path +from scipy.spatial.transform import Rotation as R + +try: + from pupil_apriltags import Detector + PUPIL_APRILTAGS_AVAILABLE = True +except ImportError: + print("āš ļø pupil-apriltags library not found. Install with: pip install pupil-apriltags") + PUPIL_APRILTAGS_AVAILABLE = False + +def load_camera_intrinsics(config_path="config/ac_lab_camera_calibration.yaml"): + """Load camera intrinsics from YAML configuration file.""" + try: + with open(config_path, 'r') as f: + calib = yaml.safe_load(f) + return calib + except FileNotFoundError: + print(f"āŒ Camera intrinsics file not found: {config_path}") + return None + +def get_camera_matrices(calib): + """Extract camera matrix and distortion coefficients.""" + K = np.array(calib["camera_matrix"], dtype=np.float32) + + # Handle different distortion coefficient formats + if isinstance(calib["distortion_coefficients"][0], list): + # Old format: nested list + dist = np.array(calib["distortion_coefficients"][0], dtype=np.float32) + else: + # New format: flat list + dist = np.array(calib["distortion_coefficients"], dtype=np.float32) + + return K, dist + + +def detect_apriltag_pupil(image_path, camera_matrix, dist_coeffs, tag_size_m=0.05): + """ + Detect AprilTag using the pupil-apriltags library. + """ + if not PUPIL_APRILTAGS_AVAILABLE: + return None + + # Load image + img = cv2.imread(image_path) + if img is None: + print(f"āŒ Could not load image: {image_path}") + return None + + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + + # Camera parameters for pupil-apriltags (fx, fy, cx, cy) + camera_params = [camera_matrix[0,0], camera_matrix[1,1], camera_matrix[0,2], camera_matrix[1,2]] + + # Try different tag families + families_to_try = ['tag36h11', 'tagStandard41h12', 'tag25h9', 'tag16h5'] + detections = None # Ensure defined even if every family below fails + + for family in families_to_try: + print(f" Trying {family}...") + try: + # Initialize detector + detector = Detector(families=family) + + # Detect tags with pose estimation + detections = detector.detect( + gray, + estimate_tag_pose=True, + camera_params=camera_params, + tag_size=tag_size_m + ) + + if detections: + break + except Exception as e: + print(f" Error with {family}: {e}") + continue + + if detections: + print(f"āœ… pupil-apriltags detected {len(detections)} tag(s) with {family}") + + results = [] + for detection in detections: + tag_id = detection.tag_id + corners = detection.corners + center = detection.center + + print(f" Tag ID {tag_id}:") + print(f" Center: [{center[0]:.1f}, {center[1]:.1f}]") + print(f" Corners: {corners.shape}") + + # Use pose from pupil-apriltags if available + if detection.pose_t is not None and detection.pose_R is not None: + # Convert rotation matrix to rotation vector + rvec, _ = cv2.Rodrigues(detection.pose_R) + tvec = detection.pose_t.flatten() + + results.append({ + 'tag_id': tag_id, + 'corners': corners, + 'center': center, + 'rvec': rvec.flatten(), + 'tvec': tvec, + 'method': 'pupil_apriltags' + }) + + print(f" Position (m): [{tvec[0]:.3f}, {tvec[1]:.3f}, {tvec[2]:.3f}]") + print(f" Rotation (rad): [{rvec[0][0]:.3f}, {rvec[1][0]:.3f}, {rvec[2][0]:.3f}]") + + # Calculate distance + distance_cm = np.linalg.norm(tvec) * 100 + print(f" Distance: {distance_cm:.1f}cm") + + # Convert rotation matrix to roll, pitch, yaw + try: + # Check if rotation matrix is valid (positive determinant)
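+ # A proper rotation matrix has determinant +1; a non-positive determinant + # indicates a reflected (left-handed) estimate, so the Euler-angle conversion + # below is skipped in favor of the raw rotation vector.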
+ det = np.linalg.det(detection.pose_R) + if det > 0: + euler = R.from_matrix(detection.pose_R).as_euler('xyz', degrees=True) + roll, pitch, yaw = euler + print(f" Roll: {roll:.2f}°, Pitch: {pitch:.2f}°, Yaw: {yaw:.2f}°") + else: + print(f" āš ļø Invalid rotation matrix (det={det:.3f}), using rotation vector instead") + # Use rotation vector as approximation + roll, pitch, yaw = np.degrees(rvec.flatten()) + print(f" Roll: {roll:.2f}°, Pitch: {pitch:.2f}°, Yaw: {yaw:.2f}°") + except ValueError as e: + print(f" āš ļø Rotation matrix error: {e}") + # Fallback to rotation vector + roll, pitch, yaw = np.degrees(rvec.flatten()) + print(f" Roll: {roll:.2f}°, Pitch: {pitch:.2f}°, Yaw: {yaw:.2f}° (from rvec)") + else: + print(f" āŒ No pose estimation available for tag {tag_id}") + + return results + + print("āŒ No AprilTags detected with any family") + return None + +def visualize_detections(image_path, detections, camera_matrix, dist_coeffs, output_path=None): + """ + Visualize detected AprilTags with pose axes. + """ + if not detections: + return + + img = cv2.imread(image_path) + if img is None: + return + + for detection in detections: + tag_id = detection['tag_id'] + corners = detection['corners'] + rvec = detection['rvec'] + tvec = detection['tvec'] + + # Draw tag outline + corners_int = corners.astype(int) + cv2.polylines(img, [corners_int], True, (0, 255, 0), 2) + + # Draw tag ID + center = corners.mean(axis=0).astype(int) + cv2.putText(img, f"ID: {tag_id}", (center[0]-20, center[1]-10), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) + + # Draw coordinate axes + axis_length = 0.03 # 3cm axes + axis_points = np.array([ + [0, 0, 0], # Origin + [axis_length, 0, 0], # X-axis (red) + [0, axis_length, 0], # Y-axis (green) + [0, 0, axis_length], # Z-axis (blue) + ], dtype=np.float32) + + # Project 3D points to image + projected, _ = cv2.projectPoints(axis_points, rvec, tvec, camera_matrix, dist_coeffs) + projected = projected.astype(int).reshape(-1, 2) + + # Draw axes + origin = projected[0] + cv2.arrowedLine(img, origin, projected[1], (0, 0, 255), 3) # X-axis: red + cv2.arrowedLine(img, origin, projected[2], (0, 255, 0), 3) # Y-axis: green + cv2.arrowedLine(img, origin, projected[3], (255, 0, 0), 3) # Z-axis: blue + + # Save or display result + if output_path: + cv2.imwrite(output_path, img) + print(f"āœ… Visualization saved to: {output_path}") + else: + cv2.imshow('AprilTag Detection', img) + print("Press any key to close the visualization...") + cv2.waitKey(0) + cv2.destroyAllWindows() + +def save_pose_to_json(detections, output_path): + """Save 6DOF pose data to JSON file""" + pose_data = { + "timestamp": str(datetime.now()), + "detections": [] + } + + for detection in detections: + tag_id = detection['tag_id'] + tvec = detection['tvec'] + + # Calculate roll, pitch, yaw from rotation vector + rvec = detection['rvec'] + R_matrix, _ = cv2.Rodrigues(rvec) + euler = R.from_matrix(R_matrix).as_euler('xyz', degrees=True) + roll, pitch, yaw = euler + + tag_data = { + "tag_id": int(tag_id), + "position": { + "x": float(tvec[0]), + "y": float(tvec[1]), + "z": float(tvec[2]) + }, + "orientation": { + "roll": float(roll), + "pitch": float(pitch), + "yaw": float(yaw) + }, + "distance_cm": float(np.linalg.norm(tvec) * 100) + } + pose_data["detections"].append(tag_data) + + # Save to JSON file + with open(output_path, 'w') as f: + json.dump(pose_data, f, indent=2) + + print(f"āœ… Pose data saved to: {output_path}") + +def main(): + parser 
= argparse.ArgumentParser(description='Detect AprilTags in camera image') + parser.add_argument('image', nargs='?', default='img1.png', + help='Path to image file (default: img1.png)') + parser.add_argument('--tag-size', type=float, default=0.05, + help='AprilTag size in meters (default: 0.05m = 5cm)') + parser.add_argument('--visualize', action='store_true', + help='Show visualization with detected tags') + parser.add_argument('--save-viz', type=str, + help='Save visualization to file') + parser.add_argument('--save-json', type=str, + help='Save pose data to JSON file') + + args = parser.parse_args() + + # Check if image exists + if not Path(args.image).exists(): + print(f"āŒ Image file not found: {args.image}") + sys.exit(1) + + print("šŸ·ļø AprilTag Detection Starting...") + print(f" Image: {args.image}") + print(f" Tag size: {args.tag_size}m") + + # Load camera intrinsics + print("\nšŸ“· Loading camera intrinsics...") + calib = load_camera_intrinsics() + if calib is None: + sys.exit(1) + + K, dist = get_camera_matrices(calib) + print(f"āœ… Camera intrinsics loaded") + + # Extract focal lengths from camera matrix + fx, fy = K[0,0], K[1,1] + print(f" Focal length: fx={fx:.1f}, fy={fy:.1f}") + + # Handle different image size formats + if 'image_size' in calib: + print(f" Image size: {calib['image_size']}") + else: + print(f" Image size: not specified in calibration") + + # Try AprilTag detection + print(f"\nšŸ” Detecting AprilTags in {args.image}...") + + # Use pupil-apriltags for detection + detections = detect_apriltag_pupil(args.image, K, dist, args.tag_size) + + # Results + if detections: + print(f"\nāœ… Detection successful! Found {len(detections)} tag(s)") + + # Show visualization if requested + if args.visualize or args.save_viz: + print("\nšŸ–¼ļø Generating visualization...") + visualize_detections(args.image, detections, K, dist, args.save_viz) + + # Save pose data to JSON if requested + if args.save_json: + save_pose_to_json(detections, args.save_json) + + print("\nšŸ“Š Summary:") + for det in detections: + print(f" Tag {det['tag_id']}: Position [{det['tvec'][0]:.3f}, {det['tvec'][1]:.3f}, {det['tvec'][2]:.3f}]m") + else: + print("\nāŒ No AprilTags detected in image") + print(" Possible issues:") + print(" - No AprilTag in the image") + print(" - AprilTag too small/blurry/angled") + print(" - Wrong tag family (script expects tag36h11)") + print(" - Poor lighting conditions") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/ac_training_lab/a1_cam/hand_eye_calibration.py b/src/ac_training_lab/a1_cam/hand_eye_calibration.py new file mode 100644 index 00000000..ae1a2370 --- /dev/null +++ b/src/ac_training_lab/a1_cam/hand_eye_calibration.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python3 +""" +Hand-Eye Calibration for A1 Mini Robot Arm +Calculates the transformation between camera and end-effector coordinates + +This script performs hand-eye calibration to determine the fixed transformation +between the camera and the robot arm's end-effector. Once calibrated, you can +convert AprilTag positions from camera coordinates to robot arm coordinates. + +Usage: +1. Move robot to different poses while keeping AprilTag in view +2. For each pose, record: robot pose + AprilTag detection +3. Run calibration to compute camera-to-end-effector transformation +4. 
Use transformation to convert future detections to robot coordinates +""" + +import numpy as np +import cv2 +import json +import yaml +from scipy.spatial.transform import Rotation as R +from scipy.optimize import least_squares +import argparse +from datetime import datetime + +class HandEyeCalibration: + def __init__(self): + self.calibration_data = [] + self.camera_to_ee_transform = None + + def add_calibration_point(self, robot_pose, apriltag_pose): + """ + Add a calibration data point + + Args: + robot_pose: dict with keys 'position' [x,y,z] and 'orientation' [roll,pitch,yaw] + in robot base coordinates (meters, degrees) + apriltag_pose: dict with keys 'position' [x,y,z] and 'orientation' [roll,pitch,yaw] + in camera coordinates (meters, degrees) + """ + # Convert orientations to rotation matrices + robot_rot = R.from_euler('xyz', robot_pose['orientation'], degrees=True).as_matrix() + camera_rot = R.from_euler('xyz', apriltag_pose['orientation'], degrees=True).as_matrix() + + calibration_point = { + 'robot_position': np.array(robot_pose['position']), + 'robot_rotation': robot_rot, + 'camera_position': np.array(apriltag_pose['position']), + 'camera_rotation': camera_rot, + 'timestamp': datetime.now().isoformat() + } + + self.calibration_data.append(calibration_point) + print(f"āœ… Added calibration point {len(self.calibration_data)}") + + def load_calibration_data(self, json_file): + """Load calibration data from JSON file""" + with open(json_file, 'r') as f: + data = json.load(f) + + for point in data['calibration_points']: + robot_pose = { + 'position': point['robot_pose']['position'], + 'orientation': point['robot_pose']['orientation'] + } + apriltag_pose = { + 'position': point['apriltag_pose']['position'], + 'orientation': point['apriltag_pose']['orientation'] + } + self.add_calibration_point(robot_pose, apriltag_pose) + + print(f"šŸ“‚ Loaded {len(self.calibration_data)} calibration points") + + def save_calibration_data(self, json_file): + """Save calibration data to JSON file""" + data = { + 'calibration_info': { + 'description': 'Hand-eye calibration data for A1 Mini robot arm', + 'date_created': datetime.now().isoformat(), + 'num_points': len(self.calibration_data) + }, + 'calibration_points': [] + } + + for i, point in enumerate(self.calibration_data): + # Convert rotation matrices back to euler angles for storage + robot_euler = R.from_matrix(point['robot_rotation']).as_euler('xyz', degrees=True) + camera_euler = R.from_matrix(point['camera_rotation']).as_euler('xyz', degrees=True) + + data['calibration_points'].append({ + 'point_id': i + 1, + 'timestamp': point['timestamp'], + 'robot_pose': { + 'position': point['robot_position'].tolist(), + 'orientation': robot_euler.tolist() + }, + 'apriltag_pose': { + 'position': point['camera_position'].tolist(), + 'orientation': camera_euler.tolist() + } + }) + + with open(json_file, 'w') as f: + json.dump(data, f, indent=2) + + print(f"šŸ’¾ Saved calibration data to {json_file}") + + def calibrate_hand_eye(self): + """ + Perform hand-eye calibration using AX=XB formulation + + A = robot end-effector motion + B = camera motion (inverse of AprilTag motion) + X = camera-to-end-effector transformation (what we want to find) + + Returns: + success: bool indicating if calibration succeeded + transform: 4x4 transformation matrix from camera to end-effector + """ + if len(self.calibration_data) < 3: + print("āŒ Need at least 3 calibration points") + return False, None + + print(f"šŸ”„ Computing hand-eye calibration with 
{len(self.calibration_data)} points...") + + # Use OpenCV's hand-eye calibration. cv2.calibrateHandEye expects the + # absolute pose of the end-effector in the robot base frame (gripper->base) + # and of the AprilTag in the camera frame (target->cam) at every station; + # it forms the relative motions A and B internally. + robot_rotations = [point['robot_rotation'] for point in self.calibration_data] + robot_translations = [point['robot_position'].reshape(-1, 1) for point in self.calibration_data] + camera_rotations = [point['camera_rotation'] for point in self.calibration_data] + camera_translations = [point['camera_position'].reshape(-1, 1) for point in self.calibration_data] + + # Perform calibration + try: + R_cam2ee, t_cam2ee = cv2.calibrateHandEye( + robot_rotations, robot_translations, + camera_rotations, camera_translations, + method=cv2.CALIB_HAND_EYE_TSAI + ) + + # Create 4x4 transformation matrix + self.camera_to_ee_transform = np.eye(4) + self.camera_to_ee_transform[:3, :3] = R_cam2ee + self.camera_to_ee_transform[:3, 3] = t_cam2ee.flatten() + + print("āœ… Hand-eye calibration successful!") + return True, self.camera_to_ee_transform + + except Exception as e: + print(f"āŒ Hand-eye calibration failed: {e}") + return False, None + + def save_transform(self, yaml_file): + """Save camera-to-end-effector transformation to YAML""" + if self.camera_to_ee_transform is None: + print("āŒ No calibration to save") + return + + # Extract rotation and translation + R_cam2ee = self.camera_to_ee_transform[:3, :3] + t_cam2ee = self.camera_to_ee_transform[:3, 3] + + # Convert rotation to euler angles + euler_cam2ee = R.from_matrix(R_cam2ee).as_euler('xyz', degrees=True) + + transform_data = { + 'hand_eye_calibration': { + 'description': 'Camera to end-effector transformation', + 'calibration_date': datetime.now().isoformat(), + 'num_calibration_points': len(self.calibration_data), + 'transformation_matrix': self.camera_to_ee_transform.tolist(), + 'translation': { + 'x': float(t_cam2ee[0]), + 'y': float(t_cam2ee[1]), + 'z': float(t_cam2ee[2]) + }, + 'rotation_euler_degrees': { + 'roll': float(euler_cam2ee[0]), + 'pitch': float(euler_cam2ee[1]), + 'yaw': float(euler_cam2ee[2]) + } + } + } + + with open(yaml_file, 'w') as f: + yaml.dump(transform_data, f, default_flow_style=False) + + print(f"šŸ’¾ Saved transformation to {yaml_file}") + + def transform_pose(self, camera_pose): + """ + Transform AprilTag pose from camera coordinates to end-effector coordinates + + Args: + camera_pose: dict with 'position' and 'orientation' in camera coordinates + + Returns: + ee_pose: dict with 'position' and 'orientation' in end-effector coordinates + """ + if self.camera_to_ee_transform is None: + print("āŒ No hand-eye calibration available") + return None + + # Convert camera pose to homogeneous transformation + camera_pos = np.array(camera_pose['position']) + camera_rot = R.from_euler('xyz', camera_pose['orientation'], degrees=True).as_matrix() + + T_camera = np.eye(4) + T_camera[:3, :3] = camera_rot + T_camera[:3, 3] = camera_pos + + # Transform to end-effector coordinates + T_ee = 
+
+        # Extract position and orientation
+        ee_pos = T_ee[:3, 3]
+        ee_rot = R.from_matrix(T_ee[:3, :3]).as_euler('xyz', degrees=True)
+
+        return {
+            'position': ee_pos.tolist(),
+            'orientation': ee_rot.tolist()
+        }
+
+
+def create_example_data():
+    """Create an example calibration data file"""
+    example_data = {
+        'calibration_info': {
+            'description': 'Example hand-eye calibration data for A1 Mini robot arm',
+            'instructions': [
+                '1. Move robot to various poses while keeping AprilTag visible',
+                '2. Record robot pose (x,y,z,roll,pitch,yaw) in base coordinates',
+                '3. Record AprilTag pose from camera detection',
+                '4. Repeat for 5-10 different robot poses',
+                '5. Run: python hand_eye_calibration.py calibrate example_data.json'
+            ]
+        },
+        'calibration_points': [
+            {
+                'point_id': 1,
+                'robot_pose': {
+                    'position': [0.15, 0.0, 0.25],    # Robot end-effector position (m)
+                    'orientation': [0.0, 0.0, 0.0]    # Robot end-effector orientation (deg)
+                },
+                'apriltag_pose': {
+                    'position': [-0.02, 0.01, 0.12],  # AprilTag in camera coordinates (m)
+                    'orientation': [0.0, 0.0, 0.0]    # AprilTag orientation (deg)
+                }
+            },
+            {
+                'point_id': 2,
+                'robot_pose': {
+                    'position': [0.12, 0.05, 0.22],
+                    'orientation': [10.0, 5.0, -15.0]
+                },
+                'apriltag_pose': {
+                    'position': [-0.05, 0.03, 0.15],
+                    'orientation': [8.0, 3.0, -12.0]
+                }
+            }
+        ]
+    }
+
+    with open('example_hand_eye_data.json', 'w') as f:
+        json.dump(example_data, f, indent=2)
+
+    print("šŸ“ Created example_hand_eye_data.json")
+    print("   Edit this file with your actual calibration data")
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Hand-Eye Calibration for A1 Mini Robot')
+    parser.add_argument('command', choices=['calibrate', 'transform', 'example'],
+                        help='Command to run')
+    parser.add_argument('data_file', nargs='?',
+                        help='JSON file with calibration data')
+    parser.add_argument('--output', '-o', default='hand_eye_transform.yaml',
+                        help='Output YAML file for the transformation')
+
+    args = parser.parse_args()
+
+    if args.command == 'example':
+        create_example_data()
+        return
+
+    if args.command == 'calibrate':
+        if not args.data_file:
+            print("āŒ Please provide a calibration data file")
+            return
+
+        calibrator = HandEyeCalibration()
+        calibrator.load_calibration_data(args.data_file)
+
+        success, transform = calibrator.calibrate_hand_eye()
+        if success:
+            calibrator.save_transform(args.output)
+
+            print("\nšŸŽÆ Hand-Eye Calibration Results:")
+            print(f"   Translation: {transform[:3, 3]}")
+            print("   Rotation matrix:")
+            print(f"   {transform[:3, :3]}")
+
+    elif args.command == 'transform':
+        # Placeholder: load a saved transform and an AprilTag pose, then call
+        # HandEyeCalibration.transform_pose() on it
+        print("šŸ”„ Transform example - load your transform and AprilTag pose")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/src/ac_training_lab/a1_cam/img1_detection_corrected.json b/src/ac_training_lab/a1_cam/img1_detection_corrected.json
new file mode 100644
index 00000000..34a0fe13
--- /dev/null
+++ b/src/ac_training_lab/a1_cam/img1_detection_corrected.json
@@ -0,0 +1,21 @@
+{
+  "timestamp": "2025-11-28 15:11:00.619489",
+  "detections": [
+    {
+      "tag_id": 110,
+      "position": {
+        "x": -0.027993731713029064,
+        "y": 0.04610861550706114,
+        "z": 0.4009576706127675
+      },
+      "orientation": {
+        "roll": -14.300916549107134,
+        "pitch": 1.9465526437791372,
+        "yaw": 7.9753109087608705
+      },
+      "distance_cm": 40.456978021401405,
+      "transformation_applied": "horizontal_flip_correction"
+    }
+  ],
+  "coordinate_system": "original_camera_orientation"
+}
\ No newline at end of file
diff --git a/src/ac_training_lab/a1_cam/requirements.txt b/src/ac_training_lab/a1_cam/requirements.txt
index 28647589..75e37866 100644
--- a/src/ac_training_lab/a1_cam/requirements.txt
+++ b/src/ac_training_lab/a1_cam/requirements.txt
@@ -1,3 +1,7 @@
 boto3
 paho-mqtt
+opencv-python
+PyYAML
+numpy
+scipy
 # picamera2 via `sudo apt install python3-picamera2` (see README)
diff --git a/src/ac_training_lab/a1_cam/transform_coordinates.py b/src/ac_training_lab/a1_cam/transform_coordinates.py
new file mode 100644
index 00000000..9b1d4cd1
--- /dev/null
+++ b/src/ac_training_lab/a1_cam/transform_coordinates.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+"""
+AprilTag Coordinate Transformation
+Corrects AprilTag poses detected in flipped images back to the original coordinate system
+"""
+
+import argparse
+import json
+
+import numpy as np
+
+
+def transform_vertical_flip(pose_data):
+    """
+    Transform coordinates from a vertically flipped image back to the original.
+
+    A vertical flip negates Y; here the pitch angle is negated while roll and
+    yaw are kept. The exact signs depend on the detector's Euler-angle
+    convention, so verify them against a known tag pose.
+    """
+    transformed_detections = []
+
+    for detection in pose_data['detections']:
+        # Transform position: negate Y, keep X and Z
+        original_pos = {
+            'x': detection['position']['x'],
+            'y': -detection['position']['y'],
+            'z': detection['position']['z']
+        }
+
+        # Transform orientation: negate pitch, keep roll and yaw
+        original_orient = {
+            'roll': detection['orientation']['roll'],
+            'pitch': -detection['orientation']['pitch'],
+            'yaw': detection['orientation']['yaw']
+        }
+
+        # Recalculate the distance
+        distance_cm = np.sqrt(original_pos['x']**2 + original_pos['y']**2 + original_pos['z']**2) * 100
+
+        transformed_detection = {
+            'tag_id': detection['tag_id'],
+            'position': original_pos,
+            'orientation': original_orient,
+            'distance_cm': distance_cm,
+            'transformation_applied': 'vertical_flip_correction'
+        }
+
+        transformed_detections.append(transformed_detection)
+
+    return {
+        'timestamp': pose_data['timestamp'],
+        'detections': transformed_detections,
+        'coordinate_system': 'original_camera_orientation'
+    }
+
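+def transform_horizontal_flip(pose_data):
+    """
+    Transform coordinates from a horizontally flipped image back to the original.
+
+    Sketch of the horizontal counterpart to transform_vertical_flip, which the
+    CLI below calls. It assumes roll/pitch/yaw are extrinsic 'xyz' Euler
+    angles about the camera X/Y/Z axes (as in hand_eye_calibration.py) and
+    that the mirrored detection relates to the true pose by conjugation with
+    diag(-1, 1, 1); verify the signs against a known tag pose.
+    """
+    transformed_detections = []
+
+    for detection in pose_data['detections']:
+        # Mirror about the camera's YZ plane: negate X, keep Y and Z
+        original_pos = {
+            'x': -detection['position']['x'],
+            'y': detection['position']['y'],
+            'z': detection['position']['z']
+        }
+
+        # Conjugation by diag(-1, 1, 1) keeps roll and negates pitch and yaw
+        original_orient = {
+            'roll': detection['orientation']['roll'],
+            'pitch': -detection['orientation']['pitch'],
+            'yaw': -detection['orientation']['yaw']
+        }
+
+        # Distance is unchanged by a mirror; recomputed to match the
+        # pattern used in transform_vertical_flip
+        distance_cm = np.sqrt(original_pos['x']**2 + original_pos['y']**2 + original_pos['z']**2) * 100
+
+        transformed_detections.append({
+            'tag_id': detection['tag_id'],
+            'position': original_pos,
+            'orientation': original_orient,
+            'distance_cm': distance_cm,
+            'transformation_applied': 'horizontal_flip_correction'
+        })
+
+    return {
+        'timestamp': pose_data['timestamp'],
+        'detections': transformed_detections,
+        'coordinate_system': 'original_camera_orientation'
+    }
+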
+def transform_coordinates(input_file, flip_type, output_file=None):
+    """
+    Apply a coordinate transformation to correct for image flipping.
+
+    Args:
+        input_file: JSON file with AprilTag detections from a flipped image
+        flip_type: 'horizontal' or 'vertical'
+        output_file: output file name (optional)
+    """
+    # Load the detection data
+    with open(input_file, 'r') as f:
+        pose_data = json.load(f)
+
+    print(f"šŸ”„ Transforming coordinates from {flip_type} flip")
+    print(f"šŸ“‚ Input: {input_file}")
+
+    # Apply the transformation
+    if flip_type == 'horizontal':
+        transformed_data = transform_horizontal_flip(pose_data)
+    elif flip_type == 'vertical':
+        transformed_data = transform_vertical_flip(pose_data)
+    else:
+        raise ValueError("flip_type must be 'horizontal' or 'vertical'")
+
+    # Generate an output filename if none was provided
+    if output_file is None:
+        base_name = input_file.replace('.json', '')
+        output_file = f"{base_name}_corrected.json"
+
+    # Save the transformed data
+    with open(output_file, 'w') as f:
+        json.dump(transformed_data, f, indent=2)
+
+    print(f"āœ… Corrected coordinates saved to: {output_file}")
+
+    # Show a before/after comparison for the first detection, if any
+    if pose_data['detections']:
+        original_detection = pose_data['detections'][0]
+        corrected_detection = transformed_data['detections'][0]
+
+        print("\nšŸ“Š Coordinate Transformation Results:")
+        print(f"   Tag ID: {original_detection['tag_id']}")
+        print(f"   Original Position:  ({original_detection['position']['x']:.3f}, {original_detection['position']['y']:.3f}, {original_detection['position']['z']:.3f})")
+        print(f"   Corrected Position: ({corrected_detection['position']['x']:.3f}, {corrected_detection['position']['y']:.3f}, {corrected_detection['position']['z']:.3f})")
+        print(f"   Original Distance:  {original_detection['distance_cm']:.1f}cm")
+        print(f"   Corrected Distance: {corrected_detection['distance_cm']:.1f}cm")
+
+    return output_file
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Transform AprilTag coordinates to correct for image flipping')
+    parser.add_argument('input_file', help='JSON file with AprilTag detections from a flipped image')
+    parser.add_argument('flip_type', choices=['horizontal', 'vertical'],
+                        help='Type of flip applied to the image')
+    parser.add_argument('--output', '-o', help='Output file name (optional)')
+
+    args = parser.parse_args()
+
+    transform_coordinates(args.input_file, args.flip_type, args.output)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file