Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 18 additions & 5 deletions ros2_ws/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ To set up and run this workspace, you have several options:
### With sample images
> **Warning:** The paths mentioned in the instructions are absolute paths based on the Jetson's directory. These arguments can be changed to be relative to your current directory.

1. Navigate to the `/ROS/workspace_python/ros2_ws` directory.
1. Navigate to the `/ROS/ros2_ws` directory.
2. Create temporary variables to store the path values:
```bash
IMAGE_FOLDER_PATH=/home/user/ROS/assets/maize
Expand All @@ -41,28 +41,41 @@ To set up and run this workspace, you have several options:
rosdep install --from-paths src --ignore-src -r -y

# Build the workspace
colcon build --symlink-install --packages-select custom_interface python_package
colcon build --symlink-install --packages-select custom_interfaces python_package

# why --symlink install: https://answers.ros.org/question/371822/what-is-the-use-of-symlink-install-in-ros2-colcon-build/
```
4. Run the `picture_node`:
4. Run the `picture_node` or `video_node`:
```bash
source install/setup.bash
ros2 run python_package picture_node --ros-args -p static_image_path:=$IMAGE_FOLDER_PATH -p frame_rate:=1
```

For video node, run:
```
VIDEO_PATH=write/path/here
ros2 run python_package video_node --ros-args -p static_image_path:=$VIDEO_PATH -p frame_rate:=10
```
5. Run the `inference_node` in a new terminal:
```bash
MODEL_WEIGHT_PATH=/home/user/ROS/models/maize/Maize.pt
source install/setup.bash
ros2 run python_package inference_node --ros-args -p weights_path:=$MODEL_WEIGHT_PATH
ros2 run python_package inference_node --ros-args -p weights_path:=$MODEL_WEIGHT_PATH
```
6. Run the `extermination_node` in a new terminal:
```bash
source install/setup.bash
ros2 run python_package extermination_node
ros2 run python_package extermination_node --ros-args -p use_display_node:=False
```


### Getting the plant count using the service
#### Prerequisites:
- Extermination node must be active
```bash
source install/setup.bash
ros2 service call /reset_tracker custom_interfaces/srv/GetRowPlantCount "{}"
```
## Common troubleshooting
Some common troubleshooting steps include:

Expand Down
2 changes: 1 addition & 1 deletion ros2_ws/src/python_package/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ Other nodes are still a WIP
## Additional information
### Compiling new Changes
```bash
colcon build --symlink-install --packages-select custom_interface python_package
colcon build --symlink-install --packages-select custom_interfaces python_package
source install/setup.bash
```

Expand Down
14 changes: 5 additions & 9 deletions ros2_ws/src/python_package/python_package/extermination_node.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
import time, os
import cv2
import serial
# import pycuda.driver as cuda
# from tracker import *
# depth point cloud here...
Expand All @@ -25,7 +23,7 @@ class ExterminationNode(Node):
def __init__(self):
super().__init__('extermination_node')

self.declare_parameter('use_display_node', True)
self.declare_parameter('use_display_node', False)
self.declare_parameter('camera_side', 'left')

self.use_display_node = self.get_parameter('use_display_node').get_parameter_value().bool_value
Expand All @@ -39,7 +37,7 @@ def __init__(self):
self.minimum_confidence = 0.5

self.boxes_present = 0
self.tracker = EuclideanDistTracker() #value sent to arduino
self.tracker = EuclideanDistTracker()
self.bridge = CvBridge()
self.boxes_msg = Int8()
self.boxes_msg.data = 0
Expand All @@ -57,7 +55,7 @@ def get_tracker_row_count_callback(self,request,response):
return the tracker's current row count
"""
row_count = self.tracker.reset()
response.row_count = row_count
response.plant_count = row_count
return response

def inference_callback(self, msg):
Expand All @@ -71,13 +69,12 @@ def inference_callback(self, msg):

bounding_boxes = postprocess(confidence,bboxes, raw_image,msg.velocity)

bbox_with_label = self.tracker.update(bounding_boxes)

final_image = draw_boxes(raw_image,bounding_boxes,velocity=msg.velocity)
final_image = draw_boxes(raw_image,bbox_with_label,velocity=msg.velocity)

self.tracker.update(bounding_boxes)

if self.use_display_node:
# Create a CUDA window and display the cv_image
cv2.imshow(self.window, final_image)
cv2.waitKey(10)

Expand All @@ -90,7 +87,6 @@ def inference_callback(self, msg):
self.boxes_msg.data = self.boxes_present

def timer_callback(self):
#send msg to arduino
self.box_publisher.publish(self.boxes_msg)
self.get_logger().info("Published results to Proxy Node")

Expand Down
8 changes: 5 additions & 3 deletions ros2_ws/src/python_package/python_package/picture_node.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import time, os
import os
import cv2
import random
import rclpy
from rclpy.time import Time
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from sensor_msgs.msg import Image
from std_msgs.msg import Header, String
from std_msgs.msg import Header
from cv_bridge import CvBridge
import queue
import numpy as np
Expand Down Expand Up @@ -57,6 +57,8 @@ def get_images(self)-> list[np.ndarray]:
self.get_logger().error(f"Static image not found at {self.static_image_path}")
raise FileNotFoundError(f"Static image not found at {self.static_image_path}")

filename = self.static_image_path

image_paths = []
if os.path.isfile(self.static_image_path) \
and (filename.endswith('.JPG') or filename.endswith('.png')):
Expand Down Expand Up @@ -114,7 +116,7 @@ def publish_static_image(self):

# publish image and increment whatever is needed
self.input_image_publisher.publish(image_input)

self.get_logger().info(f"Published image {self.image_counter}")
self.image_counter += 1
self.loop_count = self.image_counter // array_size
else:
Expand Down
26 changes: 23 additions & 3 deletions ros2_ws/src/python_package/python_package/scripts/tracker.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,30 @@
import math
THRESHOLD = 200

class EuclideanDistTracker:
def __init__(self):
"""
source: https://www.analyticsvidhya.com/blog/2022/05/a-tutorial-on-centroid-tracker-counter-system/
Object tracking using euclidean distance. TLDR:
- keep list of obj center points from the previous frame
- compare the center points of the current frame to the previous frame
- if the distance between the center points is less than a threshold, then it is the same object
- if the distance is greater than the threshold, then it is a new object

Current issues/limitations:
- Need to define our own threshold value, which is somewhat dependent on the bot speed and camera frame rate
- Dependent on proper object detection - if the obj detection fails the previous frame,
the tracker will see everything as a new object
- If two detections are close to each other, tracker will assign them the same object ID.
A "fix" right now is to keep track of previous IDs that we "used", so we don't assign them again.
"""

def __init__(self, threshold=THRESHOLD):
# Store the center positions of the objects
self.center_points = {}
# Keep the count of the IDs
# each time a new object id detected, the count will increase by one
self.id_count = 0
self.threshold = threshold

def reset(self):
""" resets the count and also returns val of curr row
Expand All @@ -20,6 +38,7 @@ def reset(self):
def update(self, objects_rect):
# Objects boxes and ids
objects_bbs_ids = []
centers = self.center_points.copy()

# Get center point of new object
for rect in objects_rect:
Expand All @@ -29,12 +48,13 @@ def update(self, objects_rect):

# Find out if that object was detected already
same_object_detected = False
for item_id, pt in self.center_points.items():
for item_id, pt in centers.items():
dist = math.hypot(cx - pt[0], cy - pt[1])
if dist < 200:
if dist < self.threshold:
self.center_points[item_id] = (cx, cy)
objects_bbs_ids.append([x1, y1, x2, y2, item_id])
same_object_detected = True
centers.pop(item_id)
break

# New object is detected we assign the ID to that object
Expand Down
36 changes: 28 additions & 8 deletions ros2_ws/src/python_package/python_package/scripts/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,24 @@ def initialise_model(weights_path=None, precision=None):
yolo = YOLO(weights_path)
return yolo

def _resize_with_padding(image: np.ndarray):
    """
    Letterbox-resize an image to POSTPROCESS_OUTPUT_SHAPE.

    The image is scaled while preserving its aspect ratio so it fits
    inside the target shape, then centered with a constant-color border
    filling the leftover space.

    Args:
        image (np.ndarray): input image in OpenCV layout (H x W x C).

    Returns:
        np.ndarray: image of exactly POSTPROCESS_OUTPUT_SHAPE (width,
        height), padded with a constant color where needed.
    """
    target_w, target_h = POSTPROCESS_OUTPUT_SHAPE
    padding_color = (0, 255, 255)  # yellow in BGR — assumes BGR input; confirm upstream
    orig_w, orig_h = image.shape[1], image.shape[0]

    # Scale by the limiting dimension. The previous max/max ratio produced
    # negative border sizes (a copyMakeBorder error) for non-square targets;
    # min of the per-axis ratios is equivalent for square targets and safe
    # for any target shape.
    ratio = min(float(target_w) / orig_w, float(target_h) / orig_h)
    new_w, new_h = int(orig_w * ratio), int(orig_h * ratio)
    image = cv2.resize(image, (new_w, new_h))

    # Distribute the leftover space as symmetrically as possible.
    delta_w = target_w - new_w
    delta_h = target_h - new_h
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)

    preprocessed_img = cv2.copyMakeBorder(
        image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=padding_color
    )

    return preprocessed_img

# Not used for now
def preprocess(image: np.ndarray):
"""
Takes in a numpy array that has been preprocessed
Expand Down Expand Up @@ -85,8 +101,8 @@ def postprocess(confidence, bbox_array: np.ndarray,raw_image: np.ndarray, veloci
"""

detections = _convert_bboxes_to_pixel(bbox_array, raw_image.shape)
detections = _object_filter(raw_image, detections) #color segmentation
detections = _verify_object(raw_image, detections,velocity)
# detections = _object_filter(raw_image, detections) #color segmentation
# detections = _verify_object(raw_image, detections,velocity)

return detections

Expand Down Expand Up @@ -115,7 +131,6 @@ def _convert_bboxes_to_pixel(bbox_array: np.ndarray, image_shape: Tuple[int, int
def _object_filter(image: np.ndarray, bboxes: List[Tuple[int, int, int, int]]) -> List[Tuple[int, int, int, int]]:
"""
Filters objects in an image based on bounding boxes and color thresholds.

Args:
image (np.ndarray): The input image in which objects are to be filtered.
bboxes (List[Tuple[int, int, int, int]]): A list of bounding boxes.
Expand Down Expand Up @@ -189,7 +204,7 @@ def _verify_object(raw_image, bboxes, velocity=0):
return adjusted_bboxes


def draw_boxes(image: np.ndarray, bboxes: list, with_roi =True, with_roi_shift = True, velocity = 0) -> np.ndarray:
def draw_boxes(image: np.ndarray, bboxes: list, with_roi =False, with_roi_shift = False, velocity = 0) -> np.ndarray:
"""
Given array of bounding box tuples and an image, draw the bounding boxes into the image.
If with_roi and with_roi shift is set to true, the ROI areas will also be drawn in.
Expand Down Expand Up @@ -225,10 +240,15 @@ def draw_boxes(image: np.ndarray, bboxes: list, with_roi =True, with_roi_shift =
color = tuple(np.random.randint(0, 256, 3).tolist()) # Generate a random color

for bbox in bboxes:
x1, y1, x2, y2 = map(int, bbox)
x1, y1, x2, y2, label = map(int, bbox)

# print(f"Bounding box: ({x1}, {y1}), ({x2}, {y2})")
image = cv2.rectangle(image, (x1, y1), (x2, y2),(255, 0, 0), 2)

image = cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
label = f"Object {label}"
cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)


image = _resize_with_padding(image)

return image

Expand Down
Loading