my code is as follows"import xml.etree.ElementTree as ET
from pathlib import Path
from collections import defaultdict
from fastai.vision.all import *
import numpy as np
import torch
from yolov5 import YOLOv5
Define the paths to the dataset and annotations
dataset_path = Path(‘C:/Users/nirma/OneDrive/Documents/studies/CarbonArray’)
images_path = dataset_path / ‘Images’
annotations_path = dataset_path / ‘Annotations’
Function to get annotations
def get_annotations(image_path, annotations_path):
try:
annotation_file = annotations_path / (image_path.stem + ‘.xml’)
tree = ET.parse(annotation_file)
root = tree.getroot()
bboxes = []
labels = []
for obj in root.findall('object'):
bbox = obj.find('bndbox')
bndbox = [float(bbox.find(tag).text) for tag in ['xmin', 'ymin', 'xmax', 'ymax']]
label = obj.find('name').text # Ensure that this matches the actual tag in your XML files
bboxes.append(bndbox)
labels.append(label)
# Convert to numpy arrays with explicit types
if len(bboxes) > 0:
bboxes = np.array(bboxes, dtype=np.float32)
else:
bboxes = np.zeros((0, 4), dtype=np.float32)
if len(labels) > 0:
labels = np.array(labels, dtype=str)
else:
labels = np.array([], dtype=str)
print(f"Image: {image_path}")
print(f"Bounding boxes: {bboxes} (type: {bboxes.dtype})")
print(f"Labels: {labels} (type: {labels.dtype})")
return bboxes, labels
except Exception as e:
print(f"Error parsing annotations for {image_path}: {e}")
return np.zeros((0, 4), dtype=np.float32), np.array([], dtype=str)
Function to classify tree size based on pixel count
def get_tree_size_from_bbox(bbox):
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
area = width * height
if area < 1000:
return ‘small’
elif area < 5000:
return ‘medium’
else:
return ‘big’
Verify paths
print(f"Images path: {images_path}“)
print(f"Annotations path: {annotations_path}”)
print(f"Image files: {list(images_path.glob(‘*’))}")
Load your dataset using Fastai
def get_y(o):
bboxes, labels = get_annotations(o, annotations_path)
print(f"Annotations for {o}: bboxes={bboxes}, labels={labels}")
return bboxes, labels
Convert bounding boxes and labels to appropriate types within the DataBlock definition
def bbox_label_transform(o):
bboxes, labels = get_y(o)
print(f"Before conversion - Bounding boxes: {bboxes} (type: {type(bboxes)})“)
print(f"Before conversion - Labels: {labels} (type: {type(labels)})”)
bboxes = np.array(bboxes, dtype=np.float32)
labels = np.array(labels, dtype=str)
print(f"After conversion - Bounding boxes: {bboxes} (type: {bboxes.dtype})")
print(f"After conversion - Labels: {labels} (type: {labels.dtype})")
return bboxes, labels
Debug: Print the structure and types of the transformed data
def debug_transformed_data(o):
bboxes, labels = bbox_label_transform(o)
print(f"Transformed Bounding boxes: {bboxes} (type: {bboxes.dtype})“)
print(f"Transformed Labels: {labels} (type: {labels.dtype})”)
return bboxes, labels
data = DataBlock(
blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
get_items=get_image_files,
splitter=RandomSplitter(seed=42),
get_y=debug_transformed_data, # Use debug function to get detailed printout
item_tfms=Resize(460),
batch_tfms=aug_transforms(size=224, min_scale=0.75)
)
try:
dls = data.dataloaders(images_path)
except Exception as e:
print(f"Error creating dataloaders: {e}")
dls = None
if dls:
# Load the YOLOv5 model
try:
model = YOLOv5(‘yolov5s.pt’) # Ensure the correct path to your YOLOv5 model
except Exception as e:
print(f"Error loading YOLOv5 model: {e}")
model = None
if model:
# Train the model
try:
learn = Learner(dls, model, metrics=[error_rate])
learn.fine_tune(3)
learn.save('forest_tree_detector')
except Exception as e:
print(f"Error during model training: {e}")
# Inference on test images
test_files = get_image_files(dataset_path / 'test_images')
test_dl = dls.test_dl(test_files)
# Run predictions
try:
results = learn.get_preds(dl=test_dl)
except Exception as e:
print(f"Error during predictions: {e}")
results = None
if results:
# Post-process results to count trees per species and size
species_size_counts = defaultdict(lambda: defaultdict(int))
for result in results:
for bbox, label in zip(result[0], result[1]):
species = label # Extract species from the label
size = get_tree_size_from_bbox(bbox) # Define this function based on bbox dimensions
species_size_counts[species][size] += 1
# Format the output
output = []
for species, size_counts in species_size_counts.items():
sizes_output = ', '.join(f'{count} {size}' for size, count in size_counts.items())
total_count = sum(size_counts.values())
output.append(f'{species} trees - {total_count} ({sizes_output})')
# Print the formatted output
for line in output:
print(line)
else:
print("Prediction results are None. Please check the predictions.")
else:
print("Model loading failed. Please check the YOLOv5 integration.")
else:
print(“Dataloaders creation failed. Please check the dataset path and annotations.”)
" i am always getting the error no matter what “Error creating dataloaders: can’t convert np.ndarray of type numpy.object_. The only supported types are: float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.
Dataloaders creation failed. Please check the dataset path and annotations.” i have been stuck on this for like 2 days sucked the life out of chatgpt… Please help me