Merge pull request Smorodov#436 from Nuzhny007/master
YOLOv8 instance segmentation
Nuzhny007 authored Nov 27, 2023
2 parents 2c9b33d + 2c466df commit 814ed80
Showing 20 changed files with 1,427 additions and 251 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -3,6 +3,8 @@

# Last changes

* YOLOv8 instance segmentation models now work with TensorRT! Export pretrained PyTorch models from [ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) to ONNX format and run Multitarget-tracker with the -e=6 example (see the export sketch after this list)

* Re-identification model osnet_x0_25_msmt17 from [mikel-brostrom/yolo_tracking](https://github.com/mikel-brostrom/yolo_tracking)

* YOLOv8 detector now works with TensorRT! Export pretrained PyTorch models from [ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) to ONNX format and run Multitarget-tracker with the -e=6 example
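The README entries above assume the segmentation checkpoint has already been exported to ONNX. A minimal export sketch, assuming the `ultralytics` pip package (its `export()` arguments may differ between versions):

```python
# Export a pretrained YOLOv8 instance-segmentation checkpoint to ONNX so it can be
# passed to Multitarget-tracker (e.g. as the yolov8s-seg.onnx referenced below).
# Assumes the ultralytics package is installed: pip install ultralytics
from ultralytics import YOLO

model = YOLO("yolov8s-seg.pt")          # pretrained segmentation weights, downloaded on first use
model.export(format="onnx", imgsz=640)  # writes yolov8s-seg.onnx next to the .pt file
```

The resulting .onnx file is what the new YOLOv8Mask case in combined/combined.cpp and the -e=6 example expect as input.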
21 changes: 16 additions & 5 deletions combined/combined.cpp
@@ -326,7 +326,8 @@ bool CombinedDetector::InitDetector(cv::UMat frame)
YOLOv6,
YOLOv7,
YOLOv7Mask,
- YOLOv8
+ YOLOv8,
+ YOLOv8Mask
};
YOLOModels usedModel = YOLOModels::YOLOv8;
switch (usedModel)
@@ -416,10 +417,8 @@ bool CombinedDetector::InitDetector(cv::UMat frame)
break;

case YOLOModels::YOLOv8:
- //configDNN.emplace("modelConfiguration", pathToModel + "yolov8s.onnx");
- //configDNN.emplace("modelBinary", pathToModel + "yolov8s.onnx");
- configDNN.emplace("modelConfiguration", "C:/work/mtracking/Nuzhny007/Multitarget-tracker/data/yolov8x.onnx");
- configDNN.emplace("modelBinary", "C:/work/mtracking/Nuzhny007/Multitarget-tracker/data/yolov8x.onnx");
+ configDNN.emplace("modelConfiguration", pathToModel + "yolov8s.onnx");
+ configDNN.emplace("modelBinary", pathToModel + "yolov8s.onnx");
configDNN.emplace("confidenceThreshold", "0.2");
configDNN.emplace("inference_precision", "FP16");
configDNN.emplace("net_type", "YOLOV8");
@@ -428,6 +427,18 @@ bool CombinedDetector::InitDetector(cv::UMat frame)
maxBatch = 1;
configDNN.emplace("maxCropRatio", "-1");
break;

case YOLOModels::YOLOv8Mask:
configDNN.emplace("modelConfiguration", pathToModel + "yolov8s-seg.onnx");
configDNN.emplace("modelBinary", pathToModel + "yolov8s-seg.onnx");
configDNN.emplace("confidenceThreshold", "0.2");
configDNN.emplace("inference_precision", "FP16");
configDNN.emplace("net_type", "YOLOV8Mask");
configDNN.emplace("inWidth", "640");
configDNN.emplace("inHeight", "640");
maxBatch = 1;
configDNN.emplace("maxCropRatio", "-1");
break;
}
configDNN.emplace("maxBatch", std::to_string(maxBatch));
configDNN.emplace("classNames", pathToModel + "coco.names");
8 changes: 0 additions & 8 deletions data/coco.data

This file was deleted.

File renamed without changes.
9 changes: 9 additions & 0 deletions data/coco/white_full.names
@@ -0,0 +1,9 @@
person
bicycle
car
motorbike
aeroplane
bus
train
truck
boat
103 changes: 103 additions & 0 deletions data/demo.py
@@ -0,0 +1,103 @@
import sys
import glob
import getopt
import numpy as np
import cv2 as cv
import pymtracking as mt

print("OpenCV Version: {}".format(cv.__version__))


def draw_regions(img, regions, color):
    for reg in regions:
        brect = reg.brect
        cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), color, 2)


def draw_tracks(img, tracks, fps):
    for track in tracks:
        brect = track.GetBoundingRect()
        if track.isStatic:
            cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), (255, 0, 255), 2)
        elif track.IsRobust(int(fps / 4), 0.7, (0.1, 10.), 3):
            cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), (0, 255, 0), 2)
            trajectory = track.GetTrajectory()
            for i in range(0, len(trajectory) - 1):
                cv.line(img, trajectory[i], trajectory[i+1], (0, 255, 0), 1)


def main():
    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)

    cam = cv.VideoCapture(video_src)

    _ret, img = cam.read()
    print("cam.read res = ", _ret, ", im size = ", img.shape)

    fps = cam.get(cv.CAP_PROP_FPS)
    print(video_src, " fps = ", fps)

    configBGFG = mt.KeyVal()
    configBGFG.Add('useRotatedRect', '20')
    configBGFG.Add('history', '1000')
    configBGFG.Add("nmixtures", "3")
    configBGFG.Add("backgroundRatio", "0.7")
    configBGFG.Add("noiseSigma", "0")
    print("configBGFG = ", configBGFG)
    mdetector = mt.BaseDetector(mt.BaseDetector.Detectors.MOG, configBGFG, img)
    print("CanGrayProcessing: ", mdetector.CanGrayProcessing())
    mdetector.SetMinObjectSize((1, 1))

    tracker_settings = mt.TrackerSettings()

    tracker_settings.SetDistance(mt.MTracker.DistRects)
    tracker_settings.kalmanType = mt.MTracker.KalmanLinear
    tracker_settings.filterGoal = mt.MTracker.FilterCenter
    tracker_settings.lostTrackType = mt.MTracker.TrackNone
    tracker_settings.matchType = mt.MTracker.MatchHungrian
    tracker_settings.useAcceleration = False
    tracker_settings.dt = 0.5
    tracker_settings.accelNoiseMag = 0.1
    tracker_settings.distThres = 0.95
    tracker_settings.minAreaRadiusPix = img.shape[0] / 5.
    tracker_settings.minAreaRadiusK = 0.8
    tracker_settings.useAbandonedDetection = False
    tracker_settings.maximumAllowedSkippedFrames = int(2 * fps)
    tracker_settings.maxTraceLength = int(2 * fps)

    mtracker = mt.MTracker(tracker_settings)

    while True:
        _ret, img = cam.read()
        if _ret:
            print("cam.read res = ", _ret, ", im size = ", img.shape, ", fps = ", fps)
        else:
            break

        mdetector.Detect(img)
        regions = mdetector.GetDetects()
        print("mdetector.Detect:", len(regions))

        mtracker.Update(regions, img, fps)
        tracks = mtracker.GetTracks()
        print("mtracker.Update:", len(tracks))

        vis = img.copy()
        # draw_regions(vis, regions, (255, 0, 255))
        draw_tracks(vis, tracks, fps)
        cv.imshow('detect', vis)

        if cv.waitKey(int(1000 / fps)) == 27:
            break

    print('Done')


if __name__ == '__main__':
    main()
    cv.destroyAllWindows()
155 changes: 155 additions & 0 deletions data/settings_coco.ini
@@ -0,0 +1,155 @@
[detection]

#-----------------------------
# opencv_dnn = 12
# darknet_cudnn = 10
# tensorrt = 11
detector_backend = 10

#-----------------------------
# Target and backend for opencv_dnn detector
# DNN_TARGET_CPU
# DNN_TARGET_OPENCL
# DNN_TARGET_OPENCL_FP16
# DNN_TARGET_MYRIAD
# DNN_TARGET_CUDA
# DNN_TARGET_CUDA_FP16
ocv_dnn_target = DNN_TARGET_CPU

# DNN_BACKEND_DEFAULT
# DNN_BACKEND_HALIDE
# DNN_BACKEND_INFERENCE_ENGINE
# DNN_BACKEND_OPENCV
# DNN_BACKEND_VKCOM
# DNN_BACKEND_CUDA
# DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
# DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
ocv_dnn_backend = DNN_BACKEND_OPENCV

#-----------------------------
# nn_weights = data/coco/yolov7.onnx
# nn_config = data/coco/yolov7.onnx

# nn_weights = data/coco/yolov6s.onnx
# nn_config = data/coco/yolov6s.onnx

nn_weights = C:/work/home/mtracker/Multitarget-tracker/data/yolov4.weights
nn_config = C:/work/home/mtracker/Multitarget-tracker/data/yolov4.cfg

class_names = C:/work/home/mtracker/Multitarget-tracker/data/coco.names

#-----------------------------
confidence_threshold = 0.2

max_crop_ratio = -1
max_batch = 1
gpu_id = 0

#-----------------------------
# YOLOV3
# YOLOV4
# YOLOV5
net_type = YOLOV4

#-----------------------------
# INT8
# FP16
# FP32
inference_precision = FP32

#-----------------------------
# Detect only this set of object types, separated by ";"
white_list =

#-----------------------------
# For TensorRT optimization, bytes
video_memory = 0


[tracking]

#-----------------------------
# DistCenters = 0 // Euclidean distance between centers, pixels
# DistRects = 1 // Euclidean distance between bounding rectangles, pixels
# DistJaccard = 2 // Intersection over Union, IoU, [0, 1]
# DistHist = 3 // Bhattacharyya distance between histograms, [0, 1]

distance_type = 0

#-----------------------------
# KalmanLinear = 0
# KalmanUnscented = 1

kalman_type = 0

#-----------------------------
# FilterCenter = 0
# FilterRect = 1

filter_goal = 0

#-----------------------------
# TrackNone = 0
# TrackKCF = 1
# TrackMIL = 2
# TrackMedianFlow = 3
# TrackGOTURN = 4
# TrackMOSSE = 5
# TrackCSRT = 6
# TrackDAT = 7
# TrackSTAPLE = 8
# TrackLDES = 9
# Used if filter_goal == FilterRect

lost_track_type = 0

#-----------------------------
# MatchHungrian = 0
# MatchBipart = 1

match_type = 0

#-----------------------------
# Use constant acceleration motion model:
# 0 - unused (stable)
# 1 - use acceleration in Kalman filter (experimental)
use_aceleration = 0

#-----------------------------
# Delta time for Kalman filter
delta_time = 0.4

#-----------------------------
# Accel noise magnitude for Kalman filter
accel_noise = 0.2

#-----------------------------
# Distance threshold between region and object on two frames
dist_thresh = 0.8

#-----------------------------
# If this value > 0, a circle with this radius is used
# If this value <= 0, an ellipse of size (3*vx, 3*vy) is used, where vx and vy are the horizontal and vertical speed in pixels
min_area_radius_pix = -1

#-----------------------------
# Minimal area radius as a ratio of the object size. Used if min_area_radius_pix < 0
min_area_radius_k = 0.8

#-----------------------------
# If the object is not assigned a detection for more than this number of frames, it will be removed
max_skip_frames = 50

#-----------------------------
# The maximum trajectory length
max_trace_len = 50

#-----------------------------
# Detection of abandoned objects
detect_abandoned = 0
# After this time (in seconds) the object is considered abandoned
min_static_time = 5
# After this time (in seconds) the abandoned object will be removed
max_static_time = 25
# Speed in pixels. If the speed of the object is greater than this value, the object is considered non-static
max_speed_for_static = 10
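For a rough idea of how the [tracking] keys above correspond to the Python API used in data/demo.py, here is a minimal sketch that loads this file and fills a TrackerSettings object. It assumes the pymtracking bindings shown in demo.py; enum names not appearing there (KalmanUnscented, FilterRect, the Dist* values other than DistRects) are taken only from the comments in this file and are not verified.

```python
# Sketch: build pymtracking TrackerSettings from the [tracking] section of settings_coco.ini.
# Assumes the bindings used in data/demo.py; unverified enum names come from the
# distance_type / kalman_type / filter_goal comments above.
import configparser
import pymtracking as mt

cfg = configparser.ConfigParser()
cfg.read("data/settings_coco.ini")
trk = cfg["tracking"]

settings = mt.TrackerSettings()
dist_names = ["DistCenters", "DistRects", "DistJaccard", "DistHist"]
settings.SetDistance(getattr(mt.MTracker, dist_names[trk.getint("distance_type")]))
settings.kalmanType = mt.MTracker.KalmanLinear if trk.getint("kalman_type") == 0 else mt.MTracker.KalmanUnscented
settings.filterGoal = mt.MTracker.FilterCenter if trk.getint("filter_goal") == 0 else mt.MTracker.FilterRect
settings.lostTrackType = mt.MTracker.TrackNone            # lost_track_type = 0 in this file
settings.matchType = mt.MTracker.MatchHungrian            # match_type = 0 in this file
settings.useAcceleration = bool(trk.getint("use_aceleration"))
settings.dt = trk.getfloat("delta_time")
settings.accelNoiseMag = trk.getfloat("accel_noise")
settings.distThres = trk.getfloat("dist_thresh")
settings.minAreaRadiusPix = trk.getfloat("min_area_radius_pix")
settings.minAreaRadiusK = trk.getfloat("min_area_radius_k")
settings.useAbandonedDetection = bool(trk.getint("detect_abandoned"))
settings.maximumAllowedSkippedFrames = trk.getint("max_skip_frames")
settings.maxTraceLength = trk.getint("max_trace_len")

tracker = mt.MTracker(settings)  # ready to be fed detections as in demo.py
```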