| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529 |
- #!python3
- """
- Python 3 wrapper for identifying objects in images
-
- Requires DLL compilation
-
- Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
- Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
-
- @author: Philip Kahn, Aymeric Dujardin
- @date: 20180911
- """
- # pylint: disable=R, W0401, W0614, W0703
- import cv2
- import pyzed.sl as sl
- from ctypes import *
- import math
- import random
- import os
- import numpy as np
- import statistics
- import sys
- import getopt
- from random import randint
- import time
- import pandas as pd
- from pandas import DataFrame as df
- import subprocess
-
-
# NOTE at 2020-12-22: if the SVO's resolution is changed, then you have to
# edit WIDTH, HEIGHT and DEPTH_MAX to match.
WIDTH = 1920   # expected frame width in pixels  (only referenced by the commented-out normalization in main())
HEIGHT = 1080  # expected frame height in pixels (only referenced by the commented-out normalization in main())
DEPTH_MAX = 8  # max depth in meters for normalization (only referenced by the commented-out normalization in main())
-
-
def sample(probs):
    """Draw a random index weighted by `probs`.

    `probs` is a sequence of non-negative weights (not necessarily
    normalized); index i is returned with probability probs[i]/sum(probs).
    The final index is returned as a fallback against floating-point
    round-off leaving r slightly positive.
    """
    total = sum(probs)
    normalized = [p / total for p in probs]
    r = random.uniform(0, 1)
    # Walk the CDF: the first index whose cumulative mass reaches r wins.
    for i, p in enumerate(normalized):
        r -= p
        if r <= 0:
            return i
    return len(normalized) - 1
-
-
def c_array(ctype, values):
    """Allocate a ctypes array of `ctype` and fill it from `values`."""
    buf = (ctype * len(values))()
    buf[:] = values
    return buf
-
-
class BOX(Structure):
    # ctypes mirror of darknet's C `box` struct. Field order and types
    # must match the C definition exactly; (x, y) is the box center and
    # (w, h) its extent, as produced by get_network_boxes.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
-
-
class DETECTION(Structure):
    # ctypes mirror of darknet's C `detection` struct. `prob` points to
    # a C array of per-class probabilities of length `classes`; memory is
    # owned by the C side and released via free_detections.
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
-
-
class IMAGE(Structure):
    # ctypes mirror of darknet's C `image` struct: width, height, channel
    # count and a pointer to the float pixel buffer (see array_to_image).
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
-
-
class METADATA(Structure):
    # ctypes mirror of darknet's C `metadata` struct: class count plus a
    # C array of class-name strings (returned by lib.get_metadata).
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
-
-
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
#lib = CDLL("darknet.so", RTLD_GLOBAL)
hasGPU = True
if os.name == "nt":
    # Windows: choose between the GPU and CPU builds of the darknet DLL
    # that live next to this script, honoring several "force CPU" signals.
    cwd = os.path.dirname(__file__)
    os.environ['PATH'] = cwd + ';' + os.environ['PATH']
    winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
    #print(winGPUdll)
    winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
    envKeys = list()
    for k, v in os.environ.items():
        envKeys.append(k)
    try:
        # Raising ValueError("ForceCPU") anywhere in this block drops us
        # into the CPU fallback in the except clause below.
        try:
            tmp = os.environ["FORCE_CPU"].lower()
            if tmp in ["1", "true", "yes", "on"]:
                raise ValueError("ForceCPU")
            else:
                pass#print("Flag value '"+tmp+"' not forcing CPU mode")
        except KeyError:
            # We never set the flag
            # CUDA_VISIBLE_DEVICES < 0 conventionally means "no GPU".
            if 'CUDA_VISIBLE_DEVICES' in envKeys:
                if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
                    raise ValueError("ForceCPU")
            try:
                global DARKNET_FORCE_CPU
                if DARKNET_FORCE_CPU:
                    raise ValueError("ForceCPU")
            except NameError:
                pass
            # #print(os.environ.keys())
            # #print("FORCE_CPU flag undefined, proceeding with GPU")
        if not os.path.exists(winGPUdll):
            raise ValueError("NoDLL")
        lib = CDLL(winGPUdll, RTLD_GLOBAL)
    except (KeyError, ValueError):
        hasGPU = False
        if os.path.exists(winNoGPUdll):
            lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
            #print("Notice: CPU-only mode")
        else:
            # Try the other way, in case no_gpu was
            # compiled but not renamed
            lib = CDLL(winGPUdll, RTLD_GLOBAL)
            #print("Environment variables indicated a CPU run, but we didn't find `" +
            #      winNoGPUdll+"`. Trying a GPU run anyway.")
else:
    # Non-Windows: load the shared library from a hard-coded build path.
    lib = CDLL("/root/darknet/libdarknet.so", RTLD_GLOBAL)
# ---------------------------------------------------------------------------
# ctypes prototypes for the darknet C API. Declaring argtypes/restype up
# front keeps ctypes from defaulting to c_int and mangling 64-bit pointers.
# ---------------------------------------------------------------------------
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

# Raw forward pass over a preprocessed float buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

if hasGPU:
    # Select which CUDA device darknet runs on.
    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

# Extract detections (boxes + per-class probabilities) after a forward pass.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(
    c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

# Release C-allocated detection arrays / pointer arrays.
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

# Network loaders; load_net_custom allows an explicit batch size.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p

# Non-maximum suppression variants.
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

# Parse a .data metadata file into a METADATA struct.
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

# Forward pass taking an IMAGE directly (used by detect()/classify()).
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
-
-
def array_to_image(arr):
    """Convert an HxWxC uint8 numpy image into a darknet IMAGE.

    Returns (im, arr): the flattened float32 buffer `arr` MUST be kept
    alive by the caller for as long as `im` is used, because IMAGE.data
    is a raw pointer into it and ctypes does not hold a reference.
    """
    # numpy is already imported at module level; the previous redundant
    # function-local `import numpy as np` has been removed.
    arr = arr.transpose(2, 0, 1)  # HWC -> CHW, as darknet expects
    c, h, w = arr.shape
    # Contiguous float buffer scaled to [0, 1].
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(w, h, c, data)
    return im, arr
-
-
def classify(net, meta, im):
    """Classify a darknet IMAGE, returning (name, score) pairs sorted by
    descending score. Names come from `altNames` when it is populated,
    otherwise from the C-side metadata."""
    scores = predict_image(net, im)
    results = []
    for idx in range(meta.classes):
        label = meta.names[idx] if altNames is None else altNames[idx]
        results.append((label, scores[idx]))
    results.sort(key=lambda entry: entry[1], reverse=True)
    return results
-
-
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False):
    """
    Run darknet detection on a BGR numpy image.

    Returns a list of (name, probability, (x, y, w, h), class_index)
    tuples sorted by descending probability, where (x, y) is the box
    center in the ORIGINAL image's pixel coordinates.
    `debug` is kept for interface compatibility; it previously guarded
    only commented-out prints.
    """
    # Darknet expects RGB input resized to the network resolution.
    custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    custom_image = cv2.resize(custom_image, (lib.network_width(
        net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR)
    # Keep `arr` alive until the forward pass is done: im.data points into it.
    im, arr = array_to_image(custom_image)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    # Passing the source image's size makes darknet report boxes in the
    # original pixel coordinates.
    dets = get_network_boxes(
        net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0)
    num = pnum[0]
    if nms:
        do_nms_sort(dets, num, meta.classes, nms)
    res = []
    for j in range(num):
        for i in range(meta.classes):
            try:
                if dets[j].prob[i] > 0:
                    b = dets[j].bbox
                    nameTag = meta.names[i] if altNames is None else altNames[i]
                    res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i))
            except ValueError:
                # Defensive: skip a malformed detection from the C side.
                pass
    res.sort(key=lambda det: det[1], reverse=True)
    # Release the C-allocated detection array.
    free_detections(dets, num)
    return res
-
-
# Lazily-initialized darknet state: populated once inside main() (via
# `global`) and then shared with detect()/classify().
netMain = None   # darknet network handle from load_net_custom
metaMain = None  # METADATA struct from load_meta
altNames = None  # class names read from the .names file, if available
-
-
def getObjectDepth(depth, bounds):
    """Median 3-D position of a detection from the ZED point cloud.

    depth:  point-cloud array indexed [row, col, channel], channels
            0/1/2 being X/Y/Z (meters, per sl.MEASURE.XYZRGBA).
    bounds: detection bbox whose first two entries are the pixel center
            (center_x, center_y); remaining entries are ignored here.

    Samples a small window around the bbox center (clamped to the array
    so edge detections no longer raise IndexError or wrap via negative
    indices) and returns (x, y, z) medians, or (-1, -1, -1) when the
    window contains no finite depth values.
    """
    area_div = 2  # half-size of the sampling window in pixels

    x_vect = []
    y_vect = []
    z_vect = []

    # Clamp the window to valid indices (robustness fix; identical to the
    # original behavior for boxes away from the image border).
    j_lo = max(int(bounds[0] - area_div), 0)
    j_hi = min(int(bounds[0] + area_div), depth.shape[1])
    i_lo = max(int(bounds[1] - area_div), 0)
    i_hi = min(int(bounds[1] + area_div), depth.shape[0])
    for j in range(j_lo, j_hi):
        for i in range(i_lo, i_hi):
            z = depth[i, j, 2]
            # isfinite == not NaN and not inf (single check).
            if np.isfinite(z):
                x_vect.append(depth[i, j, 0])
                y_vect.append(depth[i, j, 1])
                z_vect.append(z)
    try:
        x = statistics.median(x_vect)
        y = statistics.median(y_vect)
        z = statistics.median(z_vect)
    except statistics.StatisticsError:
        # Empty window (all samples NaN/inf): signal failure with -1s
        # instead of swallowing arbitrary exceptions as before.
        x = -1
        y = -1
        z = -1

    return x, y, z
-
-
def generateColor(metaPath):
    """Build one random (B, G, R) color tuple per class.

    Reads the class count from `metaPath`, assuming its FIRST line has the
    form `classes = N` (standard darknet .data layout — TODO confirm for
    all meta files used). Seeded with a fixed constant so colors are
    identical on every run. The file is now closed deterministically via
    a context manager (the previous version leaked the handle).
    """
    random.seed(42)
    with open(metaPath, 'r') as f:
        content = f.readlines()
    class_num = int(content[0].split("=")[1])
    return [(randint(0, 255), randint(0, 255), randint(0, 255))
            for _ in range(class_num)]
-
-
def main(argv):
    """Detect objects in every frame of a ZED SVO recording with darknet
    and append one CSV row (x, y, z, frame) per detection.

    Flags (see help_str): -c config, -w weights, -m .data file,
    -t threshold, -s input .svo file, -o output CSV path.
    """

    # Defaults, overridable from the command line below.
    thresh = 0.25
    configPath = "/sources/cfg.cfg"
    weightPath = "/sources/mailsys.weight"
    metaPath = "/sources/data.data"
    svoPath = None

    help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -o <output_path>'
    try:
        opts, args = getopt.getopt(
            argv, "hc:w:m:t:s:o:", ["config=", "weight=", "meta=", "threshold=", "svo_file=", "output_path="])
    except getopt.GetoptError:
        print (help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print (help_str)
            sys.exit()
        elif opt in ("-c", "--config"):
            configPath = arg
        elif opt in ("-w", "--weight"):
            weightPath = arg
        elif opt in ("-m", "--meta"):
            metaPath = arg
        elif opt in ("-t", "--threshold"):
            thresh = float(arg)
        elif opt in ("-s", "--svo_file"):
            svoPath = arg
        elif opt in ("-o", "--output_path"):
            # NOTE(review): output_path is only bound when -o is passed;
            # the final df1.to_csv(output_path, ...) raises NameError
            # without it — confirm -o is effectively mandatory.
            output_path = arg

    init = sl.InitParameters()
    init.coordinate_units = sl.UNIT.METER  # depth / point cloud in meters

    if svoPath is not None:
        # Replay the recorded SVO file instead of opening a live camera.
        init.set_from_svo_file(svoPath)

        dirname = os.path.splitext(svoPath)[0]
        dir, filename = os.path.split(dirname)
        #print(output_path)
        #print(filename)

        #try:
        #    if not (os.path.isdir(output_path)):
        #        os.makedirs(os.path.join(output_path))
        #except OSError as e:
        #    #print(e)

    cam = sl.Camera()
    if not cam.is_opened():
        #print("Opening ZED Camera...")
        pass
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        # Camera/SVO could not be opened; nothing more to do.
        #print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()              # reused left-view image buffer
    point_cloud_mat = sl.Mat()  # reused XYZRGBA point-cloud buffer

    # Import the global variables. This lets us instance Darknet once, then just call performDetect() again without instancing again
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(configPath)+"`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weightPath)+"`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(metaPath)+"`")
    if netMain is None:
        netMain = load_net_custom(configPath.encode(
            "ascii"), weightPath.encode("ascii"), 0, 1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(metaPath.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re
                match = re.search("names *= *(.*)$", metaContents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    # result is None (no `names =` entry): keep altNames unset.
                    pass
        except Exception:
            # Best-effort only; detect() falls back to the C-side names.
            pass

    color_array = generateColor(metaPath)

    #print("Running...")
    #df1 = df(data={'frame': [], 'label': [], 'x': [], 'y': [], 'depth': []})
    df1 = df(data={'x': [], 'y': [], 'z': [], 'frame': []})
    #print(df1)
    start = time.time()
    # NOTE(review): `key` is never updated (cv2.waitKey is commented out),
    # so the loop only exits via the frame-count break below.
    key = ''
    count = 0  # CSV row index
    frame = 1  # current SVO frame number

    #image_size = cam.get_resolution()
    #width = image_size.width
    #height = image_size.height
    #width_sbs = width * 2

    # Prepare side by side image container equivalent to CV_8UC4
    #svo_image_sbs_rgba = np.zeros((height, width_sbs, 4), dtype=np.uint8)
    fourcc = cv2.VideoWriter_fourcc('M', '4', 'S', '2')  # codec for the (disabled) video writer

    # When saving the color video:
    ##print(cam.get_camera_fps(), "!!!!!!!!!!!")
    #video_path = os.path.join(output_path, filename)
    #writer = cv2.VideoWriter(str(video_path)+"_output.avi", fourcc, cam.get_camera_fps(), (width, height))
    ##print(video_path)
    ##print(writer)
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            image = mat.get_data()

            cam.retrieve_measure(
                point_cloud_mat, sl.MEASURE.XYZRGBA)
            depth = point_cloud_mat.get_data()

            # Do the detection
            detections = detect(netMain, metaMain, image, thresh)

            #print(chr(27) + "[2J"+"**** " +
            #      str(len(detections)) + " Results ****")
            frame += 1
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label+": "+str(np.rint(100 * confidence))+"%"
                #print(pstring)
                bounds = detection[2]  # (center_x, center_y, width, height)
                yExtent = int(bounds[3])
                xEntent = int(bounds[2])
                # Coordinates are around the center
                xCoord = int(bounds[0] - bounds[2]/2)
                yCoord = int(bounds[1] - bounds[3]/2)
                boundingBox = [[xCoord, yCoord], [xCoord, yCoord + yExtent], [xCoord + xEntent, yCoord + yExtent], [xCoord + xEntent, yCoord] ]
                thickness = 1
                # Median 3-D position of the detection from the point cloud.
                x, y, z = getObjectDepth(depth, bounds)
                distance_xyz = math.sqrt(x * x + y * y + z * z)

                distance = "{:.4f}".format(distance_xyz)

                #print(label, distance, xCoord, yCoord)
                # Filled label background, label text, then the box outline.
                cv2.rectangle(image, (xCoord-thickness, yCoord-thickness), (xCoord + xEntent+thickness, yCoord+(18 +thickness*4)), color_array[detection[3]], -1)
                cv2.putText(image, label + " " + (str(distance) + " m"), (xCoord+(thickness*4), yCoord+(10 +thickness*4)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)
                cv2.rectangle(image, (xCoord-thickness, yCoord-thickness), (xCoord + xEntent+thickness, yCoord + yExtent+thickness), color_array[detection[3]], int(thickness*2))
                # NOTE(review): x and y are re-bound here, clobbering the 3-D
                # coordinates returned by getObjectDepth above.
                x = xCoord-thickness
                x1 = xCoord + xEntent+thickness
                y = yCoord-thickness
                y1 = yCoord + yExtent+thickness

                # If need normalize, erase # and change df1.loc[count]
                #norm_x = (x + x1) / 2 / WIDTH
                #norm_y = (y + y1) / 2 / HEIGHT
                #norm_z = distance_xyz / DEPTH_MAX

                df1.loc[count] = [int(xCoord), int(yCoord), distance_xyz, int(frame)]
                #df1.columns = ['idx','frame','label','x','y','depth']
                # Red dot drawn at the bbox center.
                cv2.line(image, (int((x+x1)/2), int((y+y1)/2)), (int((x+x1)/2), int((y+y1)/2)), (0, 0, 255), 10)

                count += 1
            # NOTE(review): this RGBA->RGB conversion appears twice; the
            # second call receives a 3-channel image and would fail inside
            # OpenCV — confirm the intended placement of these two lines.
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            #print(frame)
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
            #writer.write(image)
            # cv2.imshow("ZED", image)
            # key = cv2.waitKey(5)
            # #print(cam.get_svo_number_of_frames())
            if frame == cam.get_svo_number_of_frames():
                # Reached the end of the recording.
                break
        else:
            #print('something happend')
            #key = cv2.waitKey(5)
            pass
        #print("time :", time.time() - start)

    cam.close()
    #writer.release()
    #cv2.destroyAllWindows()
    #print("\nFINISH")
    ##print(df1)

    df1.to_csv(output_path, index=False)
    #print("csv save")
-
-
if __name__ == "__main__":
    # Forward the CLI arguments (without the script name) to main().
    #print(sys.argv)
    main(sys.argv[1:])
|