How can I print the coordinates of a bounding box to the console and check whether the box is in a specific quadrant in YOLOv3?
I am using a pretrained object detector to detect birds, dogs and cats, but what I would like to know is how to print to the console which quadrant a bounding box falls in; for example, if the bounding box is in the first quadrant, the console should print that the box is in the first quadrant.
I am practicing because in the future I want to turn this into my own custom object detector. I hope you can help me.
Here is the code I have, along with an image of the quadrants:
import numpy as np
import cv2
# load the image to detect, get its width and height
img_to_detect = cv2.imread('images/testing/cat_dog_bird.jpg')
img_height = img_to_detect.shape[0]
img_width = img_to_detect.shape[1]
# convert to blob to pass into model
img_blob = cv2.dnn.blobFromImage(img_to_detect,0.003922,(416,416),swapRB=True,crop=False)
#recommended by yolo authors, scale factor is 0.003922 = 1/255; width and height of the blob here are 416,416
#accepted sizes are 320×320, 416×416, 608×608. A larger size means more accuracy but less speed
# set of 80 class labels
class_labels = ["person","bicycle","car","motorcycle","airplane","bus","train","truck","boat","trafficlight","firehydrant","stopsign","parkingmeter","bench","bird","cat","dog","horse","sheep","cow","elephant","bear","zebra","giraffe","backpack","umbrella","handbag","tie","suitcase","frisbee","skis","snowboard","sportsball","kite","baseballbat","baseballglove","skateboard","surfboard","tennisracket","bottle","wineglass","cup","fork","knife","spoon","bowl","banana","apple","sandwich","orange","broccoli","carrot","hotdog","pizza","donut","cake","chair","sofa","pottedplant","bed","diningtable","toilet","tvmonitor","laptop","mouse","remote","keyboard","cellphone","microwave","oven","toaster","sink","refrigerator","book","clock","vase","scissors","teddybear","hairdrier","toothbrush"]
#Declare List of colors as an array
#Green,Blue,Red,cyan,yellow,purple
#Split based on ',' and for every split,change type to int
#convert that to a numpy array to apply color mask to the image numpy array
class_colors = ["0,255,0","0,255","255,255"]
class_colors = [np.array(every_color.split(",")).astype("int") for every_color in class_colors]
class_colors = np.array(class_colors)
class_colors = np.tile(class_colors,(16,1))
# Loading pretrained model
yolo_model = cv2.dnn.readNetFromDarknet('modelo/yolov4.cfg','modelo/yolov4.weights')
# Get all layers from the yolo network
yolo_layers = yolo_model.getLayerNames()
# Loop and find the last layer (output layer) of the yolo network
yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in yolo_model.getUnconnectedOutLayers()]
# input the preprocessed blob into the model and pass it through
yolo_model.setInput(img_blob)
# obtain the detection layers by forwarding through to the output layers
# here the output layer names (yolo_output_layer) are passed to the model's forward function
obj_detection_layers = yolo_model.forward(yolo_output_layer)
# initialization for non-max suppression (NMS)
# declare lists for [class id], [box start x & y, width & height], [confidence]
class_ids_list = []
boxes_list = []
confidences_list = []
cont=0
# loop over each of the layer outputs
for object_detection_layer in obj_detection_layers:
    # loop over the detections
    for object_detection in object_detection_layer:
        all_scores = object_detection[5:]
        predicted_class_id = np.argmax(all_scores)
        prediction_confidence = all_scores[predicted_class_id]
        # take only predictions with confidence above 40%
        if prediction_confidence > 0.40:
            # get the predicted label
            predicted_class_label = class_labels[predicted_class_id]
            # obtain the bounding box coordinates for the actual image from the resized image size
            bounding_box = object_detection[0:4] * np.array([img_width,img_height,img_width,img_height])
            (box_center_x_pt,box_center_y_pt,box_width,box_height) = bounding_box.astype("int")
            start_x_pt = int(box_center_x_pt - (box_width / 2))
            start_y_pt = int(box_center_y_pt - (box_height / 2))
            # save class id, start x & y, width & height, confidence in the lists for NMS processing
            # make sure to pass confidence as float and width and height as integers
            class_ids_list.append(predicted_class_id)
            confidences_list.append(float(prediction_confidence))
            boxes_list.append([start_x_pt,start_y_pt,int(box_width),int(box_height)])
# Applying the NMS will return only the selected max value ids while suppressing the non maximum (weak) overlapping bounding boxes
# Non-Maxima Suppression confidence set as 0.5 & max suppression threshold for NMS as 0.4 (adjust and try for better performance)
max_value_ids = cv2.dnn.NMSBoxes(boxes_list,confidences_list,0.5,0.4)
# loop through the final set of detections remaining after NMS and draw bounding box and write text
contD=0
contC=0
contB=0
for max_valueid in max_value_ids:
    max_class_id = max_valueid[0]
    box = boxes_list[max_class_id]
    start_x_pt = box[0]
    start_y_pt = box[1]
    box_width = box[2]
    box_height = box[3]
    # get the predicted class id and label
    predicted_class_id = class_ids_list[max_class_id]
    predicted_class_label = class_labels[predicted_class_id]
    prediction_confidence = confidences_list[max_class_id]
    end_x_pt = start_x_pt + box_width
    end_y_pt = start_y_pt + box_height
    # get the mask color for this class from the numpy array of colors
    box_color = class_colors[predicted_class_id]
    # convert the color numpy array to a list and apply it to the text and box
    box_color = [int(c) for c in box_color]
    # object counters
    if predicted_class_label == class_labels[14]:
        contB += 1
    elif predicted_class_label == class_labels[16]:
        contD += 1
    elif predicted_class_label == class_labels[15]:
        contC += 1
    # print the prediction in the console
    predicted_class_label = "{}: {:.2f}%".format(predicted_class_label,prediction_confidence * 100)
    print("predicted object {}".format(predicted_class_label))
    # draw rectangle and text on the image
    cv2.rectangle(img_to_detect,(start_x_pt,start_y_pt),(end_x_pt,end_y_pt),box_color,1)
    cv2.putText(img_to_detect,predicted_class_label,(start_x_pt,start_y_pt - 5),cv2.FONT_HERSHEY_SIMPLEX,0.5,box_color,1)

cv2.imshow("Detection Output",img_to_detect)
print("\n")
if contB > 0 and contD > 0 and contC > 0:
    print("This Photo has a dog, a cat and a bird")

# keep the output window open until a key is pressed
cv2.waitKey(0)
cv2.destroyAllWindows()