Cutting a large image into small images: run YOLOv3 through OpenCV's DNN module on every image in a folder, then save each detected object as a separate crop in a sub-folder named after its class. (I adapted this from another blogger whose link I can no longer find; if anyone comes across it, please tell me and I will add the reposting credit.)
import os
import cv2
import numpy as np

weightsPath = "F:/Python/ModelArts/yolov3.weights"
configPath = "F:/Python/ModelArts/darknet-master/cfg/yolov3.cfg"
labelsPath = "F:/Python/ModelArts/darknet-master/data/coco.names"
rootdir = r"F:\Python\ModelArts\test1/frames1"        # directory the images are read from
savepath = "F:/Python/ModelArts/image/output/test2"   # directory the crops are saved to

# Initialize some parameters
LABELS = open(labelsPath).read().strip().split("\n")                      # object class names
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")  # one color per class

filelist = os.listdir(rootdir)   # list the input folder
total_num = len(filelist)        # number of images in the folder
print(total_num)

# Create the output folder if it does not exist yet
if not os.path.isdir(savepath):
    os.makedirs(savepath)

# Load the network once; reloading it for every image is unnecessary
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# Get the output layer names that YOLO needs
# (this indexing matches older OpenCV builds where getUnconnectedOutLayers()
# returns an Nx1 array; see the note after the script for newer versions)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

for (dirpath, dirnames, filenames) in os.walk(rootdir):
    for filename in filenames:
        # boxes/confidences/classIDs must be re-initialized for every new image,
        # otherwise detections from previous images accumulate
        boxes = []
        confidences = []
        classIDs = []

        path = os.path.join(dirpath, filename)
        image = cv2.imread(path)
        if image is None:   # skip files that cannot be read as images
            continue
        (H, W) = image.shape[:2]

        # Build a blob from the input image, then run a forward pass through the
        # loaded model to get the bounding boxes and their probabilities
        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        layerOutputs = net.forward(ln)

        # Loop over each output layer
        for output in layerOutputs:
            # Loop over each detection
            for detection in output:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                # Filter out low-confidence detections
                if confidence > 0.9:
                    # Box center followed by width and height, scaled back to image size
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    # Top-left corner of the box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    # Record the detected box; when processing images in batches the
                    # lists above must be reset per image, or the boxes pile up
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)

        print('boxes:', boxes)
        print('confidences:', confidences)
        print(type(boxes), type(confidences))

        # Non-maximum suppression
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
        k = -1
        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                # Draw the box and class label on the original image
                color = [int(c) for c in COLORS[classIDs[i]]]
                # Arguments: image, top-left corner, bottom-right corner, color, line width
                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
                text = "{}: {:.3f}".format(LABELS[classIDs[i]], confidences[i])
                print('type:', LABELS[classIDs[i]])

                # Save each crop into a sub-folder named after its class
                savepath = "F:/Python/ModelArts/image/output/test2"   # base output directory
                savepath = savepath + '/' + LABELS[classIDs[i]]
                # Create the class folder if it does not exist yet
                if not os.path.isdir(savepath):
                    os.makedirs(savepath)

                # Arguments: image, text, top-left corner (integers), font, font scale, color, thickness
                cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

                # Crop the detection; the slice is [top:bottom, left:right].
                # Note the rectangle is drawn before cropping, so its border
                # shows up along the edges of the saved crop.
                cut = image[y:(y + h), x:(x + w)]
                if cut.size != 0:
                    # len(boxes) is the number of detected vehicles; use it to build
                    # distinct file names for the crops of one image
                    if k < len(boxes):
                        k = k + 1
                        # Suffix letters starting at 'a', incremented per crop
                        t = chr(ord("a") + k)
                        print(filename)
                        print(filename.split(".")[0] + "_" + t + ".jpg")
                        cv2.imwrite(savepath + "/" + filename.split(".")[0] + "_" + t + ".jpg", cut)
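Note: the output-layer lookup in the script (ln[i[0] - 1]) matches older OpenCV builds, where getUnconnectedOutLayers() returns an Nx1 array. In OpenCV 4.5.4 and later the call returns a flat array of integers, so indexing with i[0] fails. Below is a minimal, version-tolerant sketch, assuming the same configPath/weightsPath variables as in the script above:

import cv2
import numpy as np

net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
try:
    # Recent OpenCV releases can return the output layer names directly
    ln = list(net.getUnconnectedOutLayersNames())
except AttributeError:
    # Fallback: derive the names from the indices; flatten() copes with both
    # the old Nx1 shape and the newer flat shape of getUnconnectedOutLayers()
    names = net.getLayerNames()
    ln = [names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

The rest of the pipeline (blobFromImage, forward, NMSBoxes) stays the same; the idxs.flatten() call in the script already handles both the Nx1 and the flat index shapes returned by cv2.dnn.NMSBoxes.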