SSD-tensorflow-1 demo


1. Simple detection

Use the simplest pre-trained model to detect the 20 PASCAL VOC object classes.

Prerequisites: a working TensorFlow + CUDA + cuDNN environment, plus the OpenCV Python bindings (cv2).

The 20 classes: 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'.

Code: https://github.com/balancap/SSD-Tensorflow

After downloading the code, the checkpoints folder contains ssd_300_vgg.ckpt; unzip it into that same folder.

Open notebooks/ssd_notebook.ipynb with Jupyter.

Point the notebook at your own image (change path, or drop your image into the demo folder), then run all cells.

  • path is the directory of images to test (as written, the code only processes the last file in that directory; to test several images, or to build toward video, you have to modify the code yourself; a minimal sketch follows below)
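
For example, a hedged loop over every image in the demo folder (assuming the notebook's process_image and the visualization module are already in scope) could look like this:

import os
import matplotlib.image as mpimg

path = '../demo/'
for name in sorted(os.listdir(path)):
    if not name.lower().endswith(('.jpg', '.jpeg', '.png')):
        continue  # skip non-image files
    img = mpimg.imread(os.path.join(path, name))
    rclasses, rscores, rbboxes = process_image(img)
    visualization.plt_bboxes(img, rclasses, rscores, rbboxes)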

2. demo2: image detection script

In the notebooks folder, create a file demo_test.py with the code below, then run it directly. (It is essentially the code from ssd_tests.ipynb in the notebooks folder, which you can also open in Jupyter; I only made a few small changes.)

# -*- coding:utf-8 -*-
# -*- author:zzZ_CMing  CSDN address:https://blog.csdn.net/zzZ_CMing
# -*- 2018/07/14; 15:19
# -*- python3.5
"""
address: https://blog.csdn.net/qq_35608277/article/details/78660469
The code below is adapted from the SSD-Tensorflow repository on GitHub.
"""
import sys

# When the imported modules are not in the same directory as the running
# script, add the repository root to the module search path first:
sys.path.append('./SSD-Tensorflow/')

import os
import cv2
import math
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as mpcm
import matplotlib.image as mpimg
from notebooks import visualization
from nets import ssd_vgg_300, ssd_common, np_methods
from preprocessing import ssd_vgg_preprocessing

slim = tf.contrib.slim

# TensorFlow session
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)

l_VOC_CLASS = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
               'bus', 'car', 'cat', 'chair', 'cow',
               'diningTable', 'dog', 'horse', 'motorbike', 'person',
               'pottedPlant', 'sheep', 'sofa', 'train', 'TV']

# Define the input format and placeholders
net_shape = (300, 300)
# Preprocessing: resize the input image to 300x300 before feeding it to the network
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Data layout of the input image; 'NHWC' means [batch_size, height, width, channel]
data_format = 'NHWC'

# Preprocess: resize the img_input image to 300x300; labels_pre, bboxes_pre and bbox_img are unused at inference time
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, net_shape, data_format,
    resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
# Expand to a 4-D batch tensor for the network input
image_4d = tf.expand_dims(image_pre, 0)

# Define the SSD model
# Reuse the variable scope if ssd_net already exists in this session, otherwise None
reuse = True if 'ssd_net' in locals() else None
# Instantiate the VGG-based SSD model object (a custom class)
ssd_net = ssd_vgg_300.SSDNet()
# Build the graph and get the class-prediction and localization tensors
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
    predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)

# Restore the published SSD checkpoint parameters
ckpt_filename = '../checkpoints/ssd_300_vgg.ckpt'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)

# Extract the anchor grid positions from the network structure.
# Based on the model hyperparameters, get the anchor boxes of each of the
# 6 feature layers used here (blocks 4, 7, 8, 9, 10, 11).
ssd_anchors = ssd_net.anchors(net_shape)
"""
Each layer's anchors consist of 4 arrays: the first two hold the y and x grid
coordinates of that feature layer, normalized to the original (300x300) image;
the third and fourth hold the anchor heights and widths, also normalized. Their
lengths vary with the number of boxes per layer and are set by the
anchor_sizes and anchor_ratios hyperparameters.
"""


# Drawing helpers
def colors_subselect(colors, num_classes=21):
    dt = len(colors) // num_classes
    sub_colors = []
    for i in range(num_classes):
        color = colors[i * dt]
        if isinstance(color[0], float):
            sub_colors.append([int(c * 255) for c in color])
        else:
            sub_colors.append([c for c in color])
    return sub_colors


def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
    shape = img.shape
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        color = colors[classes[i]]
        # Draw bounding box...
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
        # Draw text...
        s = '%s/%.3f' % (l_VOC_CLASS[int(classes[i]) - 1], scores[i])
        p1 = (p1[0] - 5, p1[1])
        # cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 1.5, color, 3)


colors_plasma = colors_subselect(mpcm.plasma.colors, num_classes=21)


# Main processing routine
def process_image(img, case, select_threshold=0.15, nms_threshold=.1, net_shape=(300, 300)):
    # select_threshold: score threshold; a box whose class score exceeds it counts as a detected object
    # nms_threshold: overlap threshold; when two boxes of the same object overlap more than this, the dedup step below merges them

    # Run the SSD model: get the 4-D input, class predictions and localization predictions; rbbox_img is the detection extent, fixed here to [0,0,1,1], i.e. the whole image
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions,
                                                               localisations, bbox_img], feed_dict={img_input: img})

    # ssd_bboxes_select() uses each feature layer's class scores, normalized
    # coordinates and anchor box sizes, thresholding the scores to get each
    # layer's detected objects together with their class and coordinates
    rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(rpredictions, rlocalisations, ssd_anchors,
                                                              select_threshold=select_threshold,
                                                              img_shape=net_shape,
                                                              num_classes=21, decode=True)

    """
    這個函數做的事情比較多,這里說的細致一些:
    首先是輸入,輸入的數據為每個特征層(一共6個,見上文)的:
                                                rpredictions: 分類預測數據,
                                                rlocalisations: 坐標預測數據,
                                                ssd_anchors: anchors_box數據
                                            其中:
                                               分類預測數據為當前特征層中每個像素的每個box的分類預測
                                               坐標預測數據為當前特征層中每個像素的每個box的坐標預測
                                               anchors_box數據為當前特征層中每個像素的每個box的修正數據

        函數根據坐標預測數據和anchors_box數據,計算得到每個像素的每個box的中心和長寬,這個中心坐標和長寬會根據一個算法進行些許的修正,
    從而得到一個更加准確的box坐標;修正的算法會在后文中詳細解釋,如果只是為了理解算法流程也可以不必深究這個,因為這個修正算法屬於經驗算
    法,並沒有太多邏輯可循。
        修正完box和中心后,函數會計算每個像素的每個box的分類預測數據的得分,當這個分數高於一個閾值(這里是0.5)則認為這個box成功
    框到了一個對象,然后將這個box的坐標數據,所屬分類和分類得分導出,從而得到:
        rclasses:所屬分類
        rscores:分類得分
        rbboxes:坐標

        最后要注意的是,同一個目標可能會在不同的特征層都被檢測到,並且他們的box坐標會有些許不同,這里並沒有去掉重復的目標,而是在下文
    中專門用了一個函數來去重
    """

    # Clip boxes that extend past the image boundary
    rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
    # Dedup: remove duplicate detections of the same object
    rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
    # Map box coordinates back to the original image (all coordinates above were normalized, so invert that here)
    rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)

    if case == 1:
        bboxes_draw_on_img(img, rclasses, rscores, rbboxes, colors_plasma, thickness=8)
        return img
    else:
        return rclasses, rscores, rbboxes
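
# A hedged aside (not from the repository): np_methods.bboxes_nms above is the
# classic greedy non-maximum suppression. After bboxes_sort has ordered the
# boxes by score, NMS keeps the best box, drops any same-class box whose
# overlap (Jaccard index / IoU) with it exceeds nms_threshold, then repeats
# with the next surviving box. The overlap measure, for illustration:
def iou_sketch(box_a, box_b):
    # Boxes are [ymin, xmin, ymax, xmax], normalized to [0, 1].
    ymin, xmin = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ymax, xmax = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(ymax - ymin, 0.) * max(xmax - xmin, 0.)
    area = lambda b: (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area(box_a) + area(box_b) - inter)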


"""
# Localization only: draw the boxes on the image, no class analysis
case = 1
img = cv2.imread("../demo/person.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(process_image(img, case))
plt.show()
"""
# Localization plus class/score analysis
case = 2
path = '../demo/person.jpg'
# Read the image
img = mpimg.imread(path)
# Run the main routine
rclasses, rscores, rbboxes = process_image(img, case)
# visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
# Show the classification results
visualization.plt_bboxes(img, rclasses, rscores, rbboxes)

You will get a figure in which the objects are boxed; in each box label, the first number is the class index and the second is the prediction confidence.

# Mapping from class index to class name
label_map = {1:'aeroplane', 2:'bicycle', 3:'bird', 4:'boat', 5:'bottle',
             6:'bus', 7:'car', 8:'cat', 9:'chair', 10:'cow',
             11:'diningTable', 12:'dog', 13:'horse', 14:'motorbike', 15:'person',
             16:'pottedPlant', 17:'sheep', 18:'sofa', 19:'train', 20:'TV'}
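
The box "correction" mentioned in the docstring above is the standard SSD offset decoding. Here is a hedged NumPy sketch of what np_methods.ssd_bboxes_select does internally when decode=True; the prior_scaling constants follow the repository's defaults, but treat decode_sketch as an illustration, not the exact source:

import numpy as np

def decode_sketch(feat_localizations, anchor, prior_scaling=(0.1, 0.1, 0.2, 0.2)):
    # anchor = (yref, xref, href, wref): the layer's grid centers and reference sizes.
    yref, xref, href, wref = anchor
    cy = feat_localizations[..., 0] * href * prior_scaling[0] + yref
    cx = feat_localizations[..., 1] * wref * prior_scaling[1] + xref
    h = href * np.exp(feat_localizations[..., 2] * prior_scaling[2])
    w = wref * np.exp(feat_localizations[..., 3] * prior_scaling[3])
    # Convert center/size form to normalized [ymin, xmin, ymax, xmax].
    return np.stack([cy - h / 2., cx - w / 2., cy + h / 2., cx + w / 2.], axis=-1)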

3. demo3: video detection

The demo folder only contains still images. If you want to label objects in a video, first record one (keep it short, or the run will take a long time), then create a Video folder in the project root containing two subfolders, input and output (the sketch below creates them):
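
If you prefer not to create the folders by hand, a two-line sketch (run from the project root) does the same:

import os
os.makedirs('./Video/input', exist_ok=True)   # put your recorded video here
os.makedirs('./Video/output', exist_ok=True)  # demo_Video.py writes here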

Put the recorded video into the input folder, and mind the file name! Finally, create demo_Video.py in the project root with the following code and run it.

Note: the file name on line 166 must match the name of the video file in Video/input.

  1 # -*- coding:utf-8 -*-
  2 # -*- author:zzZ_CMing  CSDN address:https://blog.csdn.net/zzZ_CMing
  3 # -*- 2018/07/09; 15:19
  4 # -*- python3.5
  5 import os
  6 import cv2
  7 import math
  8 import random
  9 import tensorflow as tf
 10 import matplotlib.pyplot as plt
 11 import matplotlib.cm as mpcm
 12 import matplotlib.image as mpimg
 13 from notebooks import visualization
 14 from nets import ssd_vgg_300, ssd_common, np_methods
 15 from preprocessing import ssd_vgg_preprocessing
 16 import sys
 17 
 18 # When the imported modules are not in the same directory as the running script, add this at the start of the script:
 19 sys.path.append('./SSD-Tensorflow/')
 20 
 21 slim = tf.contrib.slim
 22 
 23 # TensorFlow session
 24 gpu_options = tf.GPUOptions(allow_growth=True)
 25 config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
 26 isess = tf.InteractiveSession(config=config)
 27 
 28 l_VOC_CLASS = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
 29                'bus', 'car', 'cat', 'chair', 'cow',
 30                'diningTable', 'dog', 'horse', 'motorbike', 'person',
 31                'pottedPlant', 'sheep', 'sofa', 'train', 'TV']
 32 
 33 # Define the input format and placeholders
 34 net_shape = (300, 300)
 35 # Preprocessing: resize the input image to 300x300 before feeding it to the network
 36 img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
 37 # Data layout of the input image; 'NHWC' means [batch_size, height, width, channel]
 38 data_format = 'NHWC'
 39 
 40 # Preprocess: resize the img_input image to 300x300; labels_pre, bboxes_pre and bbox_img are unused at inference time
 41 image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
 42     img_input, None, None, net_shape, data_format,
 43     resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
 44 # Expand to a 4-D batch tensor for the network input
 45 image_4d = tf.expand_dims(image_pre, 0)
 46 
 47 # Define the SSD model
 48 # Reuse the variable scope if ssd_net already exists in this session, otherwise None
 49 reuse = True if 'ssd_net' in locals() else None
 50 # Instantiate the VGG-based SSD model object (a custom class)
 51 ssd_net = ssd_vgg_300.SSDNet()
 52 # Build the graph and get the class-prediction and localization tensors
 53 with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
 54     predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
 55 
 56 # Restore the published SSD checkpoint parameters
 57 ckpt_filename = '../checkpoints/ssd_300_vgg.ckpt'
 58 # ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
 59 isess.run(tf.global_variables_initializer())
 60 saver = tf.train.Saver()
 61 saver.restore(isess, ckpt_filename)
 62 
 63 # Extract the anchor grid positions from the network structure.
 64 # Based on the model hyperparameters, get the anchor boxes of each of the 6 feature layers (blocks 4, 7, 8, 9, 10, 11)
 65 ssd_anchors = ssd_net.anchors(net_shape)
 66 """
 67 Each layer's anchors consist of 4 arrays: the first two hold the y and x grid coordinates of that
 68 feature layer, normalized to the original (300x300) image; the third and fourth hold the anchor
 69 heights and widths, also normalized, with lengths set by anchor_sizes and anchor_ratios.
 70 """
 71 
 72 
 73 # Drawing helpers
 74 def colors_subselect(colors, num_classes=21):
 75     dt = len(colors) // num_classes
 76     sub_colors = []
 77     for i in range(num_classes):
 78         color = colors[i * dt]
 79         if isinstance(color[0], float):
 80             sub_colors.append([int(c * 255) for c in color])
 81         else:
 82             sub_colors.append([c for c in color])
 83     return sub_colors
 84 
 85 
 86 def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
 87     shape = img.shape
 88     for i in range(bboxes.shape[0]):
 89         bbox = bboxes[i]
 90         color = colors[classes[i]]
 91         # Draw bounding box...
 92         p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
 93         p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
 94         cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
 95         # Draw text...
 96         s = '%s/%.3f' % (l_VOC_CLASS[int(classes[i]) - 1], scores[i])
 97         p1 = (p1[0] - 5, p1[1])
 98         # cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 1.5, color, 3)
 99 
100 
101 colors_plasma = colors_subselect(mpcm.plasma.colors, num_classes=21)
102 
103 
104 # Main processing routine
105 def process_image(img, select_threshold=0.2, nms_threshold=.1, net_shape=(300, 300)):
106     # select_threshold: score threshold; a box whose class score exceeds it counts as a detected object
107     # nms_threshold: overlap threshold; when two boxes of the same object overlap more than this, the dedup step below merges them
108 
109     # Run the SSD model: get the 4-D input, class predictions and localization predictions; rbbox_img is the detection extent, fixed to [0,0,1,1], i.e. the whole image
110     rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
111                                                               feed_dict={img_input: img})
112 
113     # ssd_bboxes_select uses each feature layer's class scores, normalized coordinates and
114     # anchor box sizes, thresholding the scores to get each layer's detected objects with class and coordinates
115     rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(rpredictions, rlocalisations, ssd_anchors,
116                                                               select_threshold=select_threshold,
117                                                               img_shape=net_shape,
118                                                               num_classes=21, decode=True)
119 
120     """
121     這個函數做的事情比較多,這里說的細致一些:
122     首先是輸入,輸入的數據為每個特征層(一共6個,見上文)的:
123                                                 分類預測數據(rpredictions),
124                                                 坐標預測數據(rlocalisations),
125                                                 anchors_box數據(ssd_anchors)
126                                             其中:
127                                                分類預測數據為當前特征層中每個像素的每個box的分類預測
128                                                坐標預測數據為當前特征層中每個像素的每個box的坐標預測
129                                                anchors_box數據為當前特征層中每個像素的每個box的修正數據
130 
131         函數根據坐標預測數據和anchors_box數據,計算得到每個像素的每個box的中心和長寬,這個中心坐標和長寬會根據一個算法進行些許的修正,
132     從而得到一個更加准確的box坐標;修正的算法會在后文中詳細解釋,如果只是為了理解算法流程也可以不必深究這個,因為這個修正算法屬於經驗算
133     法,並沒有太多邏輯可循。
134         修正完box和中心后,函數會計算每個像素的每個box的分類預測數據的得分,當這個分數高於一個閾值(這里是0.5)則認為這個box成功
135     框到了一個對象,然后將這個box的坐標數據,所屬分類和分類得分導出,從而得到:
136         rclasses:所屬分類
137         rscores:分類得分
138         rbboxes:坐標
139 
140         最后要注意的是,同一個目標可能會在不同的特征層都被檢測到,並且他們的box坐標會有些許不同,這里並沒有去掉重復的目標,而是在下文
141     中專門用了一個函數來去重
142     """
143 
144     # Clip boxes that extend past the image boundary
145     rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
146     rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
147     # Dedup: remove duplicate detections of the same object
148     rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
149     # Map box coordinates back to the original image (all coordinates above were normalized, so invert that here)
150     rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
151 
152     bboxes_draw_on_img(img, rclasses, rscores, rbboxes, colors_plasma, thickness=8)
153     return img
154 
155 
156 # Video object detection
157 import imageio
158 imageio.plugins.ffmpeg.download()
159 from moviepy.editor import VideoFileClip
160 
161 def process_video(input_path, output_path):
162     video = VideoFileClip(input_path)
163     result = video.fl_image(process_image)
164     result.write_videofile(output_path, fps=40)
165 
166 video_name = "3.mp4"
167 input_path = "./Video/input/" + video_name
168 output_path = "./Video/output/output_" + video_name
169 process_video(input_path, output_path)
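
One optional tweak (my suggestion, not part of the original code): write_videofile is called with a hard-coded fps=40, so playback speed can differ from the source. moviepy exposes the input clip's frame rate, which a hedged variant of process_video can pass through instead:

def process_video(input_path, output_path):
    video = VideoFileClip(input_path)
    result = video.fl_image(process_image)
    result.write_videofile(output_path, fps=video.fps)  # keep the source frame rate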

After some waiting, the run finally finishes.

Open the Video/output folder and see what the resulting video looks like!

4. demo4: video with labels

Create ssd_notebook_camera.py under the notebooks directory:

# coding: utf-8


import os
import math
import random

import numpy as np
import tensorflow as tf
import cv2

slim = tf.contrib.slim

# get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

import sys

sys.path.append('../')

from nets import ssd_vgg_300, ssd_common, np_methods
from preprocessing import ssd_vgg_preprocessing
from notebooks import visualization_camera  # visualization

# TensorFlow session: grow memory when needed. TF, DO NOT USE ALL MY GPU MEMORY!!!
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)

# ## SSD 300 Model
#
# The SSD 300 network takes 300x300 image inputs. In order to feed any image, the latter is resized to this input shape (i.e. `Resize.WARP_RESIZE`). Note that even though it may change the width/height ratio, the SSD model performs well on resized images (and it is the default behaviour in the original Caffe implementation).
#
# SSD anchors correspond to the default bounding boxes encoded in the network. The SSD net output provides offset on the coordinates and dimensions of these anchors.

# Input placeholder.
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)

# Define the SSD model.
reuse = True if 'ssd_net' in locals() else None
ssd_net = ssd_vgg_300.SSDNet()
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
    predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)

# Restore SSD model.
ckpt_filename = 'E:/SSD/initial_SSD/SSD-Tensorflow-master/checkpoints/ssd_300_vgg.ckpt'  # change this to your own checkpoint path
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)

# SSD default anchor boxes.
ssd_anchors = ssd_net.anchors(net_shape)


# ## Post-processing pipeline
#
# The SSD outputs need to be post-processed to provide proper detections. Namely, we follow these common steps:
#
# * Select boxes above a classification threshold;
# * Clip boxes to the image shape;
# * Apply the Non-Maximum-Selection algorithm: fuse together boxes whose Jaccard score > threshold;
# * If necessary, resize bounding boxes to original image shape.


# Main image processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
    # Run SSD network.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
                                                              feed_dict={img_input: img})

    # Get classes and bboxes from the net outputs.
    rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(
        rpredictions, rlocalisations, ssd_anchors,
        select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)

    rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
    rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
    # Resize bboxes to original image shape. Note: useless for Resize.WARP!
    rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
    return rclasses, rscores, rbboxes


# # Test on some demo image and visualize output.
# path = '../demo/'
# image_names = sorted(os.listdir(path))

# img = mpimg.imread(path + image_names[-5])
# rclasses, rscores, rbboxes =  process_image(img)

# # visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
# visualization.plt_bboxes(img, rclasses, rscores, rbboxes)


##### The following lines were added for the camera/video demo #####
cap = cv2.VideoCapture(r'E:/SSD/initial_SSD/SSD-Tensorflow-master/demo_video/01.mp4')
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fourcc = cap.get(cv2.CAP_PROP_FOURCC)
# fourcc = cv2.CAP_PROP_FOURCC(*'CVID')
print('fps=%d,size=%r,fourcc=%r' % (fps, size, fourcc))
delay = max(int(1000 / fps), 1)  # milliseconds per frame, for cv2.waitKey

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        # The frame is already a numpy array; draw the result boxes and
        # labels directly on it.
        image_np = frame
        # Actual detection.
        rclasses, rscores, rbboxes = process_image(image_np)
        # Visualization of the results of a detection.
        visualization_camera.bboxes_draw_on_img(image_np, rclasses, rscores, rbboxes)
        cv2.imshow('frame', image_np)
        cv2.waitKey(delay)
        print('Ongoing...')
    else:
        break
cap.release()
cv2.destroyAllWindows()
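
Since this is billed as a camera demo, note that the same loop also works with a live webcam (a hedged variant, not in the original): pass a device index to VideoCapture instead of a file path.

cap = cv2.VideoCapture(0)  # device 0 = default webcam; the loop above is unchanged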

You also need to create visualization_camera.py in the same notebooks folder (the script above imports it under that name); it is the repository's visualization.py with small modifications:

# Copyright 2017 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import random

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as mpcm
# mpcm provides ScalarMappable, a mixin that maps scalar data to RGBA colors,
# normalizing the data before looking it up in a given colormap.


def num2class(n):
    import datasets.pascalvoc_2007 as pas
    x = pas.pascalvoc_common.VOC_LABELS.items()
    for name, item in x:
        if n in item:
            # print(name)
            return name


# =========================================================================== #
# Some colormaps.
# =========================================================================== #
def colors_subselect(colors, num_classes=21):
    dt = len(colors) // num_classes
    sub_colors = []
    for i in range(num_classes):
        color = colors[i * dt]
        if isinstance(color[0], float):
            sub_colors.append([int(c * 255) for c in color])
        else:
            sub_colors.append([c for c in color])
    return sub_colors


colors_plasma = colors_subselect(mpcm.plasma.colors, num_classes=21)
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]


# =========================================================================== #
# OpenCV drawing.
# =========================================================================== #
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw a collection of lines on an image.
    """
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)


def draw_rectangle(img, p1, p2, color=[255, 0, 0], thickness=2):
    cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)


def draw_bbox(img, bbox, shape, label, color=[255, 0, 0], thickness=2):
    p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
    p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
    cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
    p1 = (p1[0] + 15, p1[1])
    cv2.putText(img, str(label), p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)


def bboxes_draw_on_img(img, classes, scores, bboxes, colors=dict(), thickness=2):
    shape = img.shape
    #### added 20180516 ####
    # colors=dict()
    ########################
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        if classes[i] not in colors:
            colors[classes[i]] = (random.random(), random.random(), random.random())
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], colors[classes[i]], thickness)
        s = '%s/%.3f' % (num2class(classes[i]), scores[i])
        p1 = (p1[0] - 5, p1[1])
        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, colors[classes[i]], 1)


# =========================================================================== #
# Matplotlib show...
# =========================================================================== #
def plt_bboxes(img, classes, scores, bboxes, figsize=(10, 10), linewidth=1.5):
    """Visualize bounding boxes. Largely inspired by SSD-MXNET!
    """
    fig = plt.figure(figsize=figsize)
    plt.imshow(img)
    height = img.shape[0]
    width = img.shape[1]
    colors = dict()
    for i in range(classes.shape[0]):
        cls_id = int(classes[i])
        if cls_id >= 0:
            score = scores[i]
            if cls_id not in colors:
                colors[cls_id] = (random.random(), random.random(), random.random())
            ymin = int(bboxes[i, 0] * height)
            xmin = int(bboxes[i, 1] * width)
            ymax = int(bboxes[i, 2] * height)
            xmax = int(bboxes[i, 3] * width)
            rect = plt.Rectangle((xmin, ymin), xmax - xmin,
                                 ymax - ymin, fill=False,
                                 edgecolor=colors[cls_id],
                                 linewidth=linewidth)
            plt.gca().add_patch(rect)
            # class_name = str(cls_id)  # commented 20180516
            #### added 20180516 ####
            class_name = num2class(cls_id)
            #### added end #########
            plt.gca().text(xmin, ymin - 2,
                           '{:s} | {:.3f}'.format(class_name, score),
                           bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
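
As a quick sanity check (assuming the repository's datasets package is importable), num2class maps a numeric class id back to its VOC name through VOC_LABELS:

print(num2class(15))  # hypothetical check: id 15 corresponds to 'person' in the VOC label map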

 

