Implementing Live Push Streaming with Python


Tutorial

https://codingchaozhang.blog.csdn.net/article/details/102732555?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-2.channel_param&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-2.channel_param

 


First, the end result: the goal is essentially to detect whether an industrial board appears in the frame. The detection method itself is fairly simple, using OpenCV template matching.

Overall approach

  • Read the video with OpenCV
  • Split the video into individual frames
  • Process each frame (OpenCV template matching)
  • Write the processed frame into a pipe
  • Push the stream to the live server with ffmpeg

Problems encountered along the way

When processing a local video file there was no lag or delay, but with a real-time video stream the output became laggy and delayed. After some searching online, I switched to a multi-process approach (Python's multiprocessing module).

Pushing the stream with FFmpeg

Installing an Nginx-RTMP streaming media server on Ubuntu 14:
https://www.cnblogs.com/cocoajin/p/4353767.html
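
For reference, a minimal server-side configuration for the nginx-rtmp-module might look like the sketch below. This is an assumption based on the link above, not part of the original post; the application name hls is chosen to match the rtmp://localhost/hls/test URL used in the full code later, and the exact directives depend on how nginx and the RTMP module were installed.

rtmp {
    server {
        listen 1935;                # default RTMP port
        chunk_size 4096;

        application hls {
            live on;                # accept live streams pushed by ffmpeg
            record off;
            # hls on;               # optionally also generate HLS segments
            # hls_path /tmp/hls;
        }
    }
}

Once the server is running, the stream pushed by the code below can usually be previewed by pointing a player such as VLC or ffplay at the same RTMP URL. On the Python side, frames are piped to ffmpeg as follows: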

import subprocess as sp
import cv2 as cv

rtmpUrl = ""
camera_path = ""
cap = cv.VideoCapture(camera_path)

# Get video information
fps = int(cap.get(cv.CAP_PROP_FPS))
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

# ffmpeg command: read raw BGR frames from stdin and push H.264/FLV to the RTMP server
command = ['ffmpeg',
        '-y',
        '-f', 'rawvideo',
        '-vcodec','rawvideo',
        '-pix_fmt', 'bgr24',
        '-s', "{}x{}".format(width, height),
        '-r', str(fps),
        '-i', '-',
        '-c:v', 'libx264',
        '-pix_fmt', 'yuv420p',
        '-preset', 'ultrafast',
        '-f', 'flv',
        rtmpUrl]

# Set up the pipe: ffmpeg reads frames from its stdin
p = sp.Popen(command, stdin=sp.PIPE)

# Read from the camera
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        print("Failed to read frame from camera")
        break

    # process frame
    # your code
    # process frame

    # write the raw BGR bytes of this frame to the pipe
    p.stdin.write(frame.tobytes())
  • Note: rtmpUrl is the address of the RTMP server that receives the video; use the server set up as in the link given above.
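  • Note: each p.stdin.write() call must deliver exactly width * height * 3 bytes of raw BGR data, matching the -s and -pix_fmt options given to ffmpeg above. When the loop ends it also helps to close the pipe so ffmpeg can flush and exit cleanly; a minimal sketch, assuming the cap and p objects from the code above:

# Clean-up after the capture loop (sketch, using cap and p from the code above)
cap.release()        # release the OpenCV capture
p.stdin.close()      # send EOF so ffmpeg can flush the remaining data and exit
p.wait()             # wait for the ffmpeg process to finish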

  

Multi-process processing

  • Python multiprocessing (multi-process programming): https://blog.csdn.net/jeffery0207/article/details/82958520
def image_put(q):
    # Use a local video file for verification
    cap = cv2.VideoCapture("./new.mp4")
    # Or read from a live camera / video stream instead:
    # cap = cv2.VideoCapture(0)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)

    if cap.isOpened():
        print('success')
    else:
        print('failed')
    while True:
        q.put(cap.read()[1])
        # Keep the queue fresh: if the consumer falls behind, drop the old frame
        q.get() if q.qsize() > 1 else time.sleep(0.01)

def image_get(q):
    while True:
        # start = time.time()
        #flag += 1
        frame = q.get()
        frame = template_match(frame)
        # end = time.time()
        # print("the time is", end-start)
        cv2.imshow("frame", frame)
        cv2.waitKey(1)  # 1 ms wait so the display loop keeps running
        # pipe.stdin.write(frame.tobytes())
        #cv2.imwrite(save_path + "%d.jpg"%flag,frame)

# Run a single camera with two processes
def run_single_camera():
    # Initialization
    mp.set_start_method(method='spawn')  # init
    # Queue shared by the producer and consumer processes
    queue = mp.Queue(maxsize=2)
    processes = [mp.Process(target=image_put, args=(queue, )),
                 mp.Process(target=image_get, args=(queue, ))]

    [process.start() for process in processes]
    [process.join() for process in processes]

def run():
    run_single_camera()  # quick, with 2 processes
    pass

  

  • Note: using Python 3's built-in multiprocessing module, create a queue. Process A reads each frame from the video stream over the RTSP protocol and puts it into the queue; process B takes frames out of the queue, processes them, and displays them. If process A finds two frames already in the queue, meaning process B cannot keep up, process A drops the old frame from the queue and replaces it with the new one.

Full code

import time
import multiprocessing as mp
import numpy as np
import random
import subprocess as sp
import cv2
import os
# Path of the template image used by OpenCV template matching
template_path = "./high_img_template.jpg"

# Text overlays to display inside the detection rectangle
category = "Category:      board"

var_confidence = (np.random.randint(86, 98)) / 100
Confidence = "Confidence:     " + str(var_confidence)

var_precision = round(random.uniform(98, 99), 2)
Precision = "Precision:    " + str(var_precision) + "%"

product_yield = "Product Yield:  100%"

result = "Result: perfect"


# Read the template and get its height and width
template = cv2.imread(template_path, 0)
h, w = template.shape[:2]
# Template matching function
def template_match(img_rgb):
    # Convert to grayscale
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    # Normalized cross-correlation template matching
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    # Matching threshold
    threshold = 0.8
    loc = np.where(res >= threshold)
    if len(loc[0]):
        # Draw a fixed region and the overlay text when the template is found
        cv2.rectangle(img_rgb, (155, 515), (1810, 820), (0, 0, 255), 3)
        cv2.putText(img_rgb, category, (240, 600), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, Confidence, (240, 640), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, Precision, (240, 680), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, product_yield, (240, 720), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, result, (240, 780), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    return img_rgb


# Video properties
size = (1920, 1080)
sizeStr = str(size[0]) + 'x' + str(size[1])
# fps = cap.get(cv2.CAP_PROP_FPS)  # 30p/self
# fps = int(fps)
fps = 11
hz = int(1000.0 / fps)
print('size:' + sizeStr + ' fps:' + str(fps) + ' hz:' + str(hz))

rtmpUrl = 'rtmp://localhost/hls/test'
# Live output pipe
# ffmpeg RTMP push; the key point is that frames are shared with ffmpeg through a pipe
command = ['ffmpeg',
    '-y',
    '-f', 'rawvideo',
    '-vcodec','rawvideo',
    '-pix_fmt', 'bgr24',
    '-s', sizeStr,
    '-r', str(fps),
    '-i', '-',
    '-c:v', 'libx264',
    '-pix_fmt', 'yuv420p',
    '-preset', 'ultrafast',
    '-f', 'flv',
    rtmpUrl]
# Pipe configuration
# pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
pipe = sp.Popen(command, stdin=sp.PIPE)  # ,shell=False
# pipe.stdin.write(frame.tobytes())


def image_put(q):
    # Use a local video file for verification
    cap = cv2.VideoCapture("./new.mp4")
    # Or read from a live camera / video stream instead:
    # cap = cv2.VideoCapture(0)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)

    if cap.isOpened():
        print('success')
    else:
        print('failed')
    while True:
        q.put(cap.read()[1])
        # Keep the queue fresh: if the consumer falls behind, drop the old frame
        q.get() if q.qsize() > 1 else time.sleep(0.01)

# Directory used to save result images when processing a local video
save_path = "./res_imgs"
if not os.path.exists(save_path):
    os.makedirs(save_path)

def image_get(q):
    while True:
        # start = time.time()
        #flag += 1
        frame = q.get()
        frame = template_match(frame)
        # end = time.time()
        # print("the time is", end-start)
        cv2.imshow("frame", frame)
        cv2.waitKey(1)  # 1 ms wait so the display loop keeps running
        # pipe.stdin.write(frame.tobytes())
        #cv2.imwrite(save_path + "%d.jpg"%flag,frame)

# Run a single camera with two processes
def run_single_camera():
    # Initialization
    mp.set_start_method(method='spawn')  # init
    # Queue shared by the producer and consumer processes
    queue = mp.Queue(maxsize=2)
    processes = [mp.Process(target=image_put, args=(queue, )),
                 mp.Process(target=image_get, args=(queue, ))]

    [process.start() for process in processes]
    [process.join() for process in processes]

def run():
    run_single_camera()  # quick, with 2 processes
    pass


if __name__ == '__main__':
    run()
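
In the listing above the write to the ffmpeg pipe inside image_get is commented out, so the consumer only displays frames. Also, because the start method is 'spawn', each child process re-imports this module, which would re-create the module-level pipe and launch extra ffmpeg processes. A sketch of one possible way to actually push the processed frames (an assumption, not part of the original code) is to create the ffmpeg process inside the consumer and remove, or guard, the module-level Popen:

def image_get_and_push(q):
    # Variant of image_get that pushes frames to the RTMP server instead of
    # displaying them. Creating the Popen here means only this process owns
    # the pipe; the module-level "pipe = sp.Popen(...)" line should then be
    # removed or placed under "if __name__ == '__main__':".
    pipe = sp.Popen(command, stdin=sp.PIPE)
    try:
        while True:
            frame = q.get()
            frame = template_match(frame)
            # The frame must match the size given to ffmpeg via -s (1920x1080 here)
            if (frame.shape[1], frame.shape[0]) != size:
                frame = cv2.resize(frame, size)
            pipe.stdin.write(frame.tobytes())
    finally:
        pipe.stdin.close()
        pipe.wait()

run_single_camera() would then pass image_get_and_push instead of image_get as the consumer target.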

  

References

    1. Installing an Nginx-RTMP streaming media server on Ubuntu 14: https://www.cnblogs.com/cocoajin/p/4353767.html
    2. Python multiprocessing (multi-process programming): https://blog.csdn.net/jeffery0207/article/details/82958520
    3. Converting between video and images with ffmpeg: https://blog.csdn.net/TingiBanDeQu/article/details/53896944
    4. OpenCV 3.3 + ffmpeg + RTMP video processing and push streaming with Python 2.7: https://blog.csdn.net/u014303844/article/details/80394101
    5. Reading video streams from multiple (Hikvision/Dahua) IP cameras with opencv-python and solving real-time latency: https://zhuanlan.zhihu.com/p/38136322
    6. RTMP push streaming with Python and ffmpeg: https://zhuanlan.zhihu.com/p/74260950
