I have another interview tomorrow, so I'm taking this opportunity to summarize what I built before, both to make it easier to understand later and to refresh my own memory.
The networked video surveillance and face recognition system is made up of three programs: 1. the video capture and transmission program; 2. the receive and display program; 3. the face recognition program. Each of the three is analyzed below.
I. The video capture and transmission program (Servfox)
Analysis of the key parts:
1. Video data capture (either the memory-mapped method or the read method can be used)
```c
int v4lGrab(struct vdIn *vd)
{
    static int frame = 0;
    int len;
    int size;
    int erreur = 0;
    int jpegsize = 0;
    struct frame_t *headerframe;
    double timecourant = 0;
    double temps = 0;

    timecourant = ms_time();
    if (vd->grabMethod)   /* memory-mapped (mmap) method */
    {
        vd->vmmap.height = vd->hdrheight;
        vd->vmmap.width = vd->hdrwidth;
        vd->vmmap.format = vd->formatIn;
        /* When this ioctl returns successfully, one frame has been captured.
           The image data sits in the memory region starting at
           vd->pFramebuffer + vd->videombuf.offsets[vd->vmmap.frame];
           reading that region yields the image data, and the next
           VIDIOCMCAPTURE can then be issued. */
        if (ioctl(vd->fd, VIDIOCSYNC, &vd->vmmap.frame) < 0)
        {
            perror("cvsync err\n");
            erreur = -1;
        }
        /* Is there someone using the frame? */
        while ((vd->framelock[vd->frame_cour] != 0) && vd->signalquit)
            usleep(1000);
        pthread_mutex_lock(&vd->grabmutex);
        temps = ms_time();
        /* Capture finished: JPEG-compress the frame (a lot happens inside convertframe) */
        jpegsize = convertframe(vd->ptframe[vd->frame_cour] + sizeof(struct frame_t),
                                vd->pFramebuffer + vd->videombuf.offsets[vd->vmmap.frame],
                                vd->hdrwidth, vd->hdrheight, vd->formatIn, vd->framesizeIn);
        /* Fill in the frame header */
        headerframe = (struct frame_t *)vd->ptframe[vd->frame_cour];
        snprintf(headerframe->header, 5, "%s", "SPCA");
        headerframe->seqtimes = ms_time();
        headerframe->deltatimes = (int)(headerframe->seqtimes - timecourant);
        headerframe->w = vd->hdrwidth;
        headerframe->h = vd->hdrheight;
        headerframe->size = ((jpegsize < 0) ? 0 : jpegsize);
        headerframe->format = vd->formatIn;
        headerframe->nbframe = frame++;
        // printf("compress frame %d times %f\n", frame, headerframe->seqtimes - temps);
        pthread_mutex_unlock(&vd->grabmutex);
        /************************************/
        if ((ioctl(vd->fd, VIDIOCMCAPTURE, &(vd->vmmap))) < 0)
        {
            perror("cmcapture");
            if (debug) printf(">>cmcapture err \n");
            erreur = -1;
        }
        vd->vmmap.frame = (vd->vmmap.frame + 1) % vd->videombuf.frames;
        vd->frame_cour = (vd->frame_cour + 1) % OUTFRMNUMB;
        // if (debug) printf("frame nb %d\n", vd->vmmap.frame);
    }
    else   /* read method */
    {
        size = vd->framesizeIn;
        len = read(vd->fd, vd->pFramebuffer, size);
        if (len < 0)
        {
            if (debug) printf("v4l read error\n");
            if (debug) printf("len %d asked %d \n", len, size);
            return 0;
        }
        /* Is there someone using the frame? */
        while ((vd->framelock[vd->frame_cour] != 0) && vd->signalquit)
            usleep(1000);
        pthread_mutex_lock(&vd->grabmutex);
        temps = ms_time();
        jpegsize = convertframe(vd->ptframe[vd->frame_cour] + sizeof(struct frame_t),
                                vd->pFramebuffer,
                                vd->hdrwidth, vd->hdrheight, vd->formatIn, vd->framesizeIn);
        headerframe = (struct frame_t *)vd->ptframe[vd->frame_cour];
        snprintf(headerframe->header, 5, "%s", "SPCA");
        headerframe->seqtimes = ms_time();
        headerframe->deltatimes = (int)(headerframe->seqtimes - timecourant);
        headerframe->w = vd->hdrwidth;
        headerframe->h = vd->hdrheight;
        headerframe->size = ((jpegsize < 0) ? 0 : jpegsize);
        headerframe->format = vd->formatIn;
        headerframe->nbframe = frame++;
        // if (debug) printf("compress frame %d times %f\n", frame, headerframe->seqtimes - temps);
        vd->frame_cour = (vd->frame_cour + 1) % OUTFRMNUMB;
        pthread_mutex_unlock(&vd->grabmutex);
        /************************************/
    }
    return erreur;
}
```
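For context on the mmap branch above: before v4lGrab can call VIDIOCSYNC and VIDIOCMCAPTURE, the driver's capture buffers have to be queried and mapped once during initialization. Servfox's own init code is not shown here; the following is only a minimal sketch of the usual V4L1 sequence (VIDIOCGMBUF, mmap, queue the first capture). It requires the legacy V4L1 `<linux/videodev.h>` header, and identifiers such as setup_mmap_capture are illustrative, not Servfox's.

```c
/* Minimal sketch of the V4L1 mmap setup assumed by v4lGrab's mmap branch.
 * Illustrative only; needs the legacy V4L1 <linux/videodev.h> header,
 * which was removed from modern kernels. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev.h>

static unsigned char *setup_mmap_capture(int fd, int width, int height,
                                         int palette, struct video_mbuf *mbuf)
{
    /* Ask the driver how much memory it needs and how many frame slots it offers */
    if (ioctl(fd, VIDIOCGMBUF, mbuf) < 0) {
        perror("VIDIOCGMBUF");
        return NULL;
    }
    /* Map the whole capture area once; frame i starts at base + mbuf->offsets[i] */
    unsigned char *base = mmap(NULL, mbuf->size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return NULL;
    }
    /* Queue the first capture; v4lGrab later waits for it with VIDIOCSYNC */
    struct video_mmap vmap;
    vmap.frame  = 0;
    vmap.width  = width;
    vmap.height = height;
    vmap.format = palette;          /* e.g. VIDEO_PALETTE_YUV420P */
    if (ioctl(fd, VIDIOCMCAPTURE, &vmap) < 0) {
        perror("VIDIOCMCAPTURE");
        munmap(base, mbuf->size);
        return NULL;
    }
    return base;   /* corresponds to vd->pFramebuffer in the code above */
}
```

In v4lGrab the same pattern then repeats: VIDIOCSYNC waits for the frame that was queued last, and VIDIOCMCAPTURE immediately queues the next one, so capture and JPEG compression can overlap.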
2. Sending the data over a socket
```c
for (;;)
{
    memset(&message, 0, sizeof(struct client_t));
    /* Read the control data from the network into the message structure */
    ret = read(sock, (unsigned char *)&message, sizeof(struct client_t));
    /* Act on the control information received
       (earlier branches of this if/else chain are omitted in this excerpt) */
    /* Quality adjustment */
    else if (message.updosize) {   // compatibility FIX chg quality factor ATM
        switch (message.updosize) {
        case 1: qualityUp(&videoIn);
            break;
        case 2: qualityDown(&videoIn);
            break;
        }
        ack = 1;
    }
    /* Frame-rate adjustment */
    else if (message.fps) {
        switch (message.fps) {
        case 1: timeDown(&videoIn);
            break;
        case 2: timeUp(&videoIn);
            break;
        }
        ack = 1;
    }
    /* Sleep control */
    else if (message.sleepon) {
        ack = 1;
    }
    else ack = 0;

    /* Wait until the grabber has produced a new frame */
    while ((frameout == videoIn.frame_cour) && videoIn.signalquit)
        usleep(1000);
    if (videoIn.signalquit) {
        videoIn.framelock[frameout]++;
        headerframe = (struct frame_t *)videoIn.ptframe[frameout];
        headerframe->acknowledge = ack;
        headerframe->bright = bright;
        headerframe->contrast = contrast;
        headerframe->wakeup = wakeup;
        /* Send the frame header */
        ret = write_sock(sock, (unsigned char *)headerframe, sizeof(struct frame_t));
        if (!wakeup)
            /* Send the frame data */
            ret = write_sock(sock, (unsigned char *)(videoIn.ptframe[frameout] + sizeof(struct frame_t)), headerframe->size);
        videoIn.framelock[frameout]--;
        frameout = (frameout + 1) % 4;
    } else {
        if (debug) printf("reader %d going out \n", *id);
        break;
    }
}
close_sock(sock);
pthread_exit(NULL);
}
```
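On the receiving side, the client simply mirrors this protocol: it first reads one struct frame_t header, then, unless the camera is asleep, reads headerframe->size bytes of JPEG data. The sketch below assumes a hypothetical recv_all() helper that loops over read() to cope with short TCP reads, and the struct frame_t shown is reduced to the fields used above; in practice the client must reuse the server's struct definition byte for byte.

```c
#include <stdlib.h>
#include <unistd.h>

/* Reduced illustration only: the real client must share the server's
 * struct frame_t verbatim so sizes and field offsets line up. */
struct frame_t {
    char   header[5];      /* "SPCA" */
    int    nbframe;
    double seqtimes;
    int    deltatimes;
    int    w, h;
    int    size;           /* length of the JPEG payload */
    int    format;
    int    acknowledge, bright, contrast, wakeup;
};

/* Hypothetical helper: keep calling read() until len bytes have arrived */
static int recv_all(int sock, unsigned char *buf, size_t len)
{
    size_t got = 0;
    while (got < len) {
        ssize_t n = read(sock, buf + got, len - got);
        if (n <= 0) return -1;     /* error or connection closed */
        got += (size_t)n;
    }
    return 0;
}

/* Receive one frame: the header first, then the JPEG payload it announces */
static unsigned char *recv_frame(int sock, struct frame_t *hdr)
{
    if (recv_all(sock, (unsigned char *)hdr, sizeof(*hdr)) < 0) return NULL;
    if (hdr->wakeup || hdr->size <= 0) return NULL;   /* camera asleep: header only */
    unsigned char *jpeg = malloc((size_t)hdr->size);
    if (!jpeg) return NULL;
    if (recv_all(sock, jpeg, (size_t)hdr->size) < 0) {
        free(jpeg);
        return NULL;
    }
    return jpeg;   /* hand this buffer to the JPEG decoder / display code */
}
```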
II. The receive and display program
1. How JPEG image compression works
A flat image can be thought of as a three-dimensional system: besides the horizontal axis X and the vertical axis Y, there is a third axis Z for the color value. Z is the magnitude of each of the primary components R/G/B in the mix; every pixel may have a different RGB combination, and the values vary, but the R/G/B values of two neighboring pixels tend to be very close. Since adjacent pixels share so much color information, the goal of compression is to record as little of this redundant data as possible in the final image file.
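JPEG exploits exactly this redundancy from its very first step: the RGB values are converted to a luma/chroma representation (YCbCr), and because neighboring pixels differ very little in chroma, the Cb and Cr planes can be averaged over 2x2 blocks (4:2:0 subsampling) with hardly any visible loss. The sketch below shows these two steps using the standard JFIF conversion formulas; it only illustrates the principle and is not the encoder that convertframe uses internally.

```c
#include <stdint.h>

/* Standard JFIF full-range RGB -> YCbCr conversion. */
static void rgb_to_ycbcr(uint8_t r, uint8_t g, uint8_t b,
                         double *y, double *cb, double *cr)
{
    *y  =  0.299    * r + 0.587    * g + 0.114    * b;
    *cb = -0.168736 * r - 0.331264 * g + 0.5      * b + 128.0;
    *cr =  0.5      * r - 0.418688 * g - 0.081312 * b + 128.0;
}

/* 4:2:0 chroma subsampling: one Cb and one Cr value per 2x2 block of pixels
 * (assumes even w and h). Neighboring pixels have near-identical chroma, so
 * averaging it away already discards half the data before the DCT,
 * quantization, and entropy-coding stages remove the rest. */
static void subsample_420(const double *cb, const double *cr, int w, int h,
                          double *cb_out, double *cr_out)
{
    for (int y = 0; y < h; y += 2)
        for (int x = 0; x < w; x += 2) {
            int i00 = y * w + x, i01 = i00 + 1;
            int i10 = i00 + w,   i11 = i10 + 1;
            int o = (y / 2) * (w / 2) + (x / 2);
            cb_out[o] = (cb[i00] + cb[i01] + cb[i10] + cb[i11]) / 4.0;
            cr_out[o] = (cr[i00] + cr[i01] + cr[i10] + cr[i11]) / 4.0;
        }
}
```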