RV1126移植並使用RKMedia中RKNN相關代碼


1,首先參考sdk/external/rkmedia/examples/rkmedia_vi_rknn_venc_rtsp_test.c的代碼,並在同一目錄下創建rkmedia_vi_venc_rtsp_md.c文件,適配自己的攝像頭編寫代碼。這里我使用的攝像頭為USB攝像頭,輸出圖像格式為YUYV,有兩種分辨率,分別為640x480,1280x720。
2,編寫代碼之前先在同目錄下的CMakeLists.txt中加入配置語句如下:
#--------------------------
# rkmedia_vi_venc_rtsp_md
#--------------------------
link_directories(${PROJECT_SOURCE_DIR}/librtsp/)
add_executable(rkmedia_vi_venc_rtsp_md rkmedia_vi_venc_rtsp_md.c ${COMMON_SRC})
add_dependencies(rkmedia_vi_venc_rtsp_md easymedia)
target_link_libraries(rkmedia_vi_venc_rtsp_md rtsp rknn_api m easymedia rga)
target_include_directories(rkmedia_vi_venc_rtsp_md PRIVATE ${CMAKE_SOURCE_DIR}/include ${CMAKE_SYSROOT}/usr/include/rknn)
install(TARGETS rkmedia_vi_venc_rtsp_md RUNTIME DESTINATION "bin")
install(FILES rtsp-nn.cfg DESTINATION share)
install(DIRECTORY rknn_model DESTINATION share)
3,完成上述步驟開始編寫代碼

原文件中的圖片輸入格式為NV12,這里為YUYV422,因此將NV12轉RGB格式的函數改為YUYV轉RGB的函數即可,YUYV轉RGB的函數如下

/*
 * Convert one YUV sample triple to a packed RGB value.
 *
 * y, u, v: 8-bit YUV components (0..255).
 * Returns R in bits 0-7, G in bits 8-15, B in bits 16-23.
 *
 * Fix over the original: the result is now packed with explicit shifts
 * instead of writing bytes through a pointer into an unsigned int, which
 * only produced this layout on little-endian machines.
 */
int convert_yuv_to_rgb_pixel(int y, int u, int v)
{
 int r, g, b;
 /* BT.601-style YUV -> RGB; the double result is truncated on assignment. */
 r = y + (1.370705 * (v-128));
 g = y - (0.698001 * (v-128)) - (0.337633 * (u-128));
 b = y + (1.732446 * (u-128));
 /* Clamp each channel to [0, 255]. */
 if(r > 255) r = 255;
 if(g > 255) g = 255;
 if(b > 255) b = 255;
 if(r < 0) r = 0;
 if(g < 0) g = 0;
 if(b < 0) b = 0;
 /* Scale by 220/256 (slightly darkens output; kept from the original). */
 r = r * 220 / 256;
 g = g * 220 / 256;
 b = b * 220 / 256;
 /* Pack R low byte, G middle, B high — endianness-independent. */
 return r | (g << 8) | (b << 16);
}

/*
 * Convert a packed YUYV 4:2:2 frame to RGB24.
 *
 * yuv:    input buffer, width*height*2 bytes, macropixel layout Y0 U Y1 V.
 * rgb:    output buffer, width*height*3 bytes (caller-allocated).
 * width, height: frame dimensions in pixels (width assumed even).
 * Returns 0.
 *
 * Fix over the original: the bytes are read directly instead of being
 * packed into a 32-bit word first — `yuv[in+3] << 24` promoted the byte
 * to signed int and overflowed for values >= 128 (undefined behavior).
 */
int YUYV_to_rgb24(unsigned char *yuv, unsigned char *rgb, unsigned int width, unsigned int height)
{
 unsigned int in, out = 0;
 unsigned int pixel32;
 int y0, u, y1, v;
 for(in = 0; in < width * height * 2; in += 4) {
  /* One macropixel yields two RGB pixels sharing the same U/V. */
  y0 = yuv[in + 0];
  u  = yuv[in + 1];
  y1 = yuv[in + 2];
  v  = yuv[in + 3];
  pixel32 = convert_yuv_to_rgb_pixel(y0, u, v);
  rgb[out++] = pixel32 & 0xff;         /* R */
  rgb[out++] = (pixel32 >> 8) & 0xff;  /* G */
  rgb[out++] = (pixel32 >> 16) & 0xff; /* B */
  pixel32 = convert_yuv_to_rgb_pixel(y1, u, v);
  rgb[out++] = pixel32 & 0xff;
  rgb[out++] = (pixel32 >> 8) & 0xff;
  rgb[out++] = (pixel32 >> 16) & 0xff;
 }
 return 0;
}

再將原來的NV12toRGB24函數改為以上的函數YUYV_to_rgb24即可,同時原代碼中跟蹤圖形畫框的代碼也是基於NV12圖像格式,這里改為YUYV格式進行處理,代碼如下:

/*
 * Draw a colored rectangular border into a frame.
 *
 * NOTE(review): despite the name (kept for caller compatibility) this
 * version operates on packed YUYV 4:2:2 data, 2 bytes per pixel with
 * macropixel layout Y0 U Y1 V.
 *
 * pic:            frame buffer (YUYV), modified in place.
 * pic_w, pic_h:   frame dimensions in pixels.  pic_h is currently unused —
 *                 there is no bounds check; the caller must ensure the
 *                 rectangle lies inside the frame.
 * rect_x, rect_y: top-left corner of the rectangle.
 * rect_w, rect_h: rectangle size.
 * R, G, B:        border color (0..255 each).
 * Returns 0.
 *
 * Fixes over the original: removed the dead `pic_h * 0` term and the
 * redundant unconditional `u_index = y_index + 1` assignment.
 */
int nv12_border(char *pic, int pic_w, int pic_h, int rect_x, int rect_y,
                int rect_w, int rect_h, int R, int G, int B) {
  /* Border thickness in pixels. */
  const int border = 15;

  /* RGB -> YUV (BT.601 full range), truncated to int on assignment. */
  int Y, U, V;
  Y = 0.299 * R + 0.587 * G + 0.114 * B;
  U = -0.1687 * R - 0.3313 * G + 0.5 * B + 128;
  V = 0.5 * R - 0.4187 * G - 0.0813 * B + 128;

  int j, k, y_index, u_index, v_index;
  for (j = rect_y; j < rect_y + rect_h; j++) {
    for (k = rect_x; k < rect_x + rect_w; k++) {
      /* Only touch pixels within `border` of the rectangle's edge. */
      if (k < (rect_x + border) || k > (rect_x + rect_w - border) ||
          j < (rect_y + border) || j > (rect_y + rect_h - border)) {
        /* 2 bytes per pixel; Y sits at the pixel's own offset. */
        y_index = j * pic_w * 2 + k * 2;
        if (k % 2 == 0)
          u_index = y_index + 1; /* even column = Y0, U follows it */
        else
          u_index = y_index - 1; /* odd column = Y1, U precedes it */
        v_index = u_index + 2;   /* V is 2 bytes after U in the macropixel */
        pic[y_index] = Y;
        pic[u_index] = U;
        pic[v_index] = V;
      }
    }
  }

  return 0;
}

原代碼調用了雙目攝像頭,因此可以獲取兩個視頻流,並創建GetMediaBuffer()與MainStream(),這里要將兩個函數合二為一,將MainStream中跟蹤被檢測物體畫框的代碼移植到GetMediaBuffer()最后即可,同時在Main函數中創建MainStream函數線程的代碼也需要一並注釋刪除,修改后的GetMediaBuffer函數如下:

static void *GetMediaBuffer(void *arg) {
  printf("#Start %s thread, arg:%p\n", __func__, arg);

  rknn_context ctx;
  int ret;
  int model_len = 0;
  unsigned char *model;

  printf("Loading model ...\n");
  model = load_model(g_ssd_path, &model_len);
  ret = rknn_init(&ctx, model, model_len, 0);
  if (ret < 0) {
    printf("rknn_init fail! ret=%d\n", ret);
    return NULL;
  }

  // Get Model Input Output Info
  rknn_input_output_num io_num;
  ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
  if (ret != RKNN_SUCC) {
    printf("rknn_query fail! ret=%d\n", ret);
    return NULL;
  }
  printf("model input num: %d, output num: %d\n", io_num.n_input,
         io_num.n_output);

  printf("input tensors:\n");
  rknn_tensor_attr input_attrs[io_num.n_input];
  memset(input_attrs, 0, sizeof(input_attrs));
  for (unsigned int i = 0; i < io_num.n_input; i++) {
    input_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]),
                     sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC) {
      printf("rknn_query fail! ret=%d\n", ret);
      return NULL;
    }
    printRKNNTensor(&(input_attrs[i]));
  }

  printf("output tensors:\n");
  rknn_tensor_attr output_attrs[io_num.n_output];
  memset(output_attrs, 0, sizeof(output_attrs));
  for (unsigned int i = 0; i < io_num.n_output; i++) {
    output_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]),
                     sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC) {
      printf("rknn_query fail! ret=%d\n", ret);
      return NULL;
    }
    printRKNNTensor(&(output_attrs[i]));
  }

  MEDIA_BUFFER buffer = NULL;
  float x_rate = (float)cfg.session_cfg[DRAW_INDEX].u32Width / MODEL_INPUT_SIZE;
  float y_rate =
      (float)cfg.session_cfg[DRAW_INDEX].u32Height / MODEL_INPUT_SIZE;
  printf("x_rate is %f, y_rate is %f\n", x_rate, y_rate);

  while (g_flag_run) {
    buffer = RK_MPI_SYS_GetMediaBuffer(RK_ID_VI, 0, -1);
    if (!buffer) {
      printf("RK_MPI_SYS_GetMediaBuffer getmediabuffer get null buffer!\n");
      break;
    }
    // printf("Get Frame:ptr:%p, fd:%d, size:%zu, mode:%d, channel:%d, "
    //        "timestamp:%lld\n",
    //        RK_MPI_MB_GetPtr(buffer), RK_MPI_MB_GetFD(buffer),
    //        RK_MPI_MB_GetSize(buffer),
    //        RK_MPI_MB_GetModeID(buffer), RK_MPI_MB_GetChannelID(buffer),
    //        RK_MPI_MB_GetTimestamp(buffer));
// nv12 to rgb24 and resize
//int rga_buffer_size = 640*360*3;
int rga_buffer_size = cfg.session_cfg[RK_NN_INDEX].u32Width *cfg.session_cfg[RK_NN_INDEX].u32Height *4; // nv12 3/2, rgb 3
int rga_buffer_model_input_size = MODEL_INPUT_SIZE * MODEL_INPUT_SIZE * 4;
unsigned char *rga_buffer = malloc(rga_buffer_size);
unsigned char *rga_buffer_model_input = malloc(rga_buffer_model_input_size);

nv12_to_rgb24(RK_MPI_MB_GetPtr(buffer), rga_buffer,
            cfg.session_cfg[RK_NN_INDEX].u32Width,
            cfg.session_cfg[RK_NN_INDEX].u32Height);//cfg.session_cfg[RK_NN_INDEX].u32Width,cfg.session_cfg[RK_NN_INDEX].u32Height  640,360

rgb24_resize(rga_buffer, rga_buffer_model_input,
             cfg.session_cfg[RK_NN_INDEX].u32Width,
             cfg.session_cfg[RK_NN_INDEX].u32Height, MODEL_INPUT_SIZE,
             MODEL_INPUT_SIZE);//cfg.session_cfg[RK_NN_INDEX].u32Width,cfg.session_cfg[RK_NN_INDEX].u32Height

// Set Input Data
rknn_input inputs[1];
memset(inputs, 0, sizeof(inputs));
inputs[0].index = 0;
inputs[0].type = RKNN_TENSOR_UINT8;
inputs[0].size = rga_buffer_model_input_size;
inputs[0].fmt = RKNN_TENSOR_NHWC;
inputs[0].buf = rga_buffer_model_input;

ret = rknn_inputs_set(ctx, io_num.n_input, inputs);
if (ret < 0) {
  printf("rknn_input_set fail! ret=%d\n", ret);
  return NULL;
}

// Run
printf("rknn_run\n");
ret = rknn_run(ctx, NULL);
if (ret < 0) {
  printf("rknn_run fail! ret=%d\n", ret);
  return NULL;
}

// Get Output
rknn_output outputs[2];
memset(outputs, 0, sizeof(outputs));
outputs[0].want_float = 1;
outputs[1].want_float = 1;
ret = rknn_outputs_get(ctx, io_num.n_output, outputs, NULL);
if (ret < 0) {
  printf("rknn_outputs_get fail! ret=%d\n", ret);
  return NULL;
}

// Post Process
detect_result_group_t detect_result_group;
postProcessSSD((float *)(outputs[0].buf), (float *)(outputs[1].buf),
               MODEL_INPUT_SIZE, MODEL_INPUT_SIZE, &detect_result_group);
// Release rknn_outputs
rknn_outputs_release(ctx, 2, outputs);

//Dump Objects
for (int i = 0; i < detect_result_group.count; i++){
   detect_result_t *det_result = &(detect_result_group.results[i]);
   printf("%s @ (%d %d %d %d) %f\n", det_result->name,
   det_result->box.left,
          det_result->box.top, det_result->box.right,
          det_result->box.bottom,
          det_result->prop);
 }

if (detect_result_group.count > 0) {
  rknn_list_push(rknn_list_, getCurrentTimeMsec(), detect_result_group);
  int size = rknn_list_size(rknn_list_);
  if (size >= MAX_RKNN_LIST_NUM)
    rknn_list_drop(rknn_list_);
  printf("size is %d\n", size);
}

// draw
if(rknn_list_size(rknn_list_)){
  long time_before;
  detect_result_group_t detect_result_group1;
  memset(&detect_result_group1, 0, sizeof(detect_result_group1));
  rknn_list_pop(rknn_list_, &time_before, &detect_result_group1);
  // printf("time interval is %ld\n", getCurrentTimeMsec() - time_before);

  for (int j = 0; j < detect_result_group1.count; j++) {

    if (strcmp(detect_result_group1.results[j].name, "mouse"))
      continue;
    if (detect_result_group1.results[j].prop < 0.5)
      continue;
    int x = detect_result_group1.results[j].box.left * x_rate;
    int y = detect_result_group1.results[j].box.top * y_rate;
    int w = (detect_result_group1.results[j].box.right -
             detect_result_group1.results[j].box.left) *
            x_rate;
    int h = (detect_result_group1.results[j].box.bottom -
             detect_result_group1.results[j].box.top) *
            y_rate;
    if (x < 0)
      x = 0;
    if (y < 0)
      y = 0;
    while ((uint32_t)(x + w) >= cfg.session_cfg[DRAW_INDEX].u32Width) {
      w -= 16;
    }
    while ((uint32_t)(y + h) >= cfg.session_cfg[DRAW_INDEX].u32Height) {
      h -= 16;
    }
    printf("border=(%d %d %d %d)\n", x, y, w, h);
    nv12_border((char*)RK_MPI_MB_GetPtr(buffer),
                cfg.session_cfg[DRAW_INDEX].u32Width,
                cfg.session_cfg[DRAW_INDEX].u32Height, x, y, w, h, 255, 0,
                0);
  }
}
// send from VI to VENC
RK_MPI_SYS_SendMediaBuffer(
    RK_ID_VENC, cfg.session_cfg[DRAW_INDEX].stVenChn.s32ChnId, buffer);
RK_MPI_MB_ReleaseBuffer(buffer);

if (rga_buffer)
  free(rga_buffer);
if (rga_buffer_model_input)
  free(rga_buffer_model_input);
 }
  // release
  if (ctx)
    rknn_destroy(ctx);
  if (model)
    free(model);

  return NULL;
}

在Main函數中,由於輸入的配置文件中有關於兩個攝像頭的信息,這里需要只選取一個,因此將其中循環創建vi,venc通道的for循環注釋,而且循環調用的語句全部更改為只需要一次,並且由於圖像檢測的信息在GetMediaBuffer中已經傳送給了venc通道,這里,就不需要用到通道綁定的函數,RK_MPI_SYS_BIND函數一並注釋,具體的Main函數代碼如下:

/*
 * Entry point: parses CLI options, optionally initialises the ISP (RKAIQ),
 * starts an RTSP server and the RKMedia system, sets up a single VI -> VENC
 * session (the dual-camera loops of the original sample were collapsed to
 * one session, hence the commented-out for loops), starts the detection
 * thread, and pumps encoded frames out over RTSP until SIGINT.
 */
int main(int argc, char **argv) {
  RK_CHAR *pCfgPath = "/oem/usr/share/rtsp-nn.cfg";
  RK_CHAR *pIqfilesPath = NULL;
  RK_S32 s32CamId = 0;
#ifdef RKAIQ
  RK_BOOL bMultictx = RK_FALSE;
#endif
  int c;
  /* Option parsing: -a [iq-dir] -c cfg -b box-priors -l labels -p model
   * -I camera-index, plus -M (multi-context) when built with RKAIQ. */
  while ((c = getopt_long(argc, argv, optstr, long_options, NULL)) != -1) {
    const char *tmp_optarg = optarg;
    switch (c) {
    case 'a':
      /* -a takes an optional argument: accept a following bare token. */
      if (!optarg && NULL != argv[optind] && '-' != argv[optind][0]) {
        tmp_optarg = argv[optind++];
      }
      if (tmp_optarg) {
        pIqfilesPath = (char *)tmp_optarg;
      } else {
        pIqfilesPath = "/oem/etc/iqfiles/";
      }
      break;
    case 'c':
      pCfgPath = optarg;
      break;
    case 'b':
      g_box_priors = optarg;
      break;
    case 'l':
      g_labels_list = optarg;
      break;
    case 'p':
      g_ssd_path = optarg;
      break;
    case 'I':
      s32CamId = atoi(optarg);
      break;
#ifdef RKAIQ
    case 'M':
      if (atoi(optarg)) {
        bMultictx = RK_TRUE;
      }
      break;
#endif
    case '?':
    default:
      print_usage(argv[0]);
      return 0;
    }
  }

  printf("cfg path is %s\n", pCfgPath);
  printf("BOX_PRIORS_TXT_PATH is %s\n", g_box_priors);
  printf("LABEL_NALE_TXT_PATH is %s\n", g_labels_list);
  printf("MODEL_PATH is %s\n", g_ssd_path);
  printf("#CameraIdx: %d\n\n", s32CamId);
  /* Parse the session configuration (resolution, codec, video path). */
  load_cfg(pCfgPath);

  /* SIGINT clears g_flag_run so the loops below can exit cleanly. */
  signal(SIGINT, sig_proc);

  if (pIqfilesPath) {
#ifdef RKAIQ
    printf("xml dirpath: %s\n\n", pIqfilesPath);
    printf("#bMultictx: %d\n\n", bMultictx);
    int fps = 30;
    rk_aiq_working_mode_t hdr_mode = RK_AIQ_WORKING_MODE_NORMAL;
    SAMPLE_COMM_ISP_Init(s32CamId, hdr_mode, bMultictx, pIqfilesPath);
    SAMPLE_COMM_ISP_Run(s32CamId);
    SAMPLE_COMM_ISP_SetFrameRate(s32CamId, fps);
#endif
  }

  // init rtsp
  /* NOTE(review): create_rtsp_demo's return value is not checked — a NULL
   * here would crash in rtsp_new_session below; consider verifying. */
  printf("init rtsp\n");
  g_rtsplive = create_rtsp_demo(554);

  // init mpi
  printf("init mpi\n");
  RK_MPI_SYS_Init();

  // create session
  /* Only session 0 is used (single USB camera); the original sample
   * iterated over cfg.session_count here. */
    int i = 0; //for (int i = 0; i < cfg.session_count; i++){}
    cfg.session_cfg[i].session =
        rtsp_new_session(g_rtsplive, cfg.session_cfg[i].path);
// VI create
printf("VI create\n");
cfg.session_cfg[i].stViChn.enModId = RK_ID_VI;
cfg.session_cfg[i].stViChn.s32ChnId = i;
SAMPLE_COMMON_VI_Start(&cfg.session_cfg[i], VI_WORK_MODE_NORMAL, i);
// VENC create
printf("VENC create\n");
cfg.session_cfg[i].stVenChn.enModId = RK_ID_VENC;
cfg.session_cfg[i].stVenChn.s32ChnId = i;
SAMPLE_COMMON_VENC_Start(&cfg.session_cfg[i]);
/* VI -> VENC is NOT bound here: GetMediaBuffer forwards frames manually
 * (after drawing detection boxes), so the stream is started directly. */
//if (i == DRAW_INDEX)
  RK_MPI_VI_StartStream(s32CamId, cfg.session_cfg[i].stViChn.s32ChnId);
//else
  //RK_MPI_SYS_Bind(&cfg.session_cfg[i].stViChn,
                  //&cfg.session_cfg[i].stVenChn);

// rtsp video
/* Advertise the codec configured in rtsp-nn.cfg to the RTSP session. */
printf("rtsp video\n");
switch (cfg.session_cfg[i].video_type) {
case RK_CODEC_TYPE_H264:
  rtsp_set_video(cfg.session_cfg[i].session, RTSP_CODEC_ID_VIDEO_H264, NULL,
                 0);
  break;
case RK_CODEC_TYPE_H265:
  rtsp_set_video(cfg.session_cfg[i].session, RTSP_CODEC_ID_VIDEO_H265, NULL,
                 0);
  break;
default:
  printf("video codec not support.\n");
  break;
}

rtsp_sync_video_ts(cfg.session_cfg[i].session, rtsp_get_reltime(),
                   rtsp_get_ntptime());
  //}

  create_rknn_list(&rknn_list_);

  // Get the sub-stream buffer for humanoid recognition
  /* Detection thread: capture -> inference -> draw -> send to VENC. */
  pthread_t read_thread;
  pthread_create(&read_thread, NULL, GetMediaBuffer, NULL);

  // The mainstream draws a box asynchronously based on the recognition result
  /* MainStream was merged into GetMediaBuffer, so no second thread. */
  //pthread_t main_stream_thread;
  //pthread_create(&main_stream_thread, NULL, MainStream, NULL);

  /* Main loop: drain encoded frames from VENC and push them over RTSP. */
  while (g_flag_run) {
    int j = 0;//for (int j = 0; j < cfg.session_count; j++) {
      MEDIA_BUFFER buffer;
      // send video buffer
      buffer = RK_MPI_SYS_GetMediaBuffer(
          RK_ID_VENC, cfg.session_cfg[j].stVenChn.s32ChnId, 0);
      if (buffer) {
        rtsp_tx_video(cfg.session_cfg[j].session, RK_MPI_MB_GetPtr(buffer),
                      RK_MPI_MB_GetSize(buffer),
                      RK_MPI_MB_GetTimestamp(buffer));
        RK_MPI_MB_ReleaseBuffer(buffer);
      }
    //}
    rtsp_do_event(g_rtsplive);
  }

  pthread_join(read_thread, NULL);
  //pthread_join(main_stream_thread, NULL);

  /* Teardown: RTSP server, VENC channel, VI channel, then ISP and list. */
  rtsp_del_demo(g_rtsplive);
  int k = 0;//for (int i = 0; i < cfg.session_count; i++) {
    //if (i != DRAW_INDEX)
      //RK_MPI_SYS_UnBind(&cfg.session_cfg[k].stViChn,
                        //&cfg.session_cfg[k].stVenChn);
    RK_MPI_VENC_DestroyChn(cfg.session_cfg[k].stVenChn.s32ChnId);
    RK_MPI_VI_DisableChn(s32CamId, cfg.session_cfg[k].stViChn.s32ChnId);
  //}

  if (pIqfilesPath) {
#ifdef RKAIQ
    SAMPLE_COMM_ISP_Stop(s32CamId);
#endif
  }

  destory_rknn_list(&rknn_list_);
  return 0;
}

其他具體更改的代碼可以與同一目錄下的rkmedia_vi_rknn_venc_rtsp_test.c進行比較查看。代碼中使用了rtsp-nn.cfg,box_priors,coco_labels_list,ssd_inception_v2_rv1109_rv1126文件,后三者是訓練好的目標檢測算法模型文件,若要更改需要重新訓練模型;第一個rtsp-nn.cfg與本代碼在同一個目錄下,其中配置了攝像頭的輸入圖像類型,分辨率,rtsp推流路徑等參數,需要自己針對具體要求進行修改,配置如下:

# cfgline:
# path=%s audio_type=%d channels=%u samplerate=%u nbsample=%u alsapath=%s video_type=%s width=%u height=%u image_type=%u video_path=%s
#
# from rkmedia_common.h CODEC_TYPE_E
# audio_type list
##  AAC, 	0
##  MP2, 	1
##  G711A, 	3
##  G711U, 	4
##  G726, 	5
# video_type list
##  H264, 	6
##  H265, 	7

# image_type
## from rkmedia_common.h IMAGE_TYPE_E
## IMAGE_TYPE_NV12, 	4
## IMAGE_TYPE_FBC0, 	8
## IMAGE_TYPE_NV16, 	10
#
# video_path
## rkispp_m_bypass
## rkispp_scale0
## rkispp_scale1
## rkispp_scale2
# example
path=/live/main_stream video_type=6 width=640 height=480 image_type=13 video_path=/dev/video25
#path=/live/main_stream video_type=7 width=1920 height=1080 image_type=4 video_path=rkispp_scale0
#path=/live/sub_stream video_type=6 width=720 height=576 image_type=13 video_path=/dev/video25
4,進行編譯
# SDK根目錄,選擇環境
source envsetup.sh firefly_rv1126_rv1109
# 重編rkmedia源碼
make rkmedia-dirclean && make rkmedia
# rkmedia庫/程序打包到文件系統(oem.img)
./build.sh rootfs
# 重新燒寫oem.img,若有其他基礎包配置更新(如ffmpeg),則需要重燒rootfs.img

或者執行完前兩步后,在SDK/buildroot/output/firefly_rv1126_rv1109/oem/usr/bin目錄下找到rkmedia_vi_venc_rtsp_md二進制可執行程序,移動到板子某一個目錄下,再進行測試。這里,我開發板掛載了Linux系統下的/home/kxq/share目錄,在開發板下對應目錄為/mnt/nfs,因此在本虛擬機下,將此二進制可執行程序移動到虛擬機下的/home/kxq/share目錄,再在開發板下的/mnt/nfs目錄執行程序,注意需要加上./與開發板中攜帶的程序進行區別。

5,測試

測試語句為
./rkmedia_vi_venc_rtsp_md -c /oem/usr/share/rtsp-nn.cfg -b /oem/usr/share/rknn_model/box_priors.txt -l /oem/usr/share/rknn_model/coco_labels_list.txt -p /oem/usr/share/rknn_model/ssd_inception_v2_rv1109_rv1126.rknn
vlc rtsp://192.168.137.71:554/live/main_stream

源代碼如下:
https://files.cnblogs.com/files/blogs/741465/rkmedia_vi_venc_rtsp_md.zip?t=1648281776


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM