1,適用于VideoCapture打開的攝像頭
VideoCapture capture(0);
設置攝像頭參數 不要隨意修改
capture.set(CV_CAP_PROP_FRAME_WIDTH, 1080);//寬度
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 960);//高度
capture.set(CV_CAP_PROP_FPS, 30);//幀率 幀/秒
capture.set(CV_CAP_PROP_BRIGHTNESS, 1);//亮度
capture.set(CV_CAP_PROP_CONTRAST,40);//對比度 40
capture.set(CV_CAP_PROP_SATURATION, 50);//飽和度 50
capture.set(CV_CAP_PROP_HUE, 50);//色調 50
capture.set(CV_CAP_PROP_EXPOSURE, 50);//曝光 50
獲取攝像頭參數 得到攝像頭的參數
capture.get(CV_CAP_PROP_FRAME_WIDTH);
capture.get(CV_CAP_PROP_FRAME_HEIGHT);
capture.get(CV_CAP_PROP_FPS);
capture.get(CV_CAP_PROP_BRIGHTNESS);
capture.get(CV_CAP_PROP_CONTRAST);
capture.get(CV_CAP_PROP_SATURATION);
capture.get(CV_CAP_PROP_HUE);
capture.get(CV_CAP_PROP_EXPOSURE);
獲取視頻參數:
capture.get(CV_CAP_PROP_FRAME_COUNT);//視頻幀數
然后你會發現除了個別參數你能更改之外(如曝光度),大部分你是不能更改的,甚至都沒辦法得到,這種并不適用

2,不做開發,只是單純的更改,那么推薦一個軟件 amcap,百度網盤鏈接:https://pan.baidu.com/s/1pL8nq0V#list/path=%2F,很簡單很容易上手。
補:現在突然想起來我的一個學長告訴我的,利用這個軟件調節攝像頭的曝光度,可以改變幀率,且攝像頭會記住曝光度的設置(其他特性就沒有這個特點)。-2019.3.12

3,修改opencv的文件,不過效果可能和第一個差不多。大概是在opencv的這個位置,找一下:modules/highgui/src/cap_v4l.cpp,里面有關于參數的設置,位置比較靠前,可以搜索,也可以直接找到,大致在200多行

4,v4l2。下面是我找到的一篇參考,可以突破幀率的限制,當然前提是攝像頭支持:https://blog.csdn.net/c406495762/article/details/72732135
目前只適用于Linux系統,本人試驗過,120幀的攝像頭在只打開攝像頭時可以達到100幀左右,設置的圖片分辨率越小,能達到的幀率越高

#include <linux/videodev2.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define CLEAR(x) memset(&(x), 0, sizeof(x))
#define WINDOW_NAME1 "【原始圖】" //為窗口標題定義的宏
#define WINDOW_NAME2 "【圖像輪廓】" //為窗口標題定義的宏
Mat g_srcImage;
Mat g_grayImage;
vector<vector<Point> > g_vContours;
vector<Vec4i> g_vHierarchy;
V4L2Capture(char *devName, int width, int height);
int getFrame(void **,size_t *);
// ============================================================================
// NOTE(review): this whole region is a scraped code dump from a blog post.
// Every function below is TRUNCATED by the extraction: closing braces, return
// statements, error-checking `if`s and many whole lines are missing, and all
// string literals arrived with ' instead of ". Code tokens are kept exactly
// as found; only formatting was restored and comments (translated to English)
// were added. The members used here (fd_cam, ret, buffers, n_buffers, buf,
// type, frameIndex, capW, capH) are declared elsewhere — not visible in this
// chunk; confirm against the original article before compiling.
// ============================================================================

// Constructor: receives the device node name and requested frame size.
// (body lost in extraction)
V4L2Capture::V4L2Capture(char *devName, int width, int height) {
    // TODO Auto-generated constructor stub

// Destructor. (body lost in extraction)
V4L2Capture::~V4L2Capture() {
    // TODO Auto-generated destructor stub

// Opens the V4L2 device node read/write into fd_cam.
// NOTE(review): the failure check that should guard perror was lost here.
int V4L2Capture::openDevice() {
    printf('video dev : %s\n', devName);
    fd_cam = open(devName, O_RDWR);
    perror('Can't open video device');

// Closes the fd opened by openDevice().
int V4L2Capture::closeDevice() {
    if ((ret = close(fd_cam)) < 0) {
        perror('Can't close video device');

// Queries device capabilities, enumerates supported pixel formats, sets the
// default crop window, then sets and reads back the frame format.
int V4L2Capture::initDevice() {
    struct v4l2_capability cam_cap;   // device capability info
    struct v4l2_cropcap cam_cropcap;  // capture/crop capability
    struct v4l2_fmtdesc cam_fmtdesc;  // format enumeration: VIDIOC_ENUM_FMT
    struct v4l2_crop cam_crop;        // image crop/scaling window
    struct v4l2_format cam_format;    // video standard / frame format

    /* VIDIOC_QUERYCAP: fetch basic device information */
    ret = ioctl(fd_cam, VIDIOC_QUERYCAP, &cam_cap);
    perror('Can't get device information: VIDIOCGCAP');
    // NOTE(review): the printf( that consumed the arguments below was lost in
    // extraction — only its format string and argument list survive.
    'Driver Name:%s\nCard Name:%s\nBus info:%s\nDriver Version:%u.%u.%u\n',
    cam_cap.driver, cam_cap.card, cam_cap.bus_info,
    (cam_cap.version >> 16) & 0XFF, (cam_cap.version >> 8) & 0XFF,

    /* VIDIOC_ENUM_FMT: list every pixel format the camera supports */
    cam_fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    printf('Support format:\n');
    while (ioctl(fd_cam, VIDIOC_ENUM_FMT, &cam_fmtdesc) != -1) {
        printf('\t%d.%s\n', cam_fmtdesc.index + 1, cam_fmtdesc.description);

    /* VIDIOC_CROPCAP: query the capture cropping capability */
    cam_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (0 == ioctl(fd_cam, VIDIOC_CROPCAP, &cam_cropcap)) {
        printf('Default rec:\n\tleft:%d\n\ttop:%d\n\twidth:%d\n\theight:%d\n',
               cam_cropcap.defrect.left, cam_cropcap.defrect.top,
               cam_cropcap.defrect.width, cam_cropcap.defrect.height);

        /* VIDIOC_S_CROP: set the capture window */
        cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        cam_crop.c = cam_cropcap.defrect;  // default capture window size
        if (-1 == ioctl(fd_cam, VIDIOC_S_CROP, &cam_crop)) {
            //printf('Can't set crop para\n');
            printf('Can't set cropcap para\n');

    /* VIDIOC_S_FMT: set frame width/height and pixel format */
    cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    cam_format.fmt.pix.width = capW;
    cam_format.fmt.pix.height = capH;
    cam_format.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;  // must match a format the camera supports
    cam_format.fmt.pix.field = V4L2_FIELD_INTERLACED;
    ret = ioctl(fd_cam, VIDIOC_S_FMT, &cam_format);
    perror('Can't set frame information');
    /* VIDIOC_G_FMT: read back the frame format actually in effect */
    cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ret = ioctl(fd_cam, VIDIOC_G_FMT, &cam_format);
    perror('Can't get frame information');
    printf('Current data format information:\n\twidth:%d\n\theight:%d\n',
           cam_format.fmt.pix.width, cam_format.fmt.pix.height);
    perror('Buffers init error');

// Requests mmap-able frame buffers from the driver and maps each one.
int V4L2Capture::initBuffers() {
    /* VIDIOC_REQBUFS: request mmap frame buffers */
    struct v4l2_requestbuffers req;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(fd_cam, VIDIOC_REQBUFS, &req);
    perror('Request frame buffers failed');
    perror('Request frame buffers while insufficient buffer memory');
    buffers = (struct cam_buffer*) calloc(req.count, sizeof(*buffers));
    for (n_buffers = 0; n_buffers < req.count; n_buffers++) {
        // VIDIOC_QUERYBUF: get the offset and length of buffer n_buffers.
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        ret = ioctl(fd_cam, VIDIOC_QUERYBUF, &buf);
        printf('VIDIOC_QUERYBUF %d failed\n', n_buffers);
        buffers[n_buffers].length = buf.length;
        //printf('buf.length= %d\n',buf.length);
        // NOTE(review): the mmap() argument list is truncated — the addr and
        // offset arguments and the closing paren were lost in extraction.
        buffers[n_buffers].start = mmap(
            buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_cam,
        if (MAP_FAILED == buffers[n_buffers].start) {
            printf('mmap buffer%d failed\n', n_buffers);

// Queues every mapped buffer, then starts streaming (VIDIOC_STREAMON).
int V4L2Capture::startCapture() {
    for (i = 0; i < n_buffers; i++) {
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        if (-1 == ioctl(fd_cam, VIDIOC_QBUF, &buf)) {
            printf('VIDIOC_QBUF buffer%d failed\n', i);
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl(fd_cam, VIDIOC_STREAMON, &type)) {
        printf('VIDIOC_STREAMON error');

// Stops streaming (VIDIOC_STREAMOFF).
int V4L2Capture::stopCapture() {
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl(fd_cam, VIDIOC_STREAMOFF, &type)) {
        printf('VIDIOC_STREAMOFF error\n');

// Unmaps every buffer mapped by initBuffers().
int V4L2Capture::freeBuffers() {
    for (i = 0; i < n_buffers; ++i) {
        if (-1 == munmap(buffers[i].start, buffers[i].length)) {
            printf('munmap buffer%d failed\n', i);

// Dequeues one filled buffer (VIDIOC_DQBUF), hands back a pointer to its
// mapped memory plus its length, and remembers the index for backFrame().
int V4L2Capture::getFrame(void **frame_buf, size_t* len) {
    struct v4l2_buffer queue_buf;
    queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    queue_buf.memory = V4L2_MEMORY_MMAP;
    if (-1 == ioctl(fd_cam, VIDIOC_DQBUF, &queue_buf)) {
        printf('VIDIOC_DQBUF error\n');
    *frame_buf = buffers[queue_buf.index].start;
    *len = buffers[queue_buf.index].length;
    frameIndex = queue_buf.index;

// Re-queues the buffer previously returned by getFrame() (VIDIOC_QBUF).
int V4L2Capture::backFrame() {
    struct v4l2_buffer queue_buf;
    queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    queue_buf.memory = V4L2_MEMORY_MMAP;
    queue_buf.index = frameIndex;
    if (-1 == ioctl(fd_cam, VIDIOC_QBUF, &queue_buf)) {
        printf('VIDIOC_QBUF error\n');

// Smoke test: open /dev/video0 and grab one frame.
// NOTE(review): the constructor call is truncated (width/height arguments and
// closing paren lost), and `vcap` is never deleted in the visible code.
void V4L2Capture::test() {
    unsigned char *yuv422frame = NULL;
    unsigned long yuvframeSize = 0;
    string videoDev='/dev/video0';
    V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()),
    vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize);

// Least-squares line fit through point3[0..n-1], then draws the fitted line
// and prints its angle/offset. Most of the accumulation loop was lost; in the
// surviving fragment aa..gg are used uninitialized.
void line2(Point point3[100000], int n)
    float aa, bb, cc, dd, ee, ff, gg;
    aa += point3[jj].x*point3[jj].x;
    cc += point3[jj].x*point3[jj].y;
    ff = (n*cc - bb*dd) / ee;   // slope of the fitted line
    pointn.x = ((n-1) * ff + gg);
    Mat draw_ing2 = Mat::zeros(g_cannyMat_output.size(), CV_8UC3);
    // NOTE(review): (255, 255, 255) below is the comma operator (== 255), not
    // a color — presumably Scalar(255, 255, 255) was intended.
    line(draw_ing2, point0, pointn, (255, 255, 255));
    //cout << '\n'<<ff <<' '<< gg << endl;
    float the =180*atan(ff)/3.14159;    // line angle in degrees
    float dis = ff * 160+gg - 160;      // lateral offset at x = 160
    cout << the << ' ' << dis << endl;
    // Center: ff = 0, gg = 160; ff > 0 means counter-clockwise, ff < 0 clockwise.

// Scans the image for magenta pixels (B=255, G=0, R=255) and marks midpoints
// between matched point pairs. Loop bodies are heavily truncated here.
void findcolor(cv::Mat &image)
    cv::Mat_<cv::Vec3b>::iterator it = image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend = image.end<cv::Vec3b>();
    cv::Mat srcX(image.rows, image.cols , CV_32F);
    cv::Mat srcY(image.rows, image.cols, CV_32F);
    for (int i = 0;i < image.rows;i++)
    for (int j = 0;j < image.cols;j++)
    if (flagg == 0)/* iterating this way cannot collect valid data horizontally */
    if ((*it)[0] == 255 && (*it)[1] == 0 && (*it)[2] == 255)
    if ((*it)[0] == 255 && (*it)[1] == 0 && (*it)[2] == 255)
    IplImage pImg = IplImage(image);
    CvArr* arr = (CvArr*)&pImg;
    point3[ii].x = (point1[ii].x + point2[ii].x) / 2;   // midpoint of the pair
    point3[ii].y = (point1[ii].y + point2[ii].y) / 2;
    //circle(image, point3[ii], 1, (255, 255, 255));
    // NOTE(review): cvSet2D addresses (row, col) — x/y may be swapped; verify.
    cvSet2D(arr, point3[ii].x, point3[ii].y, Scalar(255, 255, 255));

// Trackbar callback: Canny edges -> contours -> centroids via moments, then
// draws contours and centroid markers into WINDOW_NAME2.
void on_ThreshChange(int, void* )
    Canny( g_grayImage, g_cannyMat_output, g_nThresh, g_nThresh*2, 3 );
    findContours( g_cannyMat_output, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );
    // Spatial moments of every contour.
    vector<Moments> mu(g_vContours.size() );
    for(unsigned int i = 0; i < g_vContours.size(); i++ ) { mu[i] = moments( g_vContours[i], false ); }
    // Centroid of each contour: (m10/m00, m01/m00).
    vector<Point2f> mc( g_vContours.size() );
    for( unsigned int i = 0; i < g_vContours.size(); i++ ) { mc[i] = Point2f( static_cast<float>(mu[i].m10/mu[i].m00), static_cast<float>(mu[i].m01/mu[i].m00 )); }
    Mat drawing = Mat::zeros(g_cannyMat_output.size(), CV_8UC3);
    for( unsigned int i = 0; i< g_vContours.size(); i++ )
    //Scalar color = Scalar( g_rng.uniform(0, 255), g_rng.uniform(0,255), g_rng.uniform(0,255) );// random color per contour
    Scalar color = Scalar(255, 0, 255);
    drawContours( drawing, g_vContours, i, color, 2, 8, g_vHierarchy, 0, Point() );// draw outer and inner contours
    circle( drawing, mc[i], 4, color, -1, 8, 0 );;// mark the centroid
    //line1(point1,point2,ii,iii);
    // namedWindow( WINDOW_NAME2, WINDOW_AUTOSIZE );
    imshow( WINDOW_NAME2, drawing );
    // NOTE(review): the lines below reference `image`, which is not a
    // parameter of this callback — they likely belong to another function
    // whose header was lost in extraction.
    cv::Mat_<cv::Vec3b>::iterator it = image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend = image.end<cv::Vec3b>();
    if ((*it)[1] == 0 && (*it)[2] >= 100)// this condition may need to change
    cout << '注意line1,避障'<<endl;
    cout << '注意line2,避障' << endl;

// Downscales `image` by 2x in each dimension through a remap lookup table:
// result(i, j) = image(2i, 2j).
void wave(const cv::Mat &image, cv::Mat &result)
    cv::Mat srcX(image.rows / 2, image.cols / 2, CV_32F);
    cv::Mat srcY(image.rows / 2, image.cols / 2, CV_32F);
    for (int i = 0;i<image.rows /2;i++)
    for (int j = 0;j < image.cols /2;j++)
    srcX.at<float>(i, j) = 2 * j;   // source column for (i, j)
    srcY.at<float>(i, j) = 2 * i;   // source row for (i, j)
    cv::remap(image, result, srcX, srcY, cv::INTER_LINEAR);

// NOTE(review): the fragment below is the capture loop of a main()-like
// function whose signature was lost in extraction: grab a frame, decode the
// MJPEG bytes, show it, downscale, grayscale + blur, and time one iteration.
    unsigned char *yuv422frame = NULL;
    unsigned long yuvframeSize = 0;
    string videoDev = '/dev/video0';
    V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()), 640, 480);
    cvNamedWindow('Capture',CV_WINDOW_AUTOSIZE);
    t = (double)cvGetTickCount();
    vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize);
    // Wrap the raw frame bytes in a CvMat header, then decode to an image.
    cvmat = cvMat(IMAGEHEIGHT,IMAGEWIDTH,CV_8UC3,(void*)yuv422frame);   //CV_8UC3
    img = cvDecodeImage(&cvmat,1);
    printf('DecodeImage error!\n');
    cv::Mat g_srcImage = cv::cvarrToMat(img,true);
    cvShowImage('Capture',img);
    if((cvWaitKey(1)&255) == 27){   // ESC pressed
    wave(g_srcImage, g_srcImage);
    // Convert the source image to grayscale and smooth it.
    cvtColor(g_srcImage, g_grayImage, COLOR_BGR2GRAY);
    blur(g_grayImage, g_grayImage, Size(3, 3));
    // Create the threshold trackbar and initialize it.
    createTrackbar(' 閾值', WINDOW_NAME1, &g_nThresh, g_nMaxThresh, on_ThreshChange);
    t = (double)cvGetTickCount() - t;
    printf('Used time is %g ms\n', (t / (cvGetTickFrequency() * 1000)));
    if (number/ CLOCKS_PER_SEC>= 0.25)//windows10 for CLK_TCK
|