该方法只启动usb摄像头

import cv2
import numpy
import matplotlib.pyplot as plot


class Camera:
    """USB camera wrapper holding a single shared cv2.VideoCapture(0) handle."""
    # Class-level capture object: opened once when the class is defined.
    cap = cv2.VideoCapture(0)

    @staticmethod
    def getCamera():
        """Read one frame; returns (ret, frame) exactly as cap.read() does."""
        ret, frame = Camera.cap.read()
        return ret, frame

    @staticmethod
    def getCap():
        """Expose the underlying VideoCapture object."""
        return Camera.cap


def main():
    """Show the camera stream until 'q' is pressed, then release the device."""
    camera = Camera()
    while(1):
        ret, frame = camera.getCamera()
        cv2.imshow("capture", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.cap.release()
    # cv2.destroyAllWindows()


if __name__ == '__main__':
    main()

C++ start onboard camera

#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

// Opens the Jetson onboard CSI camera through a GStreamer pipeline
// (nvcamerasrc, 1280x720 I420 @ 24fps, flipped, converted to BGR) and
// displays frames until any key is pressed.
// Returns 0 on normal exit, -1 if the camera cannot be opened.
int main(int argc, char** argv)
{
    VideoCapture cap("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)24/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink");
    if (!cap.isOpened())
    {
        cout << "Failed to open camera." << endl;
        return -1; // restored literal dropped in the pasted source
    }
    for (;;)
    {
        Mat frame;
        cap >> frame;
        imshow("original", frame);
        //waitKey(1);
        // restored literals: poll 1 ms, exit on any key press
        if (waitKey(1) >= 0)
            break;
    }
    return 0;
}

C++ start usb camera

/*
Author:Jack-Cui
Blog:http://blog.csdn.net/c406495762
Time:25 May 2017
*/
#include <unistd.h>
#include <error.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include <sys/mman.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <iomanip>
#include <string>

using namespace std;

#define CLEAR(x) memset(&(x), 0, sizeof(x))
#define IMAGEWIDTH 3264
#define IMAGEHEIGHT 2448

// Thin wrapper around the V4L2 capture API: open device, negotiate format,
// mmap kernel frame buffers, stream, and hand out zero-copy frame pointers.
class V4L2Capture {
public:
	V4L2Capture(char *devName, int width, int height);
	virtual ~V4L2Capture();

	int openDevice();
	int closeDevice();
	int initDevice();
	int startCapture();
	int stopCapture();
	int freeBuffers();
	int getFrame(void **, size_t *);
	int backFrame();
	static void test();

private:
	int initBuffers();

	// One mmap'ed kernel frame buffer.
	struct cam_buffer {
		void* start;
		unsigned int length;
	};
	char *devName;        // device node path, e.g. "/dev/video0"
	int capW;             // requested capture width
	int capH;             // requested capture height
	int fd_cam;           // device file descriptor, -1 when closed
	cam_buffer *buffers;  // array of mmap'ed buffers
	unsigned int n_buffers;
	int frameIndex;       // buffer index held since last getFrame(), -1 if none
};

V4L2Capture::V4L2Capture(char *devName, int width, int height) {
	// Literals restored here (-1/0) — they were stripped in the pasted source.
	this->devName = devName;
	this->fd_cam = -1;
	this->buffers = NULL;
	this->n_buffers = 0;
	this->frameIndex = -1;
	this->capW = width;
	this->capH = height;
}

V4L2Capture::~V4L2Capture() {
}

// Open the video device read/write. A failure is reported via perror but
// not propagated (kept as in the original); always returns 0.
int V4L2Capture::openDevice() {
	printf("video dev : %s\n", devName);
	fd_cam = open(devName, O_RDWR);
	if (fd_cam < 0) {
		perror("Can't open video device");
	}
	return 0;
}

// Close the device fd. Returns 0 on success, -1 if it was never opened.
int V4L2Capture::closeDevice() {
	if (fd_cam > 0) {
		int ret = 0;
		if ((ret = close(fd_cam)) < 0) {
			perror("Can't close video device");
		}
		return 0;
	} else {
		return -1;
	}
}

// Query device capabilities, enumerate supported formats, select the default
// crop window, set the frame format (MJPEG at capW x capH), read back what
// the driver actually chose, then request and mmap the frame buffers.
// Returns 0; individual ioctl failures are reported via perror but not fatal.
int V4L2Capture::initDevice() {
	int ret;
	struct v4l2_capability cam_cap;     // device information
	struct v4l2_cropcap cam_cropcap;    // capture/crop capabilities
	struct v4l2_fmtdesc cam_fmtdesc;    // format enumeration (VIDIOC_ENUM_FMT)
	struct v4l2_crop cam_crop;          // cropping window
	struct v4l2_format cam_format;      // video standard / frame format

	/* VIDIOC_QUERYCAP: basic device information */
	ret = ioctl(fd_cam, VIDIOC_QUERYCAP, &cam_cap);
	if (ret < 0) {
		perror("Can't get device information: VIDIOCGCAP");
	}
	// V4L2 packs the driver version as KERNEL_VERSION(major, minor, patch).
	printf(
			"Driver Name:%s\nCard Name:%s\nBus info:%s\nDriver Version:%u.%u.%u\n",
			cam_cap.driver, cam_cap.card, cam_cap.bus_info,
			(cam_cap.version >> 16) & 0XFF, (cam_cap.version >> 8) & 0XFF,
			cam_cap.version & 0XFF);

	/* VIDIOC_ENUM_FMT: list every pixel format the camera supports */
	cam_fmtdesc.index = 0;
	cam_fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	printf("Support format:\n");
	while (ioctl(fd_cam, VIDIOC_ENUM_FMT, &cam_fmtdesc) != -1) {
		printf("\t%d.%s\n", cam_fmtdesc.index + 1, cam_fmtdesc.description);
		cam_fmtdesc.index++;
	}

	/* VIDIOC_CROPCAP: capture capabilities and default rectangle */
	cam_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (0 == ioctl(fd_cam, VIDIOC_CROPCAP, &cam_cropcap)) {
		printf("Default rec:\n\tleft:%d\n\ttop:%d\n\twidth:%d\n\theight:%d\n",
				cam_cropcap.defrect.left, cam_cropcap.defrect.top,
				cam_cropcap.defrect.width, cam_cropcap.defrect.height);
		/* VIDIOC_S_CROP: select the default capture window */
		cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		cam_crop.c = cam_cropcap.defrect;
		if (-1 == ioctl(fd_cam, VIDIOC_S_CROP, &cam_crop)) {
			//printf("Can't set crop para\n");
		}
	} else {
		printf("Can't set cropcap para\n");
	}

	/* VIDIOC_S_FMT: set frame size and pixel format
	   (pixel format must be one the camera supports) */
	cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	cam_format.fmt.pix.width = capW;
	cam_format.fmt.pix.height = capH;
	cam_format.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
	cam_format.fmt.pix.field = V4L2_FIELD_INTERLACED;
	ret = ioctl(fd_cam, VIDIOC_S_FMT, &cam_format);
	if (ret < 0) {
		perror("Can't set frame information");
	}

	/* VIDIOC_G_FMT: read back what the driver actually selected */
	cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ret = ioctl(fd_cam, VIDIOC_G_FMT, &cam_format);
	if (ret < 0) {
		perror("Can't get frame information");
	}
	printf("Current data format information:\n\twidth:%d\n\theight:%d\n",
			cam_format.fmt.pix.width, cam_format.fmt.pix.height);

	ret = initBuffers();
	if (ret < 0) {
		perror("Buffers init error");
		//exit(-1);
	}
	return 0;
}

// Request mmap frame buffers from the driver and map each one into this
// process. Returns 0 on success, -1 on QUERYBUF/mmap failure.
int V4L2Capture::initBuffers() {
	int ret;
	/* VIDIOC_REQBUFS: ask the driver for frame buffers */
	struct v4l2_requestbuffers req;
	CLEAR(req);
	req.count = 4; // restored literal dropped in the paste — presumably 4 buffers; TODO confirm
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ret = ioctl(fd_cam, VIDIOC_REQBUFS, &req);
	if (ret < 0) {
		perror("Request frame buffers failed");
	}
	if (req.count < 1) {
		perror("Request frame buffers while insufficient buffer memory");
	}
	buffers = (struct cam_buffer*) calloc(req.count, sizeof(*buffers));
	if (!buffers) {
		perror("Out of memory");
	}
	for (n_buffers = 0; n_buffers < req.count; n_buffers++) {
		struct v4l2_buffer buf;
		CLEAR(buf);
		/* VIDIOC_QUERYBUF: offset and size of buffer n_buffers */
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = n_buffers;
		ret = ioctl(fd_cam, VIDIOC_QUERYBUF, &buf);
		if (ret < 0) {
			printf("VIDIOC_QUERYBUF %d failed\n", n_buffers);
			return -1;
		}
		buffers[n_buffers].length = buf.length;
		//printf("buf.length= %d\n",buf.length);
		/* map the driver's buffer into our address space (zero-copy) */
		buffers[n_buffers].start = mmap(
				NULL, // start anywhere
				buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_cam,
				buf.m.offset);
		if (MAP_FAILED == buffers[n_buffers].start) {
			printf("mmap buffer%d failed\n", n_buffers);
			return -1;
		}
	}
	return 0;
}

// Queue every mmap'ed buffer, then turn streaming on.
// Returns 0 on success, -1 on the first failing ioctl.
int V4L2Capture::startCapture() {
	unsigned int i;
	for (i = 0; i < n_buffers; i++) {
		struct v4l2_buffer buf;
		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (-1 == ioctl(fd_cam, VIDIOC_QBUF, &buf)) {
			printf("VIDIOC_QBUF buffer%d failed\n", i);
			return -1;
		}
	}
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == ioctl(fd_cam, VIDIOC_STREAMON, &type)) {
		printf("VIDIOC_STREAMON error");
		return -1;
	}
	return 0;
}

// Turn streaming off. Returns 0 on success, -1 on failure.
int V4L2Capture::stopCapture() {
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == ioctl(fd_cam, VIDIOC_STREAMOFF, &type)) {
		printf("VIDIOC_STREAMOFF error\n");
		return -1;
	}
	return 0;
}

// Unmap every frame buffer and free the bookkeeping array.
// Returns 0 on success, -1 on the first failing munmap.
int V4L2Capture::freeBuffers() {
	unsigned int i;
	for (i = 0; i < n_buffers; ++i) {
		if (-1 == munmap(buffers[i].start, buffers[i].length)) {
			printf("munmap buffer%d failed\n", i);
			return -1;
		}
	}
	free(buffers);
	return 0;
}

// Dequeue the next filled buffer and return a pointer into the mmap'ed
// region (zero-copy) plus its length. The buffer stays dequeued until the
// caller hands it back with backFrame(). Returns 0 on success, -1 on error.
int V4L2Capture::getFrame(void **frame_buf, size_t* len) {
	struct v4l2_buffer queue_buf;
	CLEAR(queue_buf);
	queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	queue_buf.memory = V4L2_MEMORY_MMAP;
	if (-1 == ioctl(fd_cam, VIDIOC_DQBUF, &queue_buf)) {
		printf("VIDIOC_DQBUF error\n");
		return -1;
	}
	*frame_buf = buffers[queue_buf.index].start;
	*len = buffers[queue_buf.index].length;
	frameIndex = queue_buf.index; // remember which buffer to re-queue later
	return 0;
}

// Re-queue the buffer returned by the last getFrame().
// Returns 0 on success, -1 if no frame is outstanding or the QBUF fails.
int V4L2Capture::backFrame() {
	if (frameIndex != -1) {
		struct v4l2_buffer queue_buf;
		CLEAR(queue_buf);
		queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		queue_buf.memory = V4L2_MEMORY_MMAP;
		queue_buf.index = frameIndex;
		if (-1 == ioctl(fd_cam, VIDIOC_QBUF, &queue_buf)) {
			printf("VIDIOC_QBUF error\n");
			return -1;
		}
		return 0;
	}
	return -1;
}

// Smoke test: open /dev/video0, grab one frame, release everything.
// NOTE(review): the V4L2Capture object is never deleted and stopCapture()
// is never called — kept as in the original.
void V4L2Capture::test() {
	unsigned char *yuv422frame = NULL;
	unsigned long yuvframeSize = 0;

	string videoDev = "/dev/video0";
	// restored literals: capture size presumably 1920x1080 — TODO confirm
	V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()),
			1920, 1080);
	vcap->openDevice();
	vcap->initDevice();
	vcap->startCapture();
	vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize);
	vcap->backFrame();
	vcap->freeBuffers();
	vcap->closeDevice();
}

// Capture loop: dequeue MJPEG frames from the camera, decode them with the
// OpenCV C API, display, and print per-frame timing until ESC is pressed.
// NOTE(review): the loop exits via exit(0), so the stop/free/close calls
// below it are unreachable — kept as in the original.
void VideoPlayer() {
	unsigned char *yuv422frame = NULL;
	unsigned long yuvframeSize = 0;

	string videoDev = "/dev/video0";
	// restored literals: capture size presumably 1920x1080 — TODO confirm
	V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()),
			1920, 1080);
	vcap->openDevice();
	vcap->initDevice();
	vcap->startCapture();

	cvNamedWindow("Capture", CV_WINDOW_AUTOSIZE);
	IplImage* img;
	CvMat cvmat;
	double t;
	while (1) {
		t = (double)cvGetTickCount();
		vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize);
		// Wrap the raw MJPEG bytes in a CvMat header and decode to BGR.
		cvmat = cvMat(IMAGEHEIGHT, IMAGEWIDTH, CV_8UC3, (void*)yuv422frame); //CV_8UC3
		img = cvDecodeImage(&cvmat, 1);
		if (!img) {
			printf("DecodeImage error!\n");
		}
		cvShowImage("Capture", img);
		cvReleaseImage(&img);
		vcap->backFrame();
		// restored literals: poll 1 ms, mask to 8 bits, 27 == ESC
		if ((cvWaitKey(1) & 255) == 27) {
			exit(0);
		}
		t = (double)cvGetTickCount() - t;
		printf("Used time is %g ms\n", (t / (cvGetTickFrequency() * 1000)));
	}
	vcap->stopCapture();
	vcap->freeBuffers();
	vcap->closeDevice();
}

int main() {
	VideoPlayer();
	return 0;
}

Jetson TX1使用usb camera采集图像 (2)的更多相关文章

  1. Jetson TX1使用usb camera采集图像 (1)

    使用python实现 https://jkjung-avt.github.io/tx2-camera-with-python/ How to Capture and Display Camera Vi ...

  2. Camera 采集图像的方法

    使用 Camera 采集图像, 实现步骤如下: 需要权限: android.permission.CAMERA android.permission.WRITE_EXTERNAL_STORAGE // ...

  3. 【Xilinx-Petalinux学习】-06-OpenCV通过USB摄像头采集图像。

    占位, 实现USB摄像头的图像采集与保存

  4. 基于英伟达Jetson TX1的GPU处理平台

    基于英伟达Jetson TX1 GPU的HDMI图像输入的深度学习套件 [309] 本平台基于英伟达的Jetson TX1视觉计算的全功能开发板,配合本公司研发的HDMI输入图像采集板:Jetson ...

  5. camera按键采集图像及waitKey的用法(转)

    源: camera按键采集图像及waitKey的用法

  6. camera按键采集图像及waitKey的用法

    前言 项目需要通过摄像头采集图像并保存,主要是用于后续的摄像头标定.实现过程其实很简单,需要注意一些细节. 系统环境 系统版本:ubuntu16.04:opencv版本:opencv2.4.13:编程 ...

  7. [转]Jetson TX1 开发教程(1)配置与刷机

    开箱 Jetson TX1是英伟达公司新出的GPU开发板,拥有世界上先进的嵌入式视觉计算系统,提供高性能.新技术和极佳的开发平台.在进行配置和刷机工作之前,先来一张全家福: 可以看到,Jetson T ...

  8. 【并行计算-CUDA开发】 NVIDIA Jetson TX1

    概述 NVIDIA Jetson TX1是计算机视觉系统的SoM(system-on-module)解决方案.它组合了最新的NVIDIAMaxwell GPU架构,其具有ARM Cortex-A57 ...

  9. ffmpeg从USB摄像头采集一张原始图片(转)

    本文讲解使用ffmpeg从USB摄像头中采集一帧数据并写入文件保存,测试平台使用全志A20平台,其他平台修改交叉工具链即可移植.开发环境使用eclipse+CDT.交叉工具链使用arm-Linux-g ...

随机推荐

  1. spring boot 集成 zookeeper 搭建微服务架构

    RPC原理 RPC 远程过程调用(Remote Procedure Call) 一般用来实现部署在不同机器上的系统之间的方法调用,使得程序能够像访问本地系统资源一样,通过网络传输去访问远程系统资源,R ...

  2. Asp.Net Core 轻松学-HttpClient的演进和避坑

    前言     在 Asp.Net Core 1.0 时代,由于设计上的问题, HttpClient 给开发者带来了无尽的困扰,用 Asp.Net Core 开发团队的话来说就是:我们注意到,HttpC ...

  3. windows系统dokuwiki安装部署设置 xampp环境配置

    简单记录一次安装dokuwiki的过程 dokuwiki下载 dokuwiki下载地址 https://download.dokuwiki.org/ 下载前有一些可选项目,版本.语言.插件,可以按照需 ...

  4. SLAM+语音机器人DIY系列:(三)感知与大脑——3.轮式里程计与运动控制

    摘要 在我的想象中机器人首先应该能自由的走来走去,然后应该能流利的与主人对话.朝着这个理想,我准备设计一个能自由行走,并且可以与人语音对话的机器人.实现的关键是让机器人能通过传感器感知周围环境,并通过 ...

  5. Flutter 即学即用系列博客——10 混淆

    前言 之前的博客我们都是在 debug 的模式下进行开发的. 实际发布到市场或者给到用户的都是 release 包. 而对于 Android 来说,release 包一个重要的步骤就是混淆. Andr ...

  6. DataTable增加行

  7. servlet与jsp篇(一)$.ajax交互

    servlet其实是利用java类编写的服务器端应用程序,他的生命周期可以分为三个阶段:初始化阶段.运行阶段和消亡阶段; jsp页面实质上是一个HTML页面,但他包含了用户产生动态网页内容的java代 ...

  8. Android探究之View的绘制流程

    Android中Activity是作为应用程序的载体存在,代表着一个完整的用户界面,提供了一个窗口来绘制各种视图,当Activity启动时,我们会通过setContentView方法来设置一个内容视图 ...

  9. 【HANA系列】SAP HANA XS使用JavaScript编程详解

    公众号:SAP Technical 本文作者:matinal 原文出处:http://www.cnblogs.com/SAPmatinal/ 原文链接:[HANA系列]SAP HANA XS使用Jav ...

  10. 深入Node之初识

    0前言 陆续的用Node已经一年多了,已经用node写了几个的项目,也该是总结node学习的过程了 1.Node是啥? Node.js是一使用JavaScript作为开发语言,运行在服务器端的Web服 ...