以下 Python 代码仅用于打开 USB 摄像头并实时显示画面（按 q 退出）

import cv2
import numpy
import matplotlib.pyplot as plot


class Camera:
    """Thin wrapper around the first USB camera (device index 0)."""

    # Shared capture handle; opened once at class-definition time.
    cap = cv2.VideoCapture(0)

    @staticmethod
    def getCamera():
        """Grab one frame.

        Returns:
            (ret, frame): ret is False when the read failed, frame is the
            BGR image, exactly as returned by VideoCapture.read().
        """
        ret, frame = Camera.cap.read()
        return ret, frame

    @staticmethod
    def getCap():
        """Return the underlying cv2.VideoCapture object."""
        return Camera.cap


def main():
    """Show the live USB-camera feed until the user presses 'q'."""
    camera = Camera()
    while True:
        ret, frame = camera.getCamera()
        cv2.imshow("capture", frame)
        # waitKey(1) pumps the GUI event loop; mask to the low byte for
        # portability across platforms where waitKey returns >8 bits.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.cap.release()
    # cv2.destroyAllWindows()


if __name__ == '__main__':
    main()

C++ start onboard camera

#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

// Open the Jetson TX1 onboard CSI camera through a GStreamer pipeline
// (nvcamerasrc -> nvvidconv -> BGR appsink) and display frames until
// any key is pressed.
int main(int argc, char** argv)
{
    // flip-method=2 rotates the sensor image 180°; the two caps filters
    // convert the NVMM I420 output into plain BGR that OpenCV can read.
    VideoCapture cap("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)24/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink");
    if (!cap.isOpened())
    {
        cout << "Failed to open camera." << endl;
        return -1; // camera not available / pipeline failed
    }
    for (;;)
    {
        Mat frame;
        cap >> frame;
        imshow("original", frame);
        //waitKey(1);
        // ~30 ms per frame; waitKey returns >= 0 on any keypress.
        if (waitKey(30) >= 0)
            break;
    }
    return 0;
}

C++ start usb camera

/*
Author:Jack-Cui
Blog:http://blog.csdn.net/c406495762
Time:25 May 2017
*/
#include <unistd.h>
#include <error.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <iostream>
#include <iomanip>
#include <string>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace std;

#define CLEAR(x) memset(&(x), 0, sizeof(x))

#define IMAGEWIDTH 3264
#define IMAGEHEIGHT 2448 class V4L2Capture {
public:
V4L2Capture(char *devName, int width, int height);
virtual ~V4L2Capture(); int openDevice();
int closeDevice();
int initDevice();
int startCapture();
int stopCapture();
int freeBuffers();
int getFrame(void **,size_t *);
int backFrame();
static void test(); private:
int initBuffers(); struct cam_buffer
{
void* start;
unsigned int length;
};
char *devName;
int capW;
int capH;
int fd_cam;
cam_buffer *buffers;
unsigned int n_buffers;
int frameIndex;
}; V4L2Capture::V4L2Capture(char *devName, int width, int height) {
// TODO Auto-generated constructor stub
this->devName = devName;
this->fd_cam = -;
this->buffers = NULL;
this->n_buffers = ;
this->frameIndex = -;
this->capW=width;
this->capH=height;
} V4L2Capture::~V4L2Capture() {
// TODO Auto-generated destructor stub
} int V4L2Capture::openDevice() {
/*设备的打开*/
printf("video dev : %s\n", devName);
fd_cam = open(devName, O_RDWR);
if (fd_cam < ) {
perror("Can't open video device");
}
return ;
} int V4L2Capture::closeDevice() {
if (fd_cam > ) {
int ret = ;
if ((ret = close(fd_cam)) < ) {
perror("Can't close video device");
}
return ;
} else {
return -;
}
} int V4L2Capture::initDevice() {
int ret;
struct v4l2_capability cam_cap; //显示设备信息
struct v4l2_cropcap cam_cropcap; //设置摄像头的捕捉能力
struct v4l2_fmtdesc cam_fmtdesc; //查询所有支持的格式:VIDIOC_ENUM_FMT
struct v4l2_crop cam_crop; //图像的缩放
struct v4l2_format cam_format; //设置摄像头的视频制式、帧格式等 /* 使用IOCTL命令VIDIOC_QUERYCAP,获取摄像头的基本信息*/
ret = ioctl(fd_cam, VIDIOC_QUERYCAP, &cam_cap);
if (ret < ) {
perror("Can't get device information: VIDIOCGCAP");
}
printf(
"Driver Name:%s\nCard Name:%s\nBus info:%s\nDriver Version:%u.%u.%u\n",
cam_cap.driver, cam_cap.card, cam_cap.bus_info,
(cam_cap.version >> ) & 0XFF, (cam_cap.version >> ) & 0XFF,
cam_cap.version & 0XFF); /* 使用IOCTL命令VIDIOC_ENUM_FMT,获取摄像头所有支持的格式*/
cam_fmtdesc.index = ;
cam_fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
printf("Support format:\n");
while (ioctl(fd_cam, VIDIOC_ENUM_FMT, &cam_fmtdesc) != -) {
printf("\t%d.%s\n", cam_fmtdesc.index + , cam_fmtdesc.description);
cam_fmtdesc.index++;
} /* 使用IOCTL命令VIDIOC_CROPCAP,获取摄像头的捕捉能力*/
cam_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if ( == ioctl(fd_cam, VIDIOC_CROPCAP, &cam_cropcap)) {
printf("Default rec:\n\tleft:%d\n\ttop:%d\n\twidth:%d\n\theight:%d\n",
cam_cropcap.defrect.left, cam_cropcap.defrect.top,
cam_cropcap.defrect.width, cam_cropcap.defrect.height);
/* 使用IOCTL命令VIDIOC_S_CROP,获取摄像头的窗口取景参数*/
cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
cam_crop.c = cam_cropcap.defrect; //默认取景窗口大小
if (- == ioctl(fd_cam, VIDIOC_S_CROP, &cam_crop)) {
//printf("Can't set crop para\n");
}
} else {
printf("Can't set cropcap para\n");
} /* 使用IOCTL命令VIDIOC_S_FMT,设置摄像头帧信息*/
cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
cam_format.fmt.pix.width = capW;
cam_format.fmt.pix.height = capH;
cam_format.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; //要和摄像头支持的类型对应
cam_format.fmt.pix.field = V4L2_FIELD_INTERLACED;
ret = ioctl(fd_cam, VIDIOC_S_FMT, &cam_format);
if (ret < ) {
perror("Can't set frame information");
}
/* 使用IOCTL命令VIDIOC_G_FMT,获取摄像头帧信息*/
cam_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = ioctl(fd_cam, VIDIOC_G_FMT, &cam_format);
if (ret < ) {
perror("Can't get frame information");
}
printf("Current data format information:\n\twidth:%d\n\theight:%d\n",
cam_format.fmt.pix.width, cam_format.fmt.pix.height);
ret = initBuffers();
if (ret < ) {
perror("Buffers init error");
//exit(-1);
}
return ;
} int V4L2Capture::initBuffers() {
int ret;
/* 使用IOCTL命令VIDIOC_REQBUFS,申请帧缓冲*/
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = ;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
ret = ioctl(fd_cam, VIDIOC_REQBUFS, &req);
if (ret < ) {
perror("Request frame buffers failed");
}
if (req.count < ) {
perror("Request frame buffers while insufficient buffer memory");
}
buffers = (struct cam_buffer*) calloc(req.count, sizeof(*buffers));
if (!buffers) {
perror("Out of memory");
}
for (n_buffers = ; n_buffers < req.count; n_buffers++) {
struct v4l2_buffer buf;
CLEAR(buf);
// 查询序号为n_buffers 的缓冲区,得到其起始物理地址和大小
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = n_buffers;
ret = ioctl(fd_cam, VIDIOC_QUERYBUF, &buf);
if (ret < ) {
printf("VIDIOC_QUERYBUF %d failed\n", n_buffers);
return -;
}
buffers[n_buffers].length = buf.length;
//printf("buf.length= %d\n",buf.length);
// 映射内存
buffers[n_buffers].start = mmap(
NULL, // start anywhere
buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_cam,
buf.m.offset);
if (MAP_FAILED == buffers[n_buffers].start) {
printf("mmap buffer%d failed\n", n_buffers);
return -;
}
}
return ;
} int V4L2Capture::startCapture() {
unsigned int i;
for (i = ; i < n_buffers; i++) {
struct v4l2_buffer buf;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (- == ioctl(fd_cam, VIDIOC_QBUF, &buf)) {
printf("VIDIOC_QBUF buffer%d failed\n", i);
return -;
}
}
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (- == ioctl(fd_cam, VIDIOC_STREAMON, &type)) {
printf("VIDIOC_STREAMON error");
return -;
}
return ;
} int V4L2Capture::stopCapture() {
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (- == ioctl(fd_cam, VIDIOC_STREAMOFF, &type)) {
printf("VIDIOC_STREAMOFF error\n");
return -;
}
return ;
} int V4L2Capture::freeBuffers() {
unsigned int i;
for (i = ; i < n_buffers; ++i) {
if (- == munmap(buffers[i].start, buffers[i].length)) {
printf("munmap buffer%d failed\n", i);
return -;
}
}
free(buffers);
return ;
} int V4L2Capture::getFrame(void **frame_buf, size_t* len) {
struct v4l2_buffer queue_buf;
CLEAR(queue_buf);
queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
queue_buf.memory = V4L2_MEMORY_MMAP;
if (- == ioctl(fd_cam, VIDIOC_DQBUF, &queue_buf)) {
printf("VIDIOC_DQBUF error\n");
return -;
}
*frame_buf = buffers[queue_buf.index].start;
*len = buffers[queue_buf.index].length;
frameIndex = queue_buf.index;
return ;
} int V4L2Capture::backFrame() {
if (frameIndex != -) {
struct v4l2_buffer queue_buf;
CLEAR(queue_buf);
queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
queue_buf.memory = V4L2_MEMORY_MMAP;
queue_buf.index = frameIndex;
if (- == ioctl(fd_cam, VIDIOC_QBUF, &queue_buf)) {
printf("VIDIOC_QBUF error\n");
return -;
}
return ;
}
return -;
} void V4L2Capture::test() {
unsigned char *yuv422frame = NULL;
unsigned long yuvframeSize = ; string videoDev="/dev/video0";
V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()),
, );
vcap->openDevice();
vcap->initDevice();
vcap->startCapture();
vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize); vcap->backFrame();
vcap->freeBuffers();
vcap->closeDevice();
} void VideoPlayer() {
unsigned char *yuv422frame = NULL;
unsigned long yuvframeSize = ; string videoDev = "/dev/video0";
V4L2Capture *vcap = new V4L2Capture(const_cast<char*>(videoDev.c_str()), , );
vcap->openDevice();
vcap->initDevice();
vcap->startCapture(); cvNamedWindow("Capture",CV_WINDOW_AUTOSIZE);
IplImage* img;
CvMat cvmat;
double t;
while(){
t = (double)cvGetTickCount();
vcap->getFrame((void **) &yuv422frame, (size_t *)&yuvframeSize);
cvmat = cvMat(IMAGEHEIGHT,IMAGEWIDTH,CV_8UC3,(void*)yuv422frame); //CV_8UC3 //解码
img = cvDecodeImage(&cvmat,);
if(!img){
printf("DecodeImage error!\n");
} cvShowImage("Capture",img);
cvReleaseImage(&img); vcap->backFrame();
if((cvWaitKey()&) == ){
exit();
}
t = (double)cvGetTickCount() - t;
printf("Used time is %g ms\n",( t / (cvGetTickFrequency()*)));
}
vcap->stopCapture();
vcap->freeBuffers();
vcap->closeDevice(); } int main() {
VideoPlayer();
return ;
}

Jetson TX1使用usb camera采集图像 (2)的更多相关文章

  1. Jetson TX1使用usb camera采集图像 (1)

    使用python实现 https://jkjung-avt.github.io/tx2-camera-with-python/ How to Capture and Display Camera Vi ...

  2. Camera 采集图像的方法

    使用 Camera 采集图像, 实现步骤如下: 需要权限: android.permission.CAMERA android.permission.WRITE_EXTERNAL_STORAGE // ...

  3. 【Xilinx-Petalinux学习】-06-OpenCV通过USB摄像头采集图像。

    占位, 实现USB摄像头的图像采集与保存

  4. 基于英伟达Jetson TX1的GPU处理平台

    基于英伟达Jetson TX1 GPU的HDMI图像输入的深度学习套件 [309] 本平台基于英伟达的Jetson TX1视觉计算的全功能开发板,配合本公司研发的HDMI输入图像采集板:Jetson ...

  5. camera按键采集图像及waitKey的用法(转)

    源: camera按键采集图像及waitKey的用法

  6. camera按键采集图像及waitKey的用法

    前言 项目需要通过摄像头采集图像并保存,主要是用于后续的摄像头标定.实现过程其实很简单,需要注意一些细节. 系统环境 系统版本:ubuntu16.04:opencv版本:opencv2.4.13:编程 ...

  7. [转]Jetson TX1 开发教程(1)配置与刷机

    开箱 Jetson TX1是英伟达公司新出的GPU开发板,拥有世界上先进的嵌入式视觉计算系统,提供高性能.新技术和极佳的开发平台.在进行配置和刷机工作之前,先来一张全家福: 可以看到,Jetson T ...

  8. 【并行计算-CUDA开发】 NVIDIA Jetson TX1

    概述 NVIDIA Jetson TX1是计算机视觉系统的SoM(system-on-module)解决方案.它组合了最新的NVIDIAMaxwell GPU架构,其具有ARM Cortex-A57 ...

  9. ffmpeg从USB摄像头采集一张原始图片(转)

    本文讲解使用ffmpeg从USB摄像头中采集一帧数据并写入文件保存,测试平台使用全志A20平台,其他平台修改交叉工具链即可移植.开发环境使用eclipse+CDT.交叉工具链使用arm-Linux-g ...

随机推荐

  1. 2.2Bind建立配置文件和实体的映射「深入浅出ASP.NET Core系列」

    希望给你3-5分钟的碎片化学习,可能是坐地铁.等公交,积少成多,水滴石穿,谢谢关注. 新建MVC项目 这次我们没有使用控制台项目,而是使用mvc来测试. 如下图所示,选择空的项目,建完后,记得把项目设 ...

  2. electron开发客户端注意事项(兼开源个人知识管理工具“想学吗”)

    窗口间通信的问题 electron窗口通信比nwjs要麻烦的多 electron分主进程和渲染进程,渲染进程又分主窗口的渲染进程和子窗口的渲染进程 主窗口的渲染进程给子窗口的渲染进程发消息 subWi ...

  3. 贝塞尔曲线控件 for .NET (EN)

    Conmajia 2012 Updated on Feb. 18, 2018 In Photoshop, there is a very powerful feature Curve Adjust, ...

  4. Convert.ToInt32()和int.Parse()区别

    Convert.ToInt32()和int.Parse()都可以数据转换个int类型,区别在于: 1. Convert.ToInt32()将object类类型转换成int类型,例如:Convert.T ...

  5. oracle学习笔记(七) 预编译Statement介绍与使用

    预编译Statement优点 执行效率高 由于预编译语句使用占位符 "?",在执行SQL之前语句会被先发送到Oracle服务器进行语法检查和编译等工作,并将SQL语句加入到Orac ...

  6. Redis的值value(数据结构类型)

    Redis的数据结构类型,指的是redis的值的value类型: Redis的常用数据结构类型:string,list,set,sortedSet,hash 一.sting的类型 string类型是r ...

  7. 面试常问的几个排序和查找算法,PHP实现

    冒泡,快排,二分查找,都是面试常问的几个算法题目,虽然简单,但是一段时间不用的话就很容易忘记,这里我用PHP实现了一下,温故而知新. 排序 冒泡排序 每一次冒出一个最大的值 function bubb ...

  8. 中介者模式 调停者 Mediator 行为型 设计模式(二十一)

      中介者模式(Mediator)   调度.调停   意图 用一个中介对象(中介者)来封装一系列的对象交互,中介者使各对象不需要显式地相互引用,从而使其耦合松散 而且可以独立地改变它们之间的交互. ...

  9. 获取url特定参数

    获取通过url拼接的特定参数值: // 获取url指定参数 function getUrlParams(name) { var reg = new RegExp("(^|&)&quo ...

  10. 轨迹系列4——WebGIS中使用ZRender实现轨迹前端动态播放特效

    文章版权由作者李晓晖和博客园共有,若转载请于明显处标明出处:http://www.cnblogs.com/naaoveGIS/ 1.背景 项目中需要在地图上以时间轴方式播放人员.车辆在地图上的历史行进 ...