usb camera
谨以此文纪念过往的岁月。
一.前言
usb camera会涉及两个子系统,一个usb系统,另一个video系统。本文主要是记录gspca的源码的学习,同时学习usb的isoc传输机制和video的系统。下面就是根据gspca的注册以及运行的流程来看。从linux2.6.26后,gspca的源码被添加进linux源码中,其将设备和gspca分开为两个文件,一个是设备单独的初始化文件,一个则为gspca的公共文件。本文主要是根据SN9C110和OV7648来学习usb camera的,其主要函数在sonixj.c和gspca.c中。学习这些程序并不是我们以后在usb camera的驱动中就必须按照其格式来写,其主要是借鉴其编程的方法来实现自己独自的usb camera程序设计。在学习usb camera的驱动中同时会学习usb bulk以及usb isoc驱动的编程方法以及视频驱动的编写方法。
二.v4l2子系统
usb camera主要由usb子系统和v4l2子系统构成,本文主要是讲述v4l2的应用,同时在其中穿插usb的内容。
2.1video_device结构体
通常一个video设备会通过video_device_alloc来分配内存空间,然后对其赋值。
struct video_device *vdev = video_device_alloc();
以gspca_template为例
static struct video_device gspca_template = {
.name = "gspca main driver", --video的name出现在sysfs和kernel log中
.fops = &dev_fops, --用于cdev,这个和普通的cdev的fops没有区别
.ioctl_ops = &dev_ioctl_ops, --用于ioctl
.release = video_device_release, --释放video_device
.minor = -1, --表明自动分配minor
};
可以采用memcpy将gspca_template的值赋给vdev。
2.2video_device注册
注册v4l2设备
int video_register_device(struct video_device *vfd, int type, int nr)
{
return video_register_device_index(vfd, type, nr, -1); --注册成功返回0
}
vfd : 想注册的video_device结构体
type : video_device类型
VFL_TYPE_GRABBER - A frame grabber
VFL_TYPE_VTX - A teletext device
VFL_TYPE_VBI - Vertical blank data (undecoded)
VFL_TYPE_RADIO - A radio card
nr :设备号(0 == /dev/video0, 1 == /dev/video1, ... -1 == first free)
int video_register_device_index(struct video_device *vfd, int type, int nr,int index)
index : 基于父设备的流号,-1代表自动分配
2.3 cdev的fops
video_device注册后,那就来看其video_device结构中的几个成员
其中主要一个是:static struct file_operations *fops;还是以gspca为例
static struct file_operations dev_fops = {
.owner = THIS_MODULE,
.open = dev_open,
.release = dev_close,
.read = dev_read,
.mmap = dev_mmap,
.ioctl = video_ioctl2,
.llseek = no_llseek,
.poll = dev_poll,
};
在probe时调用了video_set_drvdata(gspca_dev->vdev, gspca_dev);
设备打开
static int dev_open(struct inode *inode, struct file *file)
{
struct gspca_dev *gspca_dev;
int ret;
gspca_dev = video_drvdata(file);
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
if (!gspca_dev->present) {
ret = -ENODEV;
goto out;
}
if (gspca_dev->users > 4) { --用户最多为4
ret = -EBUSY;
goto out;
}
if (!try_module_get(gspca_dev->module)) {
ret = -ENODEV;
goto out;
}
gspca_dev->users++;
kref_get(&gspca_dev->kref); --多用户
file->private_data = gspca_dev;
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
if (ret != 0)
PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
else
PDEBUG(D_STREAM, "open done");
return ret;
}
open设备并没有真正打开什么,仅仅是对一些计数器进行初始化,由于v4l2支持多用户打开,所以对于用户的信息保存很重要。
static int dev_close(struct inode *inode, struct file *file)
{
struct gspca_dev *gspca_dev = file->private_data;
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
gspca_dev->users--;
if (gspca_dev->capt_file == file) { --如果文件是捕获,释放流资源
if (gspca_dev->streaming) {
mutex_lock(&gspca_dev->usb_lock);
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
frame_free(gspca_dev); --释放当前设备所开辟的内存空间
gspca_dev->capt_file = NULL;
gspca_dev->memory = GSPCA_MEMORY_NO;
}
file->private_data = NULL;
module_put(gspca_dev->module);
mutex_unlock(&gspca_dev->queue_lock);
kref_put(&gspca_dev->kref, gspca_delete);
return 0;
}
设备读
static ssize_t dev_read(struct file *file, char __user *data,size_t count, loff_t *ppos)
{
struct gspca_dev *gspca_dev = file->private_data;
struct gspca_frame *frame;
struct v4l2_buffer v4l2_buf;
struct timeval timestamp;
int n, ret, ret2;
switch (gspca_dev->memory) {
case GSPCA_MEMORY_NO: --第一次读,需要开辟空间,其主要原因在于设备open的时候并没有去申请资源。
ret = read_alloc(gspca_dev, file); --申请开辟资源
if (ret != 0)
return ret;
break;
case GSPCA_MEMORY_READ: --内存读,保存当前file 信息。这是最慢的一种,通过copy_to_usr来实现。
if (gspca_dev->capt_file == file)
break;
/* fall thru */
default:
return -EINVAL;
}
--获取一帧数据
jiffies_to_timeval(get_jiffies_64(), &timestamp);
timestamp.tv_sec--;
n = 2;
for (;;) {
memset(&v4l2_buf, 0, sizeof v4l2_buf);
v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buf.memory = GSPCA_MEMORY_READ;
ret = vidioc_dqbuf(file, gspca_dev, &v4l2_buf); --从缓冲队列中取出一个已填充数据的缓冲区
if (ret != 0) {
return ret;
}
frame = &gspca_dev->frame[v4l2_buf.index];
if (--n < 0) --避免无限循环
break;
if (frame->v4l2_buf.timestamp.tv_sec >= timestamp.tv_sec) --
break;
ret = vidioc_qbuf(file, gspca_dev, &v4l2_buf); --将缓冲区重新放回缓冲队列。
if (ret != 0){
return ret;
}
}
if (count > frame->v4l2_buf.bytesused) --复制一帧数据
count = frame->v4l2_buf.bytesused;
ret = copy_to_user(data, frame->data, count);
if (ret != 0) {
ret = -EFAULT;
goto out;
}
ret = count;
out:
ret2 = vidioc_qbuf(file, gspca_dev, &v4l2_buf); --数据复制完成后把该缓冲区重新入队,以便后续采集时复用
if (ret2 != 0)
return ret2;
return ret;
}
非阻塞I/O,其主要是该进程不会阻塞,而在上层被阻塞。关于如何使用的以后再看,在这里就不细究了
static unsigned int dev_poll(struct file *file, poll_table *wait);
内存映射,关于如何映射的以前学过,这里就不管了。
static int dev_mmap(struct file *file, struct vm_area_struct *vma);
2.4 ioctl
对于v4l2设备而言,ioctl很重要,几乎所有的操作都可以通过ioctl来完成,那来看几个比较重要的命令。
video_ioctl2-> __video_do_ioctl
还是以gspca为例
2.4.1 VIDIOC_QUERYCAP
VIDIOC_QUERYCAP 查询驱动功能,其最终调用函数为vidioc_querycap:
struct v4l2_capability {
__u8 driver[16]; --驱动信息
__u8 card[32]; --设备信息
__u8 bus_info[32]; --所依附的总线,如usb camera 则为usb
__u32 version; --驱动的版本号
__u32 capabilities; --属性
__u32 reserved[4]; --保留
};
static int vidioc_querycap(struct file *file, void *priv,struct v4l2_capability *cap)
{
struct gspca_dev *gspca_dev = priv;
memset(cap, 0, sizeof *cap);
strncpy(cap->driver, gspca_dev->sd_desc->name, sizeof cap->driver);
if (gspca_dev->dev->product != NULL) {
strncpy(cap->card, gspca_dev->dev->product,sizeof cap->card);
} else {
snprintf(cap->card, sizeof cap->card,"USB Camera (%04x:%04x)",le16_to_cpu(gspca_dev->dev->descriptor.idVendor),le16_to_cpu(gspca_dev->dev->descriptor.idProduct));
}
strncpy(cap->bus_info, gspca_dev->dev->bus->bus_name,sizeof cap->bus_info);
cap->version = DRIVER_VERSION_NUMBER;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE| V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; --关于这几个宏定义,可以参考源码中的解释。主要描述设备的类型以及读写的属性
return 0;
}
上面的程序很好理解,主要是将驱动的信息读到应用层。
2.4.2 VIDIOC_ENUM_FMT
获取当前驱动支持的视频格式,其视频格式描述结构体如下:
struct v4l2_fmtdesc {
__u32 index; --格式索引号
enum v4l2_buf_type type; --缓冲区类型
__u32 flags; --缓冲区标示符
__u8 description[32]; --描述符字符串
__u32 pixelformat; --格式描述
__u32 reserved[4]; --保留
};
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
V4L2_BUF_TYPE_VBI_CAPTURE = 4,
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
以V4L2_BUF_TYPE_VIDEO_CAPTURE为例,在下面没有特殊声明的情况下都是V4L2_BUF_TYPE_VIDEO_CAPTURE类型:
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,struct v4l2_fmtdesc *fmtdesc)
{
struct gspca_dev *gspca_dev = priv;
int i, j, index;
__u32 fmt_tb[8];
index = 0;
j = 0;
for (i = gspca_dev->cam.nmodes; --i >= 0; ) { --这个循环怎么感觉怪怪的,我以为这段可以完全不要的,直接采用循环检测就可以了,
fmt_tb[index] = gspca_dev->cam.cam_mode[i].pixelformat;
j = 0;
for (;;) {
if (fmt_tb[j] == fmt_tb[index])
break;
j++;
}
if (j == index) {
if (fmtdesc->index == index)
break; /* new format */
index++;
if (index >= sizeof fmt_tb / sizeof fmt_tb[0])
return -EINVAL;
}
}
if (i < 0)
return -EINVAL; /* no more format */
fmtdesc->pixelformat = fmt_tb[index];
if (gspca_is_compressed(fmt_tb[index])) --判断fmt是否被压缩
fmtdesc->flags = V4L2_FMT_FLAG_COMPRESSED;
fmtdesc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmtdesc->description[0] = fmtdesc->pixelformat & 0xff;
fmtdesc->description[1] = (fmtdesc->pixelformat >> 8) & 0xff;
fmtdesc->description[2] = (fmtdesc->pixelformat >> 16) & 0xff;
fmtdesc->description[3] = fmtdesc->pixelformat >> 24;
fmtdesc->description[4] = '\0';
return 0;
}
2.4.3 VIDIOC_G_FMT
获取当前驱动的帧捕获格式
struct v4l2_format {
enum v4l2_buf_type type; --同样是类型
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
对于V4L2_BUF_TYPE_VIDEO_CAPTURE类型而言,其fmt为v4l2_pix_format类型
struct v4l2_pix_format {
__u32 width; --捕获图片的宽度
__u32 height; --高度
__u32 pixelformat; --像素点类型
enum v4l2_field field; --与域
__u32 bytesperline; --每一行的bytes
__u32 sizeimage; --图像大小
enum v4l2_colorspace colorspace; --色彩空间
__u32 priv; --私有数据,依赖于像素点类型
};
也许上面的结构体很空洞,不知道各个成员的含义是什么,以OV7648为例:
static struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = priv;
int mode;
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
mode = gspca_dev->curr_mode;
memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode],sizeof fmt->fmt.pix);
return 0;
}
2.4.4 VIDIOC_S_FMT
设置当前驱动的视频捕获格式
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = priv;
int ret;
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
ret = try_fmt_vid_cap(gspca_dev, fmt); --查找对应的mode
if (ret < 0)
goto out;
if (gspca_dev->nframes != 0
&& fmt->fmt.pix.sizeimage > gspca_dev->frsz) {
ret = -EINVAL;
goto out;
}
if (ret == gspca_dev->curr_mode) { --如果设置的mode与当前mode一样,则不需要重新设置,直接返回。
ret = 0;
goto out; /* same mode */
}
if (gspca_dev->streaming) {
ret = -EBUSY;
goto out;
}
gspca_dev->width = fmt->fmt.pix.width;
gspca_dev->height = fmt->fmt.pix.height;
gspca_dev->pixfmt = fmt->fmt.pix.pixelformat;
gspca_dev->curr_mode = ret;
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
return ret;
}
2.4.5 VIDIOC_TRY_FMT
验证当前驱动的显示格式
static int vidioc_try_fmt_vid_cap(struct file *file,void *priv,struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = priv;
int ret;
ret = try_fmt_vid_cap(gspca_dev, fmt);
if (ret < 0)
return ret;
return 0;
}
static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,struct v4l2_format *fmt)
{
int w, h, mode, mode2;
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
w = fmt->fmt.pix.width;
h = fmt->fmt.pix.height;
/* search the closest mode for width and height */
mode = wxh_to_mode(gspca_dev, w, h); --根据设定的图像宽度和高度来查询mode
if (gspca_dev->cam.cam_mode[mode].pixelformat!= fmt->fmt.pix.pixelformat) { --如果像素格式不匹配,则根据格式查找对应的mode
mode2 = gspca_get_mode(gspca_dev, mode,fmt->fmt.pix.pixelformat);
if (mode2 >= 0)
mode = mode2;
}
memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode],sizeof fmt->fmt.pix);
return mode;
}
static int wxh_to_mode(struct gspca_dev *gspca_dev,int width, int height)
{
int i;
for (i = gspca_dev->cam.nmodes; --i > 0; ) { --高度和宽度匹配
if (width >= gspca_dev->cam.cam_mode[i].width && height >= gspca_dev->cam.cam_mode[i].height)
break;
}
return i;
}
static int gspca_get_mode(struct gspca_dev *gspca_dev,int mode,int pixfmt)
{
int modeU, modeD;
modeU = modeD = mode;
while ((modeU < gspca_dev->cam.nmodes) || modeD >= 0){
if (--modeD >= 0) {
if (gspca_dev->cam.cam_mode[modeD].pixelformat== pixfmt)
return modeD;
}
if (++modeU < gspca_dev->cam.nmodes) {
if (gspca_dev->cam.cam_mode[modeU].pixelformat== pixfmt)
return modeU;
}
}
return -EINVAL;
}
2.4.6 VIDIOC_REQBUFS
分配内存 ,以下面的调用为例:
struct v4l2_requestbuffers req;
memset(&req, 0, sizeof (req));
req.count = 10; --缓冲区块数
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP; --采用内存映射
ioctl(fd, VIDIOC_REQBUFS, &req);
static int vidioc_reqbufs(struct file *file, void *priv,struct v4l2_requestbuffers *rb)
{
struct gspca_dev *gspca_dev = priv;
int i, ret = 0;
if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (rb->memory) {
case GSPCA_MEMORY_READ: /* (internal call) */
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_USERPTR:
break;
default:
return -EINVAL;
}
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
if (gspca_dev->memory != GSPCA_MEMORY_NO && gspca_dev->memory != rb->memory) { --分配内存时应该满足memory标志为未分配内存。
ret = -EBUSY;
goto out;
}
if (gspca_dev->capt_file != NULL && gspca_dev->capt_file != file) {
ret = -EBUSY;
goto out;
}
for (i = 0; i < gspca_dev->nframes; i++) { --如果内存被分配了,要保证内存没有被映射
if (gspca_dev->frame[i].vma_use_count) {
ret = -EBUSY;
goto out;
}
}
if (gspca_dev->streaming) { --如果视频流开始显示,则要关闭
mutex_lock(&gspca_dev->usb_lock);
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
if (gspca_dev->nframes != 0) { --释放以前分配的内存
frame_free(gspca_dev);
gspca_dev->capt_file = NULL;
}
if (rb->count == 0) --如果申请的内存块为0,则只做释放,直接返回(此时ret为0)
goto out;
gspca_dev->memory = rb->memory;
ret = frame_alloc(gspca_dev, rb->count); --真正的帧分配
if (ret == 0) {
rb->count = gspca_dev->nframes;
gspca_dev->capt_file = file;
}
out:
mutex_unlock(&gspca_dev->queue_lock);
PDEBUG(D_STREAM, "reqbufs st:%d c:%d", ret, rb->count);
return ret;
}
static int frame_alloc(struct gspca_dev *gspca_dev,unsigned int count)
{
struct gspca_frame *frame;
unsigned int frsz;
int i;
i = gspca_dev->curr_mode;
frsz = gspca_dev->cam.cam_mode[i].sizeimage;
frsz = PAGE_ALIGN(frsz); --页对齐一帧内存的大小
gspca_dev->frsz = frsz;
if (count > GSPCA_MAX_FRAMES)
count = GSPCA_MAX_FRAMES;
gspca_dev->frbuf = rvmalloc(frsz * count); --开辟count帧内存大小
if (!gspca_dev->frbuf) {
return -ENOMEM;
}
gspca_dev->nframes = count; --针对于上面的应用count=10;
for (i = 0; i < count; i++) {
frame = &gspca_dev->frame[i];
frame->v4l2_buf.index = i;
frame->v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
frame->v4l2_buf.flags = 0;
frame->v4l2_buf.field = V4L2_FIELD_NONE;
frame->v4l2_buf.length = frsz;
frame->v4l2_buf.memory = gspca_dev->memory;
frame->v4l2_buf.sequence = 0;
frame->data = frame->data_end = gspca_dev->frbuf + i * frsz; --数据地址开始和结尾
frame->v4l2_buf.m.offset = i * frsz;
}
gspca_dev->fr_i = gspca_dev->fr_o = gspca_dev->fr_q = 0; --初始化input和output的值为0,没有进入和输出的framebuffer
gspca_dev->last_packet_type = DISCARD_PACKET; --设置包类型
gspca_dev->sequence = 0;
atomic_set(&gspca_dev->nevent, 0);
return 0;
}
这里开辟的内存在物理空间上是不连续的。只是在虚拟地址上是连续的。
static void *rvmalloc(unsigned long size)
{
void *mem;
unsigned long adr;
mem = vmalloc_32(size); --开辟32位对齐的数据内存,函数的效率不是很高。
if (mem != NULL) {
adr = (unsigned long) mem;
while ((long) size > 0) {
SetPageReserved(vmalloc_to_page((void *) adr)); --设置页保留
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
return mem;
}
以上就是开辟framebuffer,不过这种内存开辟方式对于实时性要求很高的系统是不合适的。一种比较快速的办法是在系统启动的时候,采用alloc_bootmem来开辟一段大的物理内存,这样采集过来的数据可以通过DMA的办法传输到缓冲区,同时可以采用内存映射的手段直接映射到用户空间。这个将在以后自己实现usb camera驱动的时候来实现具体的实现。
2.4.7 VIDIOC_QUERYBUF
查询framebuffer信息
static int vidioc_querybuf(struct file *file, void *priv,struct v4l2_buffer *v4l2_buf)
{
struct gspca_dev *gspca_dev = priv;
struct gspca_frame *frame;
frame = &gspca_dev->frame[v4l2_buf->index];
memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
return 0;
}
2.4.8 VIDIOC_QBUF
将一个framebuffer添加入队列,用于等待数据填充,那来看一下具体的实现。
static int vidioc_qbuf(struct file *file, void *priv,struct v4l2_buffer *v4l2_buf)
{
struct gspca_dev *gspca_dev = priv;
struct gspca_frame *frame;
int i, index, ret;
index = v4l2_buf->index; --注:原文此句位置有误,index须先赋值再用于定位frame
frame = &gspca_dev->frame[index];
frame->v4l2_buf.flags |= V4L2_BUF_FLAG_QUEUED;
if (frame->v4l2_buf.memory == V4L2_MEMORY_USERPTR) {
frame->v4l2_buf.m.userptr = v4l2_buf->m.userptr;
frame->v4l2_buf.length = v4l2_buf->length;
}
i = gspca_dev->fr_q; --fr_q在申请内存空间时初始化为0
gspca_dev->fr_queue[i] = index; --保存用户空间所用的index
gspca_dev->fr_q = (i + 1) % gspca_dev->nframes; --保存下一个需要入队的值。
v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED; --设置
v4l2_buf->flags &= ~V4L2_BUF_FLAG_DONE;
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
return ret;
}
2.4.9 VIDIOC_DQBUF
从缓冲队列中出列一个buffer,前提是这个buffer里面有一帧数据,否则将阻塞。
static int vidioc_dqbuf(struct file *file, void *priv,struct v4l2_buffer *v4l2_buf)
{
struct gspca_dev *gspca_dev = priv;
struct gspca_frame *frame;
int i, ret;
if (!(file->f_flags & O_NONBLOCK)&& !gspca_dev->streaming && gspca_dev->users == 1)
return -EINVAL;
if (mutex_lock_interruptible(&gspca_dev->read_lock))
return -ERESTARTSYS;
ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK); --等待数据填充。
if (ret < 0)
goto out;
i = ret;
frame = &gspca_dev->frame[i];
if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,frame->data,frame->v4l2_buf.bytesused)) {
ret = -EFAULT;
goto out;
}
}
frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
ret = 0;
out:
mutex_unlock(&gspca_dev->read_lock);
return ret;
}
static int frame_wait(struct gspca_dev *gspca_dev,int nonblock_ing)
{
struct gspca_frame *frame;
int i, j, ret;
i = gspca_dev->fr_o; --检测是否完成一帧
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
if (frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE) {
atomic_dec(&gspca_dev->nevent);
goto ok;
}
if (nonblock_ing)
return -EAGAIN;
for (;;) {
ret = wait_event_interruptible_timeout(gspca_dev->wq,atomic_read(&gspca_dev->nevent) > 0,msecs_to_jiffies(3000)); --等待一帧完成
if (ret <= 0) {
if (ret < 0)
return ret; /* interrupt */
return -EIO; /* timeout */
}
atomic_dec(&gspca_dev->nevent);
if (!gspca_dev->streaming || !gspca_dev->present)
return -EIO;
i = gspca_dev->fr_o;
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
if (frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE)
break;
}
ok:
gspca_dev->fr_o = (i + 1) % gspca_dev->nframes;
if (gspca_dev->sd_desc->dq_callback) {
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->sd_desc->dq_callback(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
return j;
}
那我们现在再来回顾一下gspca_dev中三个参数含义
fr_o下一个出队的index
fr_i下一个入队的index
fr_queue用于保存index所代表的帧号。
上面的入队和出队比较好理解,将这两个函数联合起来看就可以很容易的去理解了。
2.4.10 VIDIOC_STREAMON
视频流打开,这个从字面意思上去理解很难理解,那来看源码会好很多。
static int vidioc_streamon(struct file *file, void *priv,enum v4l2_buf_type buf_type)
{
struct gspca_dev *gspca_dev = priv;
int ret;
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
if (!gspca_dev->present) {
ret = -ENODEV;
goto out;
}
if (gspca_dev->nframes == 0) {
ret = -EINVAL;
goto out;
}
if (!gspca_dev->streaming) {
ret = gspca_init_transfer(gspca_dev);
if (ret < 0)
goto out;
}
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
return ret;
}
这个函数的核心是gspca_init_transfer。
static int gspca_init_transfer(struct gspca_dev *gspca_dev)
{
struct usb_host_endpoint *ep;
int n, ret;
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
gspca_dev->alt = gspca_dev->nbalt;
for (;;) {
ep = get_ep(gspca_dev); --获取一个可用的endpoint,这里面支持两种一种isoc一种bulk
if (ep == NULL) {
ret = -EIO;
goto out;
}
ret = create_urbs(gspca_dev, ep); --创建urb
if (ret < 0)
goto out;
ret = gspca_dev->sd_desc->start(gspca_dev); --打开camera
if (ret < 0) {
destroy_urbs(gspca_dev);
goto out;
}
gspca_dev->streaming = 1; --设置流 已被打开
atomic_set(&gspca_dev->nevent, 0);
if (gspca_dev->alt == 0) --如果是bulk传输则在子系统中就开始
break;
for (n = 0; n < gspca_dev->nurbs; n++) { --以下是提交isoc urb
ret = usb_submit_urb(gspca_dev->urb[n], GFP_KERNEL);
if (ret < 0) {
gspca_dev->streaming = 0;
destroy_urbs(gspca_dev);
if (ret == -ENOSPC)
break; /* try the previous alt */
goto out;
}
}
if (ret >= 0)
break;
}
out:
mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
创建urb,下面就是usb中创建urb的一个很好的例程。
static int create_urbs(struct gspca_dev *gspca_dev,struct usb_host_endpoint *ep)
{
struct urb *urb;
int n, nurbs, i, psize, npkt, bsize;
psize = le16_to_cpu(ep->desc.wMaxPacketSize); --计算最大包数
if (gspca_dev->alt != 0) { --这个值在probe的时候就被设置,这个是根据interface->num_altsetting决定,在SN9C110中这个值为0,即采用bulk传输
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
npkt = ISO_MAX_SIZE / psize;
if (npkt > ISO_MAX_PKT)
npkt = ISO_MAX_PKT;
bsize = psize * npkt;
nurbs = DEF_NURBS;
} else { /* bulk */
npkt = 0;
bsize = gspca_dev->cam.bulk_size;
if (bsize == 0)
bsize = psize;
nurbs = 1;
}
gspca_dev->nurbs = nurbs;
for (n = 0; n < nurbs; n++) {
urb = usb_alloc_urb(npkt, GFP_KERNEL);
if (!urb) {
destroy_urbs(gspca_dev);
return -ENOMEM;
}
urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,bsize,GFP_KERNEL,&urb->transfer_dma); --开辟DMA空间
if (urb->transfer_buffer == NULL) {
usb_free_urb(urb);
destroy_urbs(gspca_dev);
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
urb->dev = gspca_dev->dev;
urb->context = gspca_dev;
urb->transfer_buffer_length = bsize;
if (npkt != 0) { /* ISOC */
urb->pipe = usb_rcvisocpipe(gspca_dev->dev,ep->desc.bEndpointAddress);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->interval = ep->desc.bInterval;
urb->complete = isoc_irq;
urb->number_of_packets = npkt;
for (i = 0; i < npkt; i++) {
urb->iso_frame_desc[i].length = psize;
urb->iso_frame_desc[i].offset = psize * i;
}
} else { /* bulk */
urb->pipe = usb_rcvbulkpipe(gspca_dev->dev,ep->desc.bEndpointAddress),
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->complete = bulk_irq;
}
}
return 0;
}
上面就是几个重要的cmd,剩下的一部分是很重要的数据填充部分。这个部分需要好好的理解,不过这个前提是理解JPEG格式。每次数据的填充主要是在urb完成的处理函数中。如isoc为isoc_irq,bulk为bulk_irq。我们以bulk_irq为例。
static void bulk_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
struct gspca_frame *frame;
if (!gspca_dev->streaming)
return;
if (urb->status != 0 && urb->status != -ECONNRESET) {
return; /* disconnection ? */
}
frame = gspca_get_i_frame(gspca_dev); --检查入队的buffer是否可用
if (!frame) {
gspca_dev->last_packet_type = DISCARD_PACKET;
} else {
gspca_dev->sd_desc->pkt_scan(gspca_dev,frame,urb->transfer_buffer,urb->actual_length);
}
}
最后一个函数指针指向该函数
static void sd_pkt_scan(struct gspca_dev *gspca_dev,struct gspca_frame *frame,__u8 *data,int len)
{
struct sd *sd = (struct sd *) gspca_dev;
int sof, avg_lum;
sof = len - 64;
if (sof >= 0 && data[sof] == 0xff && data[sof + 1] == 0xd9) { --查询是否为帧尾
gspca_frame_add(gspca_dev, LAST_PACKET,frame, data, sof + 2);
if (sd->ag_cnt < 0)
return;
/* w1 w2 w3 */
/* w4 w5 w6 */
/* w7 w8 */
/* w4 */
avg_lum = ((data[sof + 29] << 8) | data[sof + 30]) >> 6;
/* w6 */
avg_lum += ((data[sof + 33] << 8) | data[sof + 34]) >> 6;
/* w2 */
avg_lum += ((data[sof + 25] << 8) | data[sof + 26]) >> 6;
/* w8 */
avg_lum += ((data[sof + 37] << 8) | data[sof + 38]) >> 6;
/* w5 */
avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
avg_lum >>= 4;
atomic_set(&sd->avg_lum, avg_lum);
return;
}
if (gspca_dev->last_packet_type == LAST_PACKET) { --如果上一次的包类型为最后一包,则重新填充一帧
/* put the JPEG 422 header */
jpeg_put_header(gspca_dev, frame, sd->qindex, 0x21);
}
gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); --如果不是上面两种则直接填充。
}
对于填充一帧数据对于包类型而言,FIRST_PACKET和LAST_PACKET是必须的,而INTER_PACKET可以没有也可以有多个。
struct gspca_frame *gspca_frame_add(struct gspca_dev *gspca_dev,
enum gspca_packet_type packet_type,
struct gspca_frame *frame,
const __u8 *data,
int len)
{
int i, j;
if (packet_type == FIRST_PACKET) {
if ((frame->v4l2_buf.flags & BUF_ALL_FLAGS)!= V4L2_BUF_FLAG_QUEUED) {
gspca_dev->last_packet_type = DISCARD_PACKET;
return frame;
}
frame->data_end = frame->data;
jiffies_to_timeval(get_jiffies_64(), &frame->v4l2_buf.timestamp);
frame->v4l2_buf.sequence = ++gspca_dev->sequence;
} else if (gspca_dev->last_packet_type == DISCARD_PACKET) { --这个对于第一帧图像而言有极其重要的作用。
if (packet_type == LAST_PACKET)
gspca_dev->last_packet_type = packet_type;
return frame;
}
if (len > 0) {
if (frame->data_end - frame->data + len> frame->v4l2_buf.length) {
packet_type = DISCARD_PACKET; --错误将整个帧无效
}else{
memcpy(frame->data_end, data, len);
frame->data_end += len;
}
}
gspca_dev->last_packet_type = packet_type;
if (packet_type == LAST_PACKET) {
frame->v4l2_buf.bytesused = frame->data_end - frame->data; --记录一帧的长度
frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_QUEUED;
frame->v4l2_buf.flags |= V4L2_BUF_FLAG_DONE;
atomic_inc(&gspca_dev->nevent);
wake_up_interruptible(&gspca_dev->wq); --这儿唤醒wq的等待,这个在frame_wait中阻塞。
i = (gspca_dev->fr_i + 1) % gspca_dev->nframes; --下个等待存储数据的frame
gspca_dev->fr_i = i;
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
}
return frame; --返回帧信息。
}
三.总结
到此usb camera可以说看完了,但是距离自己编写自己单独的usb camera驱动还有一定的距离。但是还是有收获的,对于V4L2有了比较深的理解。对其中的几个重要的cmd有了深入的了解。
阅读(2887) | 评论(0) | 转发(0) |