// Wrapper around a V4L2-style video capture device (QObject-based).
// NOTE(review): this class body is never closed in this chunk — no "};"
// appears before the Widget:: method definitions below, so this looks like
// several files were concatenated. Confirm against the original sources.
class VideoDevice : public QObject
{
Q_OBJECT
public:
// dev_name: device node path — presumably e.g. "/dev/video0"; confirm with callers.
VideoDevice(QString dev_name);
//VideoDevice();
// The int results below are status codes from the capture lifecycle calls
// (consumed via camReturn further down); their exact success/failure values
// are defined in the out-of-view implementations.
int open_device();
int close_device();
int init_device();
int start_capturing();
int stop_capturing();
int uninit_device();
// Out-params: pointer to the frame data and its byte length.
int get_frame(void **, size_t*);
// Hands the current frame buffer back after get_frame().
int unget_frame();
private:
int init_mmap();
// One memory-mapped capture buffer: start address and byte length.
struct buffer
{
void * start;
size_t length;
};
QString dev_name;       // device node path given at construction
int fd;                 // file descriptor of the opened device
buffer* buffers;        // array of n_buffers mapped buffers
unsigned int n_buffers; // number of entries in buffers
int index;              // index of the buffer currently handed out
private:
// NOTE(review): the members below appear to belong to a separate Widget/UI
// class (the Widget:: method definitions follow), not to VideoDevice.
VideoDevice *m_camera;  // the camera device
uchar *imgBuf;          // pixel buffer backing the QImage
int camReturn;          // status code returned by camera operations, used for state checks
QImage *frame;          // the QImage frame
QTimer *timer;          // periodic capture/refresh timer
IplImage *frameget;     // each grab yields one frame; this points at that frame's memory
int convert_yuv_to_rgb_pixel(int y, int u, int v);
int convert_yuv_to_rgb_buffer(unsigned char *yuv, unsigned char *rgb, unsigned int width, unsigned int height);
IplImage *QImageToIplImage(const QImage * qImage);
/*
 * Convert a single YUV pixel to a packed RGB value.
 *
 * The coefficients are the usual BT.601-style YUV->RGB constants; each
 * channel is clamped to [0,255] and then attenuated by 220/256 (~86%),
 * exactly as in the original sample code. The three channel bytes are
 * written into the low bytes of a 32-bit word through a byte pointer, so
 * the packed layout is host-endian, matching the original behavior.
 */
int Widget::convert_yuv_to_rgb_pixel(int y, int u, int v)
{
    // Double-precision expressions truncated to int, same as the original.
    int red   = static_cast<int>(y + 1.370705 * (v - 128));
    int green = static_cast<int>(y - 0.698001 * (v - 128) - 0.337633 * (u - 128));
    int blue  = static_cast<int>(y + 1.732446 * (u - 128));

    // Saturate each channel to the valid 8-bit range.
    const auto clamp8 = [](int c) { return c < 0 ? 0 : (c > 255 ? 255 : c); };
    red   = clamp8(red);
    green = clamp8(green);
    blue  = clamp8(blue);

    unsigned int packed = 0;
    unsigned char *bytes = reinterpret_cast<unsigned char *>(&packed);
    bytes[0] = static_cast<unsigned char>(red   * 220 / 256);
    bytes[1] = static_cast<unsigned char>(green * 220 / 256);
    bytes[2] = static_cast<unsigned char>(blue  * 220 / 256);
    return packed; // implicit unsigned->int; value fits (top byte is always 0)
}
/* End of the YUV -> RGB conversion helpers. */
// QImage -> IplImage conversion
/*
 * Convert a QImage to a newly allocated 3-channel 8-bit IplImage.
 *
 * @param qImage  source image (read via QImage::pixel).
 * @return        new IplImage in BGR channel order; the CALLER owns it and
 *                must release it with cvReleaseImage().
 *
 * Perf note: the original wrote every pixel through cvSet2D(), which builds
 * a CvScalar and re-derives the element address per pixel. Writing each row
 * directly through imageData/widthStep is the standard, much faster idiom
 * and produces byte-identical output: CV_RGB(r,g,b) stores channels as
 * (b,g,r), i.e. BGR, which is exactly the layout written below.
 */
IplImage* Widget::QImageToIplImage(const QImage * qImage)
{
    const int width  = qImage->width();
    const int height = qImage->height();

    CvSize size;
    size.width  = width;
    size.height = height;

    IplImage *iplImage = cvCreateImage(size, IPL_DEPTH_8U, 3); // caller must cvReleaseImage()

    for (int y = 0; y < height; ++y)
    {
        // widthStep accounts for any row padding, so index rows through it.
        uchar *row = reinterpret_cast<uchar *>(iplImage->imageData) + y * iplImage->widthStep;
        for (int x = 0; x < width; ++x)
        {
            const QRgb rgb = qImage->pixel(x, y);
            row[3 * x]     = static_cast<uchar>(qBlue(rgb));   // B
            row[3 * x + 1] = static_cast<uchar>(qGreen(rgb));  // G
            row[3 * x + 2] = static_cast<uchar>(qRed(rgb));    // R
        }
    }
    return iplImage;
}