Analysis of the Android Camera AppNotifier Mechanism
As discussed in the previous sections, AppNotifier creates and starts a thread during initialize. This thread continuously checks whether the lower layer has delivered any messages and, when one arrives, handles it accordingly. The messages fall into three queues: msgQ, mEventQ, and mFrameQ. Earlier articles also covered where these messages come from: the sources are set up through setEventProvider and setFrameProvider, and AppCallbackNotifier is then started via start and setMeasurements. A simplified sketch of this dispatch pattern follows.
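As a rough illustration of that pattern only (not the TI implementation, which is built on TIUTILS::MessageQueue and its wait/get helpers), the following self-contained sketch shows a worker thread draining a frame queue. The class and member names (MiniNotifier, FrameMsg, post) are hypothetical:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

struct FrameMsg { int command; void *arg1; };           // mimics TIUTILS::Message

class MiniNotifier {
public:
    void start() { mThread = std::thread(&MiniNotifier::loop, this); }
    void stop() {
        { std::lock_guard<std::mutex> l(mLock); mExit = true; }
        mCond.notify_one();
        mThread.join();
    }
    // plays the role of frameCallback(): queue the message and wake the thread
    void post(FrameMsg msg) {
        { std::lock_guard<std::mutex> l(mLock); mFrameQ.push(msg); }
        mCond.notify_one();
    }
private:
    void loop() {                                        // plays the role of the notifier thread
        for (;;) {
            FrameMsg msg;
            {
                std::unique_lock<std::mutex> l(mLock);
                mCond.wait(l, [this] { return mExit || !mFrameQ.empty(); });
                if (mExit && mFrameQ.empty()) return;
                msg = mFrameQ.front();
                mFrameQ.pop();
            }
            std::printf("processing frame msg, command=%d\n", msg.command);  // ~ notifyFrame()
        }
    }
    std::queue<FrameMsg> mFrameQ;
    std::mutex mLock;
    std::condition_variable mCond;
    std::thread mThread;
    bool mExit = false;
};

int main() {
    MiniNotifier n;
    n.start();
    n.post({1, nullptr});
    n.stop();
    return 0;
}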
Let's start with mFrameQ as an example.
void AppCallbackNotifier::frameCallbackRelay(CameraFrame* caFrame)
{
    LOG_FUNCTION_NAME;

    AppCallbackNotifier *appcbn = (AppCallbackNotifier*) (caFrame->mCookie);
    appcbn->frameCallback(caFrame);

    LOG_FUNCTION_NAME_EXIT;
}
The lower layer invokes the registered frameCallbackRelay method shown above, which in turn calls frameCallback:
void AppCallbackNotifier::frameCallback(CameraFrame* caFrame)
{
    ///Post the event to the event queue of AppCallbackNotifier
    TIUTILS::Message msg;
    CameraFrame *frame;

    LOG_FUNCTION_NAME;

    if ( NULL != caFrame )
    {
        frame = new CameraFrame(*caFrame);
        if ( NULL != frame )
        {
            msg.command = AppCallbackNotifier::NOTIFIER_CMD_PROCESS_FRAME;
            msg.arg1 = frame;
            mFrameQ.put(&msg);
        }
        else
        {
            CAMHAL_LOGEA("Not enough resources to allocate CameraFrame");
        }
    }

    LOG_FUNCTION_NAME_EXIT;
}
frameCallback first allocates a new CameraFrame to hold a copy of the data returned by the callback, then uses put to push the message onto the mFrameQ queue. The always-running thread described above can now detect that a message has arrived and start processing it.
What we really care about, though, is how frameCallbackRelay is tied to the lower layer: how does the lower layer end up calling back through this method? Let's trace how frameCallbackRelay gets hooked up.
mFrameProvider = new FrameProvider(frameNotifier, this, frameCallbackRelay);
Look at this line first: when the FrameProvider is constructed, frameCallbackRelay is passed in as the callback, and the FrameProvider constructor saves this function pointer in mFrameCallback:
FrameProvider(FrameNotifier *fn, void* cookie, frame_callback frameCallback)
    : mFrameNotifier(fn), mCookie(cookie), mFrameCallback(frameCallback) { }
Then, during CameraHal initialization, setFrameProvider invokes the following statements:
mFrameProvider->enableFrameNotification(CameraFrame::IMAGE_FRAME);
mFrameProvider->enableFrameNotification(CameraFrame::RAW_FRAME);
Let's look at the implementation of enableFrameNotification; it lives in CameraHalUtilClasses:
int FrameProvider::enableFrameNotification(int32_t frameTypes)
{
    LOG_FUNCTION_NAME;
    status_t ret = NO_ERROR;

    ///Enable the frame notification to CameraAdapter (which implements FrameNotifier interface)
    mFrameNotifier->enableMsgType(frameTypes << MessageNotifier::FRAME_BIT_FIELD_POSITION,
                                  mFrameCallback, NULL, mCookie);

    LOG_FUNCTION_NAME_EXIT;
    return ret;
}
A quick note on the method above: the callback it passes along is the one saved in the FrameProvider constructor. Next, let's look at the implementation of enableMsgType, which is in BaseCameraAdapter:
void BaseCameraAdapter::enableMsgType(int32_t msgs, frame_callback callback, event_callback eventCb, void* cookie)
{
    Mutex::Autolock lock(mSubscriberLock);

    LOG_FUNCTION_NAME;

    int32_t frameMsg = ((msgs >> MessageNotifier::FRAME_BIT_FIELD_POSITION) & EVENT_MASK);
    int32_t eventMsg = ((msgs >> MessageNotifier::EVENT_BIT_FIELD_POSITION) & EVENT_MASK);

    if ( frameMsg != 0 )
    {
        CAMHAL_LOGVB("Frame message type id=0x%x subscription request", frameMsg);
        switch ( frameMsg )
        {
            case CameraFrame::PREVIEW_FRAME_SYNC:
                mFrameSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::FRAME_DATA_SYNC:
                mFrameDataSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::SNAPSHOT_FRAME:
                mSnapshotSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::IMAGE_FRAME:
                mImageSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::RAW_FRAME:
                mRawSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::VIDEO_FRAME_SYNC:
                mVideoSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::REPROCESS_INPUT_FRAME:
                mVideoInSubscribers.add((int) cookie, callback);
                break;
            default:
                CAMHAL_LOGEA("Frame message type id=0x%x subscription no supported yet!", frameMsg);
                break;
        }
    }

    if ( eventMsg != 0 )
    {
        CAMHAL_LOGVB("Event message type id=0x%x subscription request", eventMsg);
        if ( CameraHalEvent::ALL_EVENTS == eventMsg )
        {
            mFocusSubscribers.add((int) cookie, eventCb);
            mShutterSubscribers.add((int) cookie, eventCb);
            mZoomSubscribers.add((int) cookie, eventCb);
            mMetadataSubscribers.add((int) cookie, eventCb);
        }
        else
        {
            CAMHAL_LOGEA("Event message type id=0x%x subscription no supported yet!", eventMsg);
        }
    }

    LOG_FUNCTION_NAME_EXIT;
}
Several subscriber vectors show up here; the ones we care about are mFrameDataSubscribers, mImageSubscribers, and mRawSubscribers. This is where the callback we registered during initialization is added to the appropriate vector, but how the lower layer actually ends up invoking it is still unknown, so we keep digging. Before moving on, the sketch below illustrates the frame/event bit-field packing that enableFrameNotification and enableMsgType rely on.
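enableFrameNotification shifts the frame types into a bit field before calling enableMsgType, which unpacks the frame and event fields again. The following self-contained example shows that packing; the numeric positions, EVENT_MASK value, and IMAGE_FRAME value below are assumptions for illustration only (the real constants are defined in the TI CameraHal headers):

#include <cassert>
#include <cstdint>

// Assumed stand-ins; the real constants live in MessageNotifier, BaseCameraAdapter and CameraFrame.
static const int     FRAME_BIT_FIELD_POSITION = 0;      // assumption
static const int     EVENT_BIT_FIELD_POSITION = 16;     // assumption
static const int32_t EVENT_MASK               = 0xFFFF; // assumption
static const int32_t IMAGE_FRAME              = 0x1;    // stand-in for CameraFrame::IMAGE_FRAME

int main() {
    // FrameProvider::enableFrameNotification(): shift the frame type into the frame bit field.
    int32_t msgs = IMAGE_FRAME << FRAME_BIT_FIELD_POSITION;

    // BaseCameraAdapter::enableMsgType(): unpack the frame and event fields again.
    int32_t frameMsg = (msgs >> FRAME_BIT_FIELD_POSITION) & EVENT_MASK;
    int32_t eventMsg = (msgs >> EVENT_BIT_FIELD_POSITION) & EVENT_MASK;

    assert(frameMsg == IMAGE_FRAME); // matches a case in the switch, e.g. mImageSubscribers.add(...)
    assert(eventMsg == 0);           // the frame provider sets no event bits
    return 0;
}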
As with the previous article: the V4LCameraAdapter path was already analyzed earlier, so this article focuses on OMXCameraAdapter.
OMXCameraAdapter has the following method:
status_t OMXCameraAdapter::sendCallBacks(CameraFrame frame, OMX_IN OMX_BUFFERHEADERTYPE *pBuffHeader, unsigned int mask, OMXCameraPortParameters *port)
{
    status_t ret = NO_ERROR;

    LOG_FUNCTION_NAME;

    if ( NULL == port )
    {
        CAMHAL_LOGEA("Invalid portParam");
        return -EINVAL;
    }

    if ( NULL == pBuffHeader )
    {
        CAMHAL_LOGEA("Invalid Buffer header");
        return -EINVAL;
    }

    Mutex::Autolock lock(mSubscriberLock);

    //frame.mFrameType = typeOfFrame;
    frame.mFrameMask = mask;
    frame.mBuffer = (CameraBuffer *)pBuffHeader->pAppPrivate;
    frame.mLength = pBuffHeader->nFilledLen;
    frame.mAlignment = port->mStride;
    frame.mOffset = pBuffHeader->nOffset;
    frame.mWidth = port->mWidth;
    frame.mHeight = port->mHeight;
    frame.mYuv[0] = NULL;
    frame.mYuv[1] = NULL;

    if ( onlyOnce && mRecording )
    {
        mTimeSourceDelta = (pBuffHeader->nTimeStamp * 1000) - systemTime(SYSTEM_TIME_MONOTONIC);
        onlyOnce = false;
    }

    frame.mTimestamp = (pBuffHeader->nTimeStamp * 1000) - mTimeSourceDelta;

    ret = setInitFrameRefCount(frame.mBuffer, mask);

    if (ret != NO_ERROR) {
        CAMHAL_LOGDB("Error in setInitFrameRefCount %d", ret);
    } else {
        ret = sendFrameToSubscribers(&frame);
    }

    CAMHAL_LOGVB("B 0x%x T %llu", frame.mBuffer, pBuffHeader->nTimeStamp);

    LOG_FUNCTION_NAME_EXIT;

    return ret;
}
This method is quite similar to its counterpart in V4LCameraAdapter: it first fills in the CameraFrame structure and then hands it off through sendFrameToSubscribers. Let's take a look at that method.
It is implemented in BaseCameraAdapter:
status_t BaseCameraAdapter::sendFrameToSubscribers(CameraFrame *frame)
{
    status_t ret = NO_ERROR;
    unsigned int mask;

    if ( NULL == frame )
    {
        CAMHAL_LOGEA("Invalid CameraFrame");
        return -EINVAL;
    }

    for ( mask = 1; mask < CameraFrame::ALL_FRAMES; mask <<= 1 ) {
        if ( mask & frame->mFrameMask ) {
            switch ( mask ) {

            case CameraFrame::IMAGE_FRAME:
                {
#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS
                CameraHal::PPM("Shot to Jpeg: ", &mStartCapture);
#endif
                ret = __sendFrameToSubscribers(frame, &mImageSubscribers, CameraFrame::IMAGE_FRAME);
                }
                break;
            case CameraFrame::RAW_FRAME:
                {
                ret = __sendFrameToSubscribers(frame, &mRawSubscribers, CameraFrame::RAW_FRAME);
                }
                break;
            case CameraFrame::PREVIEW_FRAME_SYNC:
                {
                ret = __sendFrameToSubscribers(frame, &mFrameSubscribers, CameraFrame::PREVIEW_FRAME_SYNC);
                }
                break;
            case CameraFrame::SNAPSHOT_FRAME:
                {
                ret = __sendFrameToSubscribers(frame, &mSnapshotSubscribers, CameraFrame::SNAPSHOT_FRAME);
                }
                break;
            case CameraFrame::VIDEO_FRAME_SYNC:
                {
                ret = __sendFrameToSubscribers(frame, &mVideoSubscribers, CameraFrame::VIDEO_FRAME_SYNC);
                }
                break;
            case CameraFrame::FRAME_DATA_SYNC:
                {
                ret = __sendFrameToSubscribers(frame, &mFrameDataSubscribers, CameraFrame::FRAME_DATA_SYNC);
                }
                break;
            case CameraFrame::REPROCESS_INPUT_FRAME:
                {
                ret = __sendFrameToSubscribers(frame, &mVideoInSubscribers, CameraFrame::REPROCESS_INPUT_FRAME);
                }
                break;
            default:
                CAMHAL_LOGEB("FRAMETYPE NOT SUPPORTED 0x%x", mask);
                break;
            } //SWITCH
            frame->mFrameMask &= ~mask;

            if (ret != NO_ERROR) {
                goto EXIT;
            }
        } //IF
    } //FOR

EXIT:
    return ret;
}
sendFrameToSubscribers in turn calls __sendFrameToSubscribers. At this point it is clear that V4LCameraAdapter and OMXCameraAdapter interact with AppNotifier through the same mechanism, with BaseCameraAdapter serving as the common interface. Let's look at the implementation of __sendFrameToSubscribers, which has also come up before:
status_t BaseCameraAdapter::__sendFrameToSubscribers(CameraFrame* frame,
                                                     KeyedVector<int, frame_callback> *subscribers,
                                                     CameraFrame::FrameType frameType)
{
    size_t refCount = 0;
    status_t ret = NO_ERROR;
    frame_callback callback = NULL;

    frame->mFrameType = frameType;

    if ( (frameType == CameraFrame::PREVIEW_FRAME_SYNC) ||
         (frameType == CameraFrame::VIDEO_FRAME_SYNC) ||
         (frameType == CameraFrame::SNAPSHOT_FRAME) ) {
        if (mFrameQueue.size() > 0) {
            CameraFrame *lframe = (CameraFrame *)mFrameQueue.valueFor(frame->mBuffer);
            frame->mYuv[0] = lframe->mYuv[0];
            frame->mYuv[1] = frame->mYuv[0] + (frame->mLength + frame->mOffset)*2/3;
        }
        else {
            CAMHAL_LOGDA("Empty Frame Queue");
            return -EINVAL;
        }
    }

    if (NULL != subscribers) {
        refCount = getFrameRefCount(frame->mBuffer, frameType);

        if (refCount == 0) {
            CAMHAL_LOGDA("Invalid ref count of 0");
            return -EINVAL;
        }

        if (refCount > subscribers->size()) {
            CAMHAL_LOGEB("Invalid ref count for frame type: 0x%x", frameType);
            return -EINVAL;
        }

        CAMHAL_LOGVB("Type of Frame: 0x%x address: 0x%x refCount start %d",
                     frame->mFrameType,
                     ( uint32_t ) frame->mBuffer,
                     refCount);

        for ( unsigned int i = 0 ; i < refCount; i++ ) {
            frame->mCookie = ( void * ) subscribers->keyAt(i);
            callback = (frame_callback) subscribers->valueAt(i);

            if (!callback) {
                CAMHAL_LOGEB("callback not set for frame type: 0x%x", frameType);
                return -EINVAL;
            }

            callback(frame);
        }
    } else {
        CAMHAL_LOGEA("Subscribers is null??");
        return -EINVAL;
    }

    return ret;
}
In the method above, the required callback is looked up in the corresponding subscriber vector and finally invoked. That callback is exactly the one we registered during initialization, so at this point the lower layer and AppNotifier are fully wired together. We have not yet spent much time on how the lower layer talks to the kernel driver through OMX to actually obtain the data and call sendCallBacks; that deserves a chapter of its own. To tie the registration and dispatch chain together in one place, a miniature sketch follows.
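This sketch is a deliberately simplified, self-contained model of the chain just described: register a relay callback with the adapter, look it up per frame type when a buffer arrives, then trampoline back into the notifier through the cookie. The names MiniAdapter, MiniNotifier, and Frame are hypothetical simplifications of BaseCameraAdapter, AppCallbackNotifier, and CameraFrame, not the TI API:

#include <cstdio>
#include <map>

struct Frame { int type; void *cookie; };
typedef void (*frame_callback)(Frame *);

// ~ BaseCameraAdapter: keeps one (cookie, callback) table per frame type
class MiniAdapter {
public:
    void enableMsgType(int frameType, frame_callback cb, void *cookie) {
        mSubscribers[frameType][cookie] = cb;             // ~ mImageSubscribers.add(...)
    }
    void sendFrameToSubscribers(Frame *frame) {           // called when a buffer arrives
        for (auto &entry : mSubscribers[frame->type]) {
            frame->cookie = entry.first;                  // so the relay can find its object
            entry.second(frame);                          // ~ callback(frame)
        }
    }
private:
    std::map<int, std::map<void *, frame_callback> > mSubscribers;
};

// ~ AppCallbackNotifier
class MiniNotifier {
public:
    static void frameCallbackRelay(Frame *f) {            // the function registered with the adapter
        static_cast<MiniNotifier *>(f->cookie)->frameCallback(f);
    }
    void frameCallback(Frame *f) {                        // would queue the frame into mFrameQ
        std::printf("notifier got frame of type %d\n", f->type);
    }
};

int main() {
    const int IMAGE_FRAME = 1;                            // stand-in for CameraFrame::IMAGE_FRAME
    MiniAdapter adapter;
    MiniNotifier notifier;
    // ~ FrameProvider::enableFrameNotification()
    adapter.enableMsgType(IMAGE_FRAME, MiniNotifier::frameCallbackRelay, &notifier);
    // ~ OMXCameraAdapter::sendCallBacks() when an OMX buffer is filled
    Frame f{IMAGE_FRAME, nullptr};
    adapter.sendFrameToSubscribers(&f);
    return 0;
}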
We also have not yet properly explained how AppNotifier delivers the data, step by step, to the application, so here is a brief look. As mentioned above, AppCallbackNotifier creates and starts a thread when it is constructed; this thread checks whether any message has arrived and dispatches it to the appropriate handler. Here we only analyze notifyFrame:
void AppCallbackNotifier::notifyFrame()
{
    ///Receive and send the frame notifications to app
    TIUTILS::Message msg;
    CameraFrame *frame;
    MemoryHeapBase *heap;
    MemoryBase *buffer = NULL;
    sp<MemoryBase> memBase;
    void *buf = NULL;

    LOG_FUNCTION_NAME;

    {
        Mutex::Autolock lock(mLock);
        if(!mFrameQ.isEmpty()) {
            mFrameQ.get(&msg);
        } else {
            return;
        }
    }

    bool ret = true;

    frame = NULL;
    switch(msg.command)
    {
        case AppCallbackNotifier::NOTIFIER_CMD_PROCESS_FRAME:

            frame = (CameraFrame *) msg.arg1;
            if(!frame)
            {
                break;
            }

            if ( (CameraFrame::RAW_FRAME == frame->mFrameType ) &&
                 ( NULL != mCameraHal ) &&
                 ( NULL != mDataCb) &&
                 ( NULL != mNotifyCb ) )
            {
                if ( mCameraHal->msgTypeEnabled(CAMERA_MSG_RAW_IMAGE) )
                {
#ifdef COPY_IMAGE_BUFFER
                    copyAndSendPictureFrame(frame, CAMERA_MSG_RAW_IMAGE);
#else
                    //TODO: Find a way to map a Tiler buffer to a MemoryHeapBase
#endif
                }
                else {
                    if ( mCameraHal->msgTypeEnabled(CAMERA_MSG_RAW_IMAGE_NOTIFY) ) {
                        mNotifyCb(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCallbackCookie);
                    }
                    mFrameProvider->returnFrame(frame->mBuffer,
                                                (CameraFrame::FrameType) frame->mFrameType);
                }

                mRawAvailable = true;
            }
            else if ( (CameraFrame::IMAGE_FRAME == frame->mFrameType) &&
                      (NULL != mCameraHal) &&
                      (NULL != mDataCb) &&
                      (CameraFrame::ENCODE_RAW_YUV422I_TO_JPEG & frame->mQuirks) )
            {
                int encode_quality = 100, tn_quality = 100;
                int tn_width, tn_height;
                unsigned int current_snapshot = 0;
                Encoder_libjpeg::params *main_jpeg = NULL, *tn_jpeg = NULL;
                void* exif_data = NULL;
                const char *previewFormat = NULL;
                camera_memory_t* raw_picture = mRequestMemory(-1, frame->mLength, 1, NULL);

                if(raw_picture) {
                    buf = raw_picture->data;
                }

                CameraParameters parameters;
                char *params = mCameraHal->getParameters();
                const String8 strParams(params);
                parameters.unflatten(strParams);

                encode_quality = parameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
                if (encode_quality < 0 || encode_quality > 100) {
                    encode_quality = 100;
                }

                tn_quality = parameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY);
                if (tn_quality < 0 || tn_quality > 100) {
                    tn_quality = 100;
                }

                if (CameraFrame::HAS_EXIF_DATA & frame->mQuirks) {
                    exif_data = frame->mCookie2;
                }

                main_jpeg = (Encoder_libjpeg::params*)
                                malloc(sizeof(Encoder_libjpeg::params));

                // Video snapshot with LDCNSF on adds a few bytes start offset
                // and a few bytes on every line. They must be skipped.
                int rightCrop = frame->mAlignment/2 - frame->mWidth;

                CAMHAL_LOGDB("Video snapshot right crop = %d", rightCrop);
                CAMHAL_LOGDB("Video snapshot offset = %d", frame->mOffset);

                if (main_jpeg) {
                    main_jpeg->src = (uint8_t *)frame->mBuffer->mapped;
                    main_jpeg->src_size = frame->mLength;
                    main_jpeg->dst = (uint8_t*) buf;
                    main_jpeg->dst_size = frame->mLength;
                    main_jpeg->quality = encode_quality;
                    main_jpeg->in_width = frame->mAlignment/2; // use stride here
                    main_jpeg->in_height = frame->mHeight;
                    main_jpeg->out_width = frame->mAlignment/2;
                    main_jpeg->out_height = frame->mHeight;
                    main_jpeg->right_crop = rightCrop;
                    main_jpeg->start_offset = frame->mOffset;
                    if ( CameraFrame::FORMAT_YUV422I_UYVY & frame->mQuirks) {
                        main_jpeg->format = TICameraParameters::PIXEL_FORMAT_YUV422I_UYVY;
                    }
                    else { //if ( CameraFrame::FORMAT_YUV422I_YUYV & frame->mQuirks)
                        main_jpeg->format = CameraParameters::PIXEL_FORMAT_YUV422I;
                    }
                }

                tn_width = parameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH);
                tn_height = parameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT);
                previewFormat = parameters.getPreviewFormat();

                if ((tn_width > 0) && (tn_height > 0) && ( NULL != previewFormat )) {
                    tn_jpeg = (Encoder_libjpeg::params*)
                                  malloc(sizeof(Encoder_libjpeg::params));
                    // if malloc fails just keep going and encode main jpeg
                    if (!tn_jpeg) {
                        tn_jpeg = NULL;
                    }
                }

                if (tn_jpeg) {
                    int width, height;
                    parameters.getPreviewSize(&width,&height);
                    current_snapshot = (mPreviewBufCount + MAX_BUFFERS - 1) % MAX_BUFFERS;
                    tn_jpeg->src = (uint8_t *)mPreviewBuffers[current_snapshot].mapped;
                    tn_jpeg->src_size = mPreviewMemory->size / MAX_BUFFERS;
                    tn_jpeg->dst_size = calculateBufferSize(tn_width,
                                                            tn_height,
                                                            previewFormat);
                    tn_jpeg->dst = (uint8_t*) malloc(tn_jpeg->dst_size);
                    tn_jpeg->quality = tn_quality;
                    tn_jpeg->in_width = width;
                    tn_jpeg->in_height = height;
                    tn_jpeg->out_width = tn_width;
                    tn_jpeg->out_height = tn_height;
                    tn_jpeg->right_crop = 0;
                    tn_jpeg->start_offset = 0;
                    tn_jpeg->format = CameraParameters::PIXEL_FORMAT_YUV420SP;
                }

                sp<Encoder_libjpeg> encoder = new Encoder_libjpeg(main_jpeg,
                                                                  tn_jpeg,
                                                                  AppCallbackNotifierEncoderCallback,
                                                                  (CameraFrame::FrameType)frame->mFrameType,
                                                                  this,
                                                                  raw_picture,
                                                                  exif_data, frame->mBuffer);
                gEncoderQueue.add(frame->mBuffer->mapped, encoder);
                encoder->run();
                encoder.clear();
                if (params != NULL)
                {
                    mCameraHal->putParameters(params);
                }
            }
            else if ( ( CameraFrame::IMAGE_FRAME == frame->mFrameType ) &&
                      ( NULL != mCameraHal ) &&
                      ( NULL != mDataCb) )
            {
                // CTS, MTS requirements: Every 'takePicture()' call
                // who registers a raw callback should receive one
                // as well. This is not always the case with
                // CameraAdapters though.
                if (!mCameraHal->msgTypeEnabled(CAMERA_MSG_RAW_IMAGE)) {
                    dummyRaw();
                } else {
                    mRawAvailable = false;
                }

#ifdef COPY_IMAGE_BUFFER
                {
                    Mutex::Autolock lock(mBurstLock);
#if defined(OMAP_ENHANCEMENT)
                    if ( mBurst )
                    {
                        copyAndSendPictureFrame(frame, CAMERA_MSG_COMPRESSED_BURST_IMAGE);
                    }
                    else
#endif
                    {
                        copyAndSendPictureFrame(frame, CAMERA_MSG_COMPRESSED_IMAGE);
                    }
                }
#else
                //TODO: Find a way to map a Tiler buffer to a MemoryHeapBase
#endif
            }
            else if ( ( CameraFrame::VIDEO_FRAME_SYNC == frame->mFrameType ) &&
                      ( NULL != mCameraHal ) &&
                      ( NULL != mDataCb) &&
                      ( mCameraHal->msgTypeEnabled(CAMERA_MSG_VIDEO_FRAME) ) )
            {
                AutoMutex locker(mRecordingLock);
                if(mRecording)
                {
                    if(mUseMetaDataBufferMode)
                    {
                        camera_memory_t *videoMedatadaBufferMemory =
                            mVideoMetadataBufferMemoryMap.valueFor(frame->mBuffer->opaque);
                        video_metadata_t *videoMetadataBuffer = (video_metadata_t *) videoMedatadaBufferMemory->data;

                        if( (NULL == videoMedatadaBufferMemory) || (NULL == videoMetadataBuffer) || (NULL == frame->mBuffer) )
                        {
                            CAMHAL_LOGEA("Error! One of the video buffers is NULL");
                            break;
                        }

                        if ( mUseVideoBuffers )
                        {
                            CameraBuffer *vBuf = mVideoMap.valueFor(frame->mBuffer->opaque);
                            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
                            Rect bounds;
                            bounds.left = 0;
                            bounds.top = 0;
                            bounds.right = mVideoWidth;
                            bounds.bottom = mVideoHeight;

                            void *y_uv[2];
                            mapper.lock((buffer_handle_t)vBuf, CAMHAL_GRALLOC_USAGE, bounds, y_uv);
                            y_uv[1] = y_uv[0] + mVideoHeight*4096;

                            structConvImage input = {frame->mWidth,
                                                     frame->mHeight,
                                                     4096,
                                                     IC_FORMAT_YCbCr420_lp,
                                                     (mmByte *)frame->mYuv[0],
                                                     (mmByte *)frame->mYuv[1],
                                                     frame->mOffset};

                            structConvImage output = {mVideoWidth,
                                                      mVideoHeight,
                                                      4096,
                                                      IC_FORMAT_YCbCr420_lp,
                                                      (mmByte *)y_uv[0],
                                                      (mmByte *)y_uv[1],
                                                      0};

                            VT_resizeFrame_Video_opt2_lp(&input, &output, NULL, 0);
                            mapper.unlock((buffer_handle_t)vBuf->opaque);
                            videoMetadataBuffer->metadataBufferType = (int) kMetadataBufferTypeCameraSource;
                            /* FIXME remove cast */
                            videoMetadataBuffer->handle = (void *)vBuf->opaque;
                            videoMetadataBuffer->offset = 0;
                        }
                        else
                        {
                            videoMetadataBuffer->metadataBufferType = (int) kMetadataBufferTypeCameraSource;
                            videoMetadataBuffer->handle = camera_buffer_get_omx_ptr(frame->mBuffer);
                            videoMetadataBuffer->offset = frame->mOffset;
                        }

                        CAMHAL_LOGVB("mDataCbTimestamp : frame->mBuffer=0x%x, videoMetadataBuffer=0x%x, videoMedatadaBufferMemory=0x%x",
                                     frame->mBuffer->opaque, videoMetadataBuffer, videoMedatadaBufferMemory);

                        mDataCbTimestamp(frame->mTimestamp, CAMERA_MSG_VIDEO_FRAME,
                                         videoMedatadaBufferMemory, 0, mCallbackCookie);
                    }
                    else
                    {
                        //TODO: Need to revisit this, should ideally be mapping the TILER buffer using mRequestMemory
                        camera_memory_t* fakebuf = mRequestMemory(-1, sizeof(buffer_handle_t), 1, NULL);
                        if( (NULL == fakebuf) || ( NULL == fakebuf->data) || ( NULL == frame->mBuffer))
                        {
                            CAMHAL_LOGEA("Error! One of the video buffers is NULL");
                            break;
                        }

                        *reinterpret_cast<buffer_handle_t*>(fakebuf->data) = reinterpret_cast<buffer_handle_t>(frame->mBuffer->mapped);
                        mDataCbTimestamp(frame->mTimestamp, CAMERA_MSG_VIDEO_FRAME, fakebuf, 0, mCallbackCookie);
                        fakebuf->release(fakebuf);
                    }
                }
            }
            else if(( CameraFrame::SNAPSHOT_FRAME == frame->mFrameType ) &&
                    ( NULL != mCameraHal ) &&
                    ( NULL != mDataCb) &&
                    ( NULL != mNotifyCb)) {
                //When enabled, measurement data is sent instead of video data
                if ( !mMeasurementEnabled ) {
                    copyAndSendPreviewFrame(frame, CAMERA_MSG_POSTVIEW_FRAME);
                } else {
                    mFrameProvider->returnFrame(frame->mBuffer,
                                                (CameraFrame::FrameType) frame->mFrameType);
                }
            }
            else if ( ( CameraFrame::PREVIEW_FRAME_SYNC == frame->mFrameType ) &&
                      ( NULL != mCameraHal ) &&
                      ( NULL != mDataCb) &&
                      ( mCameraHal->msgTypeEnabled(CAMERA_MSG_PREVIEW_FRAME)) ) {
                //When enabled, measurement data is sent instead of video data
                if ( !mMeasurementEnabled ) {
                    copyAndSendPreviewFrame(frame, CAMERA_MSG_PREVIEW_FRAME);
                } else {
                    mFrameProvider->returnFrame(frame->mBuffer,
                                                (CameraFrame::FrameType) frame->mFrameType);
                }
            }
            else if ( ( CameraFrame::FRAME_DATA_SYNC == frame->mFrameType ) &&
                      ( NULL != mCameraHal ) &&
                      ( NULL != mDataCb) &&
                      ( mCameraHal->msgTypeEnabled(CAMERA_MSG_PREVIEW_FRAME)) ) {
                copyAndSendPreviewFrame(frame, CAMERA_MSG_PREVIEW_FRAME);
            } else {
                mFrameProvider->returnFrame(frame->mBuffer,
                                            ( CameraFrame::FrameType ) frame->mFrameType);
                CAMHAL_LOGDB("Frame type 0x%x is still unsupported!", frame->mFrameType);
            }

            break;

        default:

            break;

    };

exit:

    if ( NULL != frame )
    {
        delete frame;
    }

    LOG_FUNCTION_NAME_EXIT;
}
The messages above are handled according to their type; returnFrame, copyAndSendPictureFrame, and the various callbacks are all important, but we will not dig into them here. Earlier articles have already touched on how the data ultimately reaches the app layer.
To be continued...