MediaPlayerService::Client::setDataSource(const char *url)
==> mPlayer = createPlayer(playerType);
==> new PVPlayer();
==> PVPlayer::PVPlayer()
==> mPlayerDriver = new PlayerDriver(this);
external/opencore/android/playerdriver.cpp
===>// start player thread 创建并启动一个线程startPlayerThread
createThreadEtc(PlayerDriver::startPlayerThread, this, "PV player");
===> PlayerDriver::playerThread()
====>mPlayer = PVPlayerFactory::CreatePlayer(this, this, this)//创建PlayerDriver使用的mPlayer
====>void PlayerDriver::Run() 最后该Run函数被启动,处理所有PlayerCommand,
也就是所有被mPlayerDriver->enqueueCommand()推入的命令
看看mPlayerCapConfig什么时候被赋值
在PVPlayer::PVPlayer()构造函数中会执行下面2行
PlayerSetup* setup = new PlayerSetup(0,0);
mInit = mPlayerDriver->enqueueCommand(setup);
来向void PlayerDriver::Run()发送PLAYER_SETUP命令,
相应的Run()将执行下面函数予以响应,进而获得了mPlayerCapConfig
// Handles the PLAYER_SETUP command queued from PVPlayer's constructor.
// Queries the engine's capability-and-config interface and stores it in
// mPlayerCapConfig for later setParametersSync() calls. On an OSCL leave
// (exception), the command is failed via commandFailed(command).
void PlayerDriver::handleSetup(PlayerSetup* command)
{
int error = 0;
// Make sure we have the capabilities and config interface first.
OSCL_TRY(error, mPlayer->QueryInterface(PVMI_CAPABILITY_AND_CONFIG_PVUUID,
(PVInterface *&)mPlayerCapConfig, command)); // obtains mPlayerCapConfig
OSCL_FIRST_CATCH_ANY(error, commandFailed(command));
}
来向void PlayerDriver::Run()发送PLAYER_SET_AUDIO_SINK命令,
相应的Run()将执行下面函数予以响应
PlayerDriver::handleSetAudioSink
mAudioOutputMIO = new AndroidAudioOutput();
mAudioOutputMIO->setAudioSink(command->audioSink());
mPlayer->AddDataSink(*mAudioSink, command)
看看MediaPlayer::prepare()的调用流程
MediaPlayer::prepare()
==> MediaPlayer::prepareAsync_l()
==> mPlayer->prepareAsync(); 这里MediaPlayer::mPlayer通过binder指向了MediaPlayerService::Client[luther.gliethttp]
==> MediaPlayerService::Client::prepareAsync
==> p->prepareAsync();
==> PVPlayer::prepareAsync()
==> mPlayerDriver->enqueueCommand(new PlayerPrepare(do_nothing, NULL));
来向void PlayerDriver::Run()发送PLAYER_PREPARE命令,
相应的Run()将执行下面函数予以响应
PLAYER_PREPARE
==> case PlayerCommand::PLAYER_PREPARE:
handlePrepare(static_cast<PlayerPrepare*>(command)); break;
==> handlePrepare会继续调用
mPlayer->Prepare(command);
==> mPlayer->Prepare
PVPlayer::prepare()
mPlayerDriver->enqueueCommand(new PlayerSetDataSource(mDataSourcePath,0,0));
mPlayerDriver->enqueueCommand(new PlayerSetAudioSink(mAudioSink,0,0));
==> handlePrepare之后会继续调用
mPlayerCapConfig->setParametersSync(NULL, &iKVPSetAsync, 1, iErrorKVP)
==> AndroidAudioOutput::setParametersSync
AudioOutput_Thread.Create((TOsclThreadFuncPtr)start_audout_thread_func,
0, (TOsclThreadFuncArg)this, Start_on_creation);
==> AndroidAudioOutput::start_audout_thread_func
==> int AndroidAudioOutput::audout_thread_func()
===>mAudioSink->open即mAudioOutput->open
MediaPlayerService::Client::setDataSource
==> mAudioOutput = new AudioOutput();
MediaPlayerService::AudioOutput::open
==> new AudioTrack
==> AudioTrack::AudioTrack构造函数
==> AudioTrack::set
==> audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
sampleRate, format, channels, (AudioSystem::output_flags)flags);
==> 如果发现该stream类型AudioSystem::gStreamOutputMap.valueFor(stream);没有建立output,那么会
frameworks/base/libs/audioflinger/AudioPolicyService.cpp
==> 调用AudioPolicyService::getOutput进一步建立该stream类型对应的output
==> 它会继续调用mpPolicyManager->getOutput,而mpPolicyManager在
==> AudioPolicyService::AudioPolicyService()中使用createAudioPolicyManager(this);创建
==> 因此AudioPolicyManagerALSA调用的mpClientInterface变量也就指向了this,也就是AudioPolicyService
==> mpClientInterface->openOutput就是AudioPolicyService::openOutput
// Opens (or creates) an output stream by delegating to AudioFlinger.
// Called by the policy manager (mpClientInterface->openOutput) when a
// stream type has no output yet. All pointer parameters are in/out:
// AudioFlinger may adjust the requested device/rate/format/channels and
// reports the mixer latency through pLatencyMs.
// Returns the new output handle, or 0 if AudioFlinger is unreachable.
audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices,
uint32_t *pSamplingRate,
uint32_t *pFormat,
uint32_t *pChannels,
uint32_t *pLatencyMs,
AudioSystem::output_flags flags)
{
// NOTE(review): original paste lost the template argument ("sp af");
// get_audio_flinger() returns sp<IAudioFlinger> in this AOSP version.
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
LOGW("openOutput() could not get AudioFlinger");
return 0;
}
return af->openOutput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, pLatencyMs, flags);
}
==> 调用audioflinger的AudioFlinger::openOutput
==> mPlaybackThreads.add(mNextThreadId, thread); // 将mNextThreadId和thread绑定,添加到mPlaybackThreads管理向量中
return mNextThreadId;返回一个mNextThreadId++自加后的变量.
因此绕了一大圈output最后就是mNextThreadId了[luther.gliethttp].
阅读(2820) | 评论(0) | 转发(0) |