
Main RTSP methods (RFC 2326): OPTIONS, DESCRIBE, ANNOUNCE, SETUP, PLAY, PAUSE, TEARDOWN, GET_PARAMETER, SET_PARAMETER, REDIRECT, RECORD.

RTSP status codes:

    Status-Code =  "100"  ; Continue
                |  "200"  ; OK
                |  "201"  ; Created
                |  "250"  ; Low on Storage Space
                |  "300"  ; Multiple Choices
                |  "301"  ; Moved Permanently
                |  "302"  ; Moved Temporarily
                |  "303"  ; See Other
                |  "304"  ; Not Modified
                |  "305"  ; Use Proxy
                |  "400"  ; Bad Request
                |  "401"  ; Unauthorized
                |  "402"  ; Payment Required
                |  "403"  ; Forbidden
                |  "404"  ; Not Found
                |  "405"  ; Method Not Allowed
                |  "406"  ; Not Acceptable
                |  "407"  ; Proxy Authentication Required
                |  "408"  ; Request Time-out
                |  "410"  ; Gone
                |  "411"  ; Length Required
                |  "412"  ; Precondition Failed
                |  "413"  ; Request Entity Too Large
                |  "414"  ; Request-URI Too Large
                |  "415"  ; Unsupported Media Type
                |  "451"  ; Parameter Not Understood
                |  "452"  ; Conference Not Found
                |  "453"  ; Not Enough Bandwidth
                |  "454"  ; Session Not Found
                |  "455"  ; Method Not Valid in This State
                |  "456"  ; Header Field Not Valid for Resource
                |  "457"  ; Invalid Range
                |  "458"  ; Parameter Is Read-Only
                |  "459"  ; Aggregate operation not allowed
                |  "460"  ; Only aggregate operation allowed
                |  "461"  ; Unsupported transport
                |  "462"  ; Destination unreachable
                |  "500"  ; Internal Server Error
                |  "501"  ; Not Implemented
                |  "502"  ; Bad Gateway
                |  "503"  ; Service Unavailable
                |  "504"  ; Gateway Time-out
                |  "505"  ; RTSP Version not supported
                |  "551"  ; Option not supported

SDP format:
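The SDP returned by a DESCRIBE response describes each media track and its RTP payload mapping. A hand-written sample for an H.264 + AAC session (addresses, ports, and parameter sets below are made-up illustration values):

    v=0
    o=- 0 0 IN IP4 192.168.1.10
    s=Sample Session
    c=IN IP4 192.168.1.10
    t=0 0
    a=control:*
    m=video 0 RTP/AVP 96
    a=rtpmap:96 H264/90000
    a=fmtp:96 packetization-mode=1;profile-level-id=42C01E;sprop-parameter-sets=Z0LAHtkA8,aM4G4g==
    a=control:track1
    m=audio 0 RTP/AVP 97
    a=rtpmap:97 MPEG4-GENERIC/44100/2
    a=fmtp:97 streamtype=5;profile-level-id=15;mode=AAC-hbr;config=1210;sizeLength=13;indexLength=3;indexDeltaLength=3
    a=control:track2

The clock rate on the a=rtpmap line (90000 for video here) is exactly the mTimeScale that is used later when RTP timestamps are mapped to play time.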

ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::addPacket(
        const sp<ARTPSource> &source) {
    List<sp<ABuffer> > *queue = source->queue();

    if (queue->empty()) {
        return NOT_ENOUGH_DATA;
    }

    if (mNextExpectedSeqNoValid) {
        List<sp<ABuffer> >::iterator it = queue->begin();
        while (it != queue->end()) {
            if ((uint32_t)(*it)->int32Data() >= mNextExpectedSeqNo) {
                break;
            }

            it = queue->erase(it);
        }

        if (queue->empty()) {
            return NOT_ENOUGH_DATA;
        }
    }

    sp<ABuffer> buffer = *queue->begin();

    if (!mNextExpectedSeqNoValid) {
        mNextExpectedSeqNoValid = true;
        mNextExpectedSeqNo = (uint32_t)buffer->int32Data();
    } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) {
#if VERBOSE
        LOG(VERBOSE) << "Not the sequence number I expected";
#endif
        return WRONG_SEQUENCE_NUMBER;
    }

    uint32_t rtpTime;
    CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));

    // submit the pending access unit when the RTP timestamp changes
    if (mPackets.size() > 0 && rtpTime != mAccessUnitRTPTime) {
        submitAccessUnit();
    }
    mAccessUnitRTPTime = rtpTime;

    // append the buffer to mPackets
    mPackets.push_back(buffer);

    queue->erase(queue->begin());
    ++mNextExpectedSeqNo;

    return OK;
}

submitAccessUnit() posts the 'accu' notification, which is handled by MyHandler:

void AMPEG4AudioAssembler::submitAccessUnit() {
    CHECK(!mPackets.empty());

#if VERBOSE
    LOG(VERBOSE) << "Access unit complete (" << mPackets.size() << " packets)";
#endif

    sp<ABuffer> accessUnit = MakeCompoundFromPackets(mPackets);
    accessUnit = removeLATMFraming(accessUnit);
    CopyTimes(accessUnit, *mPackets.begin());

    if (mAccessUnitDamaged) {
        accessUnit->meta()->setInt32("damaged", true);
    }

    mPackets.clear();
    mAccessUnitDamaged = false;

    // post the 'accu' notification
    sp<AMessage> msg = mNotifyMsg->dup();
    msg->setBuffer("access-unit", accessUnit);
    msg->post();
}

MyHandler handles 'accu' in its onMessageReceived():

case 'accu':
{
    int32_t timeUpdate;
    if (msg->findInt32("time-update", &timeUpdate) && timeUpdate) {
        size_t trackIndex;
        CHECK(msg->findSize("track-index", &trackIndex));

        uint32_t rtpTime;
        uint64_t ntpTime;
        CHECK(msg->findInt32("rtp-time", (int32_t *)&rtpTime));
        CHECK(msg->findInt64("ntp-time", (int64_t *)&ntpTime));

        onTimeUpdate(trackIndex, rtpTime, ntpTime);
        break;
    }

    int32_t first;
    if (msg->findInt32("first-rtcp", &first)) {
        mReceivedFirstRTCPPacket = true;
        break;
    }

    if (msg->findInt32("first-rtp", &first)) {
        mReceivedFirstRTPPacket = true;
        break;
    }

    ++mNumAccessUnitsReceived;
    postAccessUnitTimeoutCheck();

    size_t trackIndex;
    CHECK(msg->findSize("track-index", &trackIndex));

    if (trackIndex >= mTracks.size()) {
        ALOGV("late packets ignored.");
        break;
    }

    TrackInfo *track = &mTracks.editItemAt(trackIndex);
    // ...
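The mNotifyMsg that submitAccessUnit() dup()s is the AMessage MyHandler registered when it set the track up; ARTPConnection hands a copy to each packet assembler. A rough sketch of that wiring (paraphrased from MyHandler's track setup, so treat the exact call sites as approximate):

    // In MyHandler, when a track is set up:
    sp<AMessage> notify = new AMessage('accu', this);    // 'accu' = access-unit notification
    notify->setSize("track-index", mTracks.size() - 1);  // lets the handler find the track
    // The message is passed along via ARTPConnection::addStream(); the assembler
    // dup()s it, attaches the finished "access-unit" buffer, and post()s it back
    // to MyHandler, landing in the 'accu' case above.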

NuPlayer::RTSPSource receives the access unit in its message handler:

case MyHandler::kWhatAccessUnit:
{
    size_t trackIndex;
    CHECK(msg->findSize("trackIndex", &trackIndex));

    if (mTSParser == NULL) {
        CHECK_LT(trackIndex, mTracks.size());
    } else {
        CHECK_EQ(trackIndex, 0u);
    }

    sp<ABuffer> accessUnit;
    CHECK(msg->findBuffer("accessUnit", &accessUnit));

    int32_t damaged;
    if (accessUnit->meta()->findInt32("damaged", &damaged) && damaged) {
        ALOGI("dropping damaged access unit.");
        break;
    }

    if (mTSParser != NULL) {
        size_t offset = 0;
        status_t err = OK;
        while (offset + 188 <= accessUnit->size()) {
            err = mTSParser->feedTSPacket(accessUnit->data() + offset, 188);
            if (err != OK) {
                break;
            }
            offset += 188;
        }

        if (offset < accessUnit->size()) {
            err = ERROR_MALFORMED;
        }

        if (err != OK) {
            sp<AnotherPacketSource> source = getSource(false /* audio */);
            if (source != NULL) {
                source->signalEOS(err);
            }

            source = getSource(true /* audio */);
            if (source != NULL) {
                source->signalEOS(err);
            }
        }
        break;
    }

    TrackInfo *info = &mTracks.editItemAt(trackIndex);

    sp<AnotherPacketSource> source = info->mSource;
    if (source != NULL) {
        uint32_t rtpTime;
        CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));

        if (!info->mNPTMappingValid) {
            // This is a live stream, we didn't receive any normal
            // playtime mapping. We won't map to npt time.
            source->queueAccessUnit(accessUnit);
            break;
        }

        int64_t nptUs =
            ((double)rtpTime - (double)info->mRTPTime)
                / info->mTimeScale * 1000000ll
                + info->mNormalPlaytimeUs;

        accessUnit->meta()->setInt64("timeUs", nptUs);

        source->queueAccessUnit(accessUnit);
    }
    break;
}

queueAccessUnit() stores the access unit in AnotherPacketSource's mBuffers, where the decoder later fetches it for decoding and playback.
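The nptUs formula above anchors the RTP timestamp to normal play time (NPT). A quick worked example with made-up numbers, assuming the usual 90 kHz video clock:

    // Illustration only; the values are hypothetical.
    uint32_t rtpTime          = 1800000;  // timestamp on this access unit
    uint32_t rtpAnchor        = 900000;   // info->mRTPTime from the RTP-Info header
    int32_t  timeScale        = 90000;    // clock rate from the SDP a=rtpmap line
    int64_t  normalPlaytimeUs = 0;        // info->mNormalPlaytimeUs at the anchor

    // (1800000 - 900000) / 90000 = 10 seconds after the anchor
    int64_t nptUs = ((double)rtpTime - (double)rtpAnchor)
            / timeScale * 1000000ll + normalPlaytimeUs;   // = 10,000,000 us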

void NuPlayer::onStart(int64_t startPositionUs) {
    if (!mSourceStarted) {
        mSourceStarted = true;
        mSource->start();
    }

    mOffloadAudio = false;
    mAudioEOS = false;
    mVideoEOS = false;
    mStarted = true;

    uint32_t flags = 0;
    // ...
    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
    if (mAudioSink != NULL) {
        streamType = mAudioSink->getAudioStreamType();
    }

    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
    // ...
    sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
    ++mRendererGeneration;
    notify->setInt32("generation", mRendererGeneration);
    mRenderer = new Renderer(mAudioSink, notify, flags);
    mRendererLooper = new ALooper;
    mRendererLooper->setName("NuPlayerRenderer");
    mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
    mRendererLooper->registerHandler(mRenderer);

    status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
    // ...
    float rate = getFrameRate();
    if (rate > 0) {
        mRenderer->setVideoFrameRate(rate);
    }

    if (mVideoDecoder != NULL) {
        mVideoDecoder->setRenderer(mRenderer);
    }
    if (mAudioDecoder != NULL) {
        mAudioDecoder->setRenderer(mRenderer);
    }

    postScanSources();
}

Next, let's look at how the codec gets initialized. onStart() ends by scheduling a source scan, which is what instantiates the decoders:

void NuPlayer::postScanSources() {
    if (mScanSourcesPending) {
        return;
    }

    sp<AMessage> msg = new AMessage(kWhatScanSources, this);
    msg->setInt32("generation", mScanSourcesGeneration);
    msg->post();

    mScanSourcesPending = true;
}
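Note the "generation" integer attached to the message: NuPlayer bumps mScanSourcesGeneration on seek/reset so that stale scan messages are ignored. The handler side follows the usual generation-guard pattern, sketched here (condensed; not the verbatim kWhatScanSources case):

    case kWhatScanSources:
    {
        int32_t generation;
        CHECK(msg->findInt32("generation", &generation));
        if (generation != mScanSourcesGeneration) {
            break;  // a seek/reset made this scan obsolete
        }
        mScanSourcesPending = false;
        // ... instantiate the audio/video decoders; repost the message
        // if some track is not ready yet ...
        break;
    }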

The scan creates NuPlayer::Decoder instances, which in turn create MediaCodec/ACodec; the OMX component itself is allocated in ACodec::UninitializedState::onAllocateComponent():

bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
    ALOGV("onAllocateComponent");

    // ... (OMXClient setup omitted in this excerpt; omx below is the IOMX interface)

    Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;

    AString mime;

    AString componentName;
    uint32_t quirks = 0;
    int32_t encoder = false;
    if (msg->findString("componentName", &componentName)) {
        ssize_t index = matchingCodecs.add();
        OMXCodec::CodecNameAndQuirks *entry = &matchingCodecs.editItemAt(index);
        entry->mName = String8(componentName.c_str());

        if (!OMXCodec::findCodecQuirks(componentName.c_str(), &entry->mQuirks)) {
            entry->mQuirks = 0;
        }
    } else {
        CHECK(msg->findString("mime", &mime));

        if (!msg->findInt32("encoder", &encoder)) {
            encoder = false;
        }

        OMXCodec::findMatchingCodecs(
                mime.c_str(),
                encoder, // createEncoder
                NULL,    // matchComponentName
                0,       // flags
                &matchingCodecs);
    }

    sp<CodecObserver> observer = new CodecObserver;
    IOMX::node_id node = 0;

    status_t err = NAME_NOT_FOUND;
    for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
            ++matchIndex) {
        componentName = matchingCodecs.itemAt(matchIndex).mName.string();
        quirks = matchingCodecs.itemAt(matchIndex).mQuirks;

        pid_t tid = gettid();
        int prevPriority = androidGetThreadPriority(tid);
        androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
        err = omx->allocateNode(componentName.c_str(), observer, &node);
        androidSetThreadPriority(tid, prevPriority);

        if (err == OK) {
            break;
        }

        node = 0;
    }

    // ... (failure handling omitted)

    sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
    observer->setNotificationMessage(notify);

    mCodec->mComponentName = componentName;
    mCodec->mRenderTracker.setComponentName(componentName);
    mCodec->mFlags = 0;
    mCodec->mQuirks = quirks;
    mCodec->mOMX = omx;
    mCodec->mNode = node;

    {
        sp<AMessage> notify = mCodec->mNotify->dup();
        notify->setInt32("what", CodecBase::kWhatComponentAllocated);
        notify->setString("componentName", mCodec->mComponentName.c_str());
        notify->post();
    }

    mCodec->changeState(mCodec->mLoadedState);

    return true;
}

Configuring the decoder:

status_t MediaCodec::configure(
        const sp<AMessage> &format,
        const sp<Surface> &surface,
        const sp<ICrypto> &crypto,
        uint32_t flags) {
    sp<AMessage> msg = new AMessage(kWhatConfigure, this);

    if (mIsVideo) {
        format->findInt32("width", &mVideoWidth);
        format->findInt32("height", &mVideoHeight);
        if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
            mRotationDegrees = 0;
        }
    }

    msg->setMessage("format", format);
    msg->setInt32("flags", flags);
    msg->setObject("surface", surface);
    // ...

    // save msg for reset
    mConfigureMsg = msg;

    status_t err;
    // ...
    for (int i = 0; i <= kMaxRetry; ++i) {
        if (i > 0) {
            // Don't try to reclaim resource for the first time.
            if (!mResourceManagerService->reclaimResource(resources)) {
                break;
            }
        }

        sp<AMessage> response;
        err = PostAndAwaitResponse(msg, &response);
        // ...
    }
    return err;
}
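For contrast, this is what the same configure step looks like from an application through the NDK; a minimal sketch (the MIME type, dimensions, and the window variable are example values):

    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaFormat.h>

    // Create and configure an H.264 decoder.
    AMediaCodec *codec = AMediaCodec_createDecoderByType("video/avc");
    AMediaFormat *format = AMediaFormat_new();
    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, 1280);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, 720);

    // 'window' is an ANativeWindow* to render into; crypto is NULL and flags
    // are 0 for a plain decoder. This ends up in MediaCodec::configure() above.
    media_status_t status = AMediaCodec_configure(codec, format, window, NULL, 0);
    if (status == AMEDIA_OK) {
        AMediaCodec_start(codec);
    }
    AMediaFormat_delete(format);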

The configure message is forwarded to ACodec and ends up in ACodec::configureCodec():

status_t ACodec::configureCodec(
        const char *mime, const sp<AMessage> &msg) {
    // ... (encoder flag and inputFormat/outputFormat setup omitted in this excerpt)
    mInputMetadataType = kMetadataBufferTypeInvalid;
    mOutputMetadataType = kMetadataBufferTypeInvalid;

    status_t err = setComponentRole(encoder /* isEncoder */, mime);

    if (err != OK) {
        return err;
    }

    int32_t bitRate = 0;
    // FLAC encoder doesn't need a bitrate, other encoders do
    if (encoder && strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
            && !msg->findInt32("bitrate", &bitRate)) {
        return INVALID_OPERATION;
    }

    int32_t storeMeta;
    if (encoder
            && msg->findInt32("store-metadata-in-buffers", &storeMeta)
            && storeMeta != 0) {
        err = mOMX->storeMetaDataInBuffers(
                mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
        if (err != OK) {
            ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
                    mComponentName.c_str(), err);
            return err;
        }
        // For this specific case we could be using camera source even if storeMetaDataInBuffers
        // returns Gralloc source. Pretend that we are; this will force us to use nBufferSize.
        if (mInputMetadataType == kMetadataBufferTypeGrallocSource) {
            mInputMetadataType = kMetadataBufferTypeCameraSource;
        }

        uint32_t usageBits;
        if (mOMX->getParameter(
                mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
                &usageBits, sizeof(usageBits)) == OK) {
            inputFormat->setInt32(
                    "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
        }
    }

    int32_t prependSPSPPS = 0;
    if (encoder
            && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
            && prependSPSPPS != 0) {
        OMX_INDEXTYPE index;
        err = mOMX->getExtensionIndex(
                mNode,
                "OMX.google.android.index.prependSPSPPSToIDRFrames",
                &index);

        if (err == OK) {
            PrependSPSPPSToIDRFramesParams params;
            InitOMXParams(&params);
            params.bEnable = OMX_TRUE;

            err = mOMX->setParameter(mNode, index, &params, sizeof(params));
        }

        if (err != OK) {
            ALOGE("Encoder could not be configured to emit SPS/PPS before "
                  "IDR frames. (err %d)", err);

            return err;
        }
    }

    // Only enable metadata mode on encoder output if encoder can prepend
    // sps/pps to idr frames, since in metadata mode the bitstream is in an
    // opaque handle, to which we don't have access.
    int32_t video = !strncasecmp(mime, "video/", 6);
    mIsVideo = video;
    if (encoder && video) {
        OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
            && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
            && storeMeta != 0);

        err = mOMX->storeMetaDataInBuffers(
                mNode, kPortIndexOutput, enable, &mOutputMetadataType);
        if (err != OK) {
            ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
                mComponentName.c_str(), err);
        }

        if (!msg->findInt64(
                    "repeat-previous-frame-after", &mRepeatFrameDelayUs)) {
            mRepeatFrameDelayUs = -1ll;
        }

        if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
            mMaxPtsGapUs = -1ll;
        }

        if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
            mMaxFps = -1;
        }

        if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
            mTimePerCaptureUs = -1ll;
        }

        if (!msg->findInt32(
                    "create-input-buffers-suspended",
                    (int32_t*)&mCreateInputBuffersSuspended)) {
            mCreateInputBuffersSuspended = false;
        }
    }

    // NOTE: we only use native window for video decoders
    sp<RefBase> obj;
    bool haveNativeWindow = msg->findObject("native-window", &obj)
            && obj != NULL && video && !encoder;
    mLegacyAdaptiveExperiment = false;
    if (video && !encoder) {
        inputFormat->setInt32("adaptive-playback", false);

        int32_t usageProtected;
        if (msg->findInt32("protected", &usageProtected) && usageProtected) {
            if (!haveNativeWindow) {
                ALOGE("protected output buffers must be sent to an ANativeWindow");
                return PERMISSION_DENIED;
            }
            mFlags |= kFlagIsGrallocUsageProtected;
            mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
        }
    }
    if (haveNativeWindow) {
        sp<ANativeWindow> nativeWindow =
            static_cast<ANativeWindow *>(static_cast<Surface *>(obj.get()));

        // START of temporary support for automatic FRC - THIS WILL BE REMOVED
        int32_t autoFrc;
        if (msg->findInt32("auto-frc", &autoFrc)) {
            bool enabled = autoFrc;
            OMX_CONFIG_BOOLEANTYPE config;
            InitOMXParams(&config);
            config.bEnabled = (OMX_BOOL)enabled;
            status_t temp = mOMX->setConfig(
                    mNode, (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
                    &config, sizeof(config));
            if (temp == OK) {
                outputFormat->setInt32("auto-frc", enabled);
            } else if (enabled) {
                ALOGI("codec does not support requested auto-frc (err %d)", temp);
            }
        }
        // END of temporary support for automatic FRC

        int32_t tunneled;
        if (msg->findInt32("feature-tunneled-playback", &tunneled) &&
                tunneled != 0) {
            ALOGI("Configuring TUNNELED video playback.");
            mTunneled = true;

            int32_t audioHwSync = 0;
            if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
                ALOGW("No Audio HW Sync provided for video tunnel");
            }
            err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
            if (err != OK) {
                ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
                        audioHwSync, nativeWindow.get());
                return err;
            }

            int32_t maxWidth = 0, maxHeight = 0;
            if (msg->findInt32("max-width", &maxWidth) &&
                    msg->findInt32("max-height", &maxHeight)) {

                err = mOMX->prepareForAdaptivePlayback(
                        mNode, kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
                if (err != OK) {
                    ALOGW("[%s] prepareForAdaptivePlayback failed w/ err %d",
                            mComponentName.c_str(), err);
                    // allow failure
                    err = OK;
                } else {
                    inputFormat->setInt32("max-width", maxWidth);
                    inputFormat->setInt32("max-height", maxHeight);
                    inputFormat->setInt32("adaptive-playback", true);
                }
            }
        } else {
            ALOGV("Configuring CPU controlled video playback.");
            mTunneled = false;

            // Explicitly reset the sideband handle of the window for
            // non-tunneled video in case the window was previously used
            // for a tunneled video playback.
            err = native_window_set_sideband_stream(nativeWindow.get(), NULL);
            if (err != OK) {
                ALOGE("set_sideband_stream(NULL) failed! (err %d).", err);
                return err;
            }

            // Always try to enable dynamic output buffers on native surface
            err = mOMX->storeMetaDataInBuffers(
                    mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
            if (err != OK) {
                ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
                        mComponentName.c_str(), err);

                // if adaptive playback has been requested, try JB fallback
                // NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
                // LARGE MEMORY REQUIREMENT

                // we will not do adaptive playback on software accessed
                // surfaces as they never had to respond to changes in the
                // crop window, and we don't trust that they will be able to.
                int usageBits = 0;
                bool canDoAdaptivePlayback;

                if (nativeWindow->query(
                        nativeWindow.get(),
                        NATIVE_WINDOW_CONSUMER_USAGE_BITS,
                        &usageBits) != OK) {
                    canDoAdaptivePlayback = false;
                } else {
                    canDoAdaptivePlayback =
                        (usageBits &
                                (GRALLOC_USAGE_SW_READ_MASK |
                                 GRALLOC_USAGE_SW_WRITE_MASK)) == 0;
                }

                int32_t maxWidth = 0, maxHeight = 0;
                if (canDoAdaptivePlayback &&
                        msg->findInt32("max-width", &maxWidth) &&
                        msg->findInt32("max-height", &maxHeight)) {
                    ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
                            mComponentName.c_str(), maxWidth, maxHeight);

                    err = mOMX->prepareForAdaptivePlayback(
                            mNode, kPortIndexOutput, OMX_TRUE, maxWidth,
                            maxHeight);

                    ALOGW_IF(err != OK,
                            "[%s] prepareForAdaptivePlayback failed w/ err %d",
                            mComponentName.c_str(), err);

                    if (err == OK) {
                        inputFormat->setInt32("max-width", maxWidth);
                        inputFormat->setInt32("max-height", maxHeight);
                        inputFormat->setInt32("adaptive-playback", true);
                    }
                }
                // allow failure
                err = OK;
            } else {
                ALOGV("[%s] storeMetaDataInBuffers succeeded",
                        mComponentName.c_str());
                CHECK(storingMetadataInDecodedBuffers());
                mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled(
                        "legacy-adaptive", !msg->contains("no-experiments"));

                inputFormat->setInt32("adaptive-playback", true);
            }

            int32_t push;
            if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
                    && push != 0) {
                mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
            }
        }

        int32_t rotationDegrees;
        if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
            mRotationDegrees = rotationDegrees;
        } else {
            mRotationDegrees = 0;
        }
    }

    if (video) {
        // determine need for software renderer
        bool usingSwRenderer = false;
        if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
            usingSwRenderer = true;
            haveNativeWindow = false;
        }

        if (encoder) {
            err = setupVideoEncoder(mime, msg);
        } else {
            err = setupVideoDecoder(mime, msg, haveNativeWindow);
        }

        if (err != OK) {
            return err;
        }

        if (haveNativeWindow) {
            mNativeWindow = static_cast<Surface *>(obj.get());
        }

        // initialize native window now to get actual output format
        // TODO: this is needed for some encoders even though they don't use native window
        err = initNativeWindow();
        if (err != OK) {
            return err;
        }

        // fallback for devices that do not handle flex-YUV for native buffers
        if (haveNativeWindow) {
            int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
            if (msg->findInt32("color-format", &requestedColorFormat) &&
                    requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
                status_t err = getPortFormat(kPortIndexOutput, outputFormat);
                if (err != OK) {
                    return err;
                }
                int32_t colorFormat = OMX_COLOR_FormatUnused;
                OMX_U32 flexibleEquivalent = OMX_COLOR_FormatUnused;

(!outputFormat->findInt32("color-format", &colorFormat)) { ALOGE("ouptut port did not have a color format (wrong domain?)"); BAD_VALUE; } ALOGD("[%s] Requested output format %#x and got %#x.", mComponentName.c_str(), requestedColorFormat, colorFormat); (!isFlexibleColorFormat( mOMX, mNode, colorFormat, haveNativeWindow, &flexibleEquivalent) || flexibleEquivalent != (OMX_U32)requestedColorFormat) { // device did not handle flex-YUV request for native window, fall back // to SW renderer ALOGI("[%s] Falling back to software renderer", mComponentName.c_str()); (); mNativeWindowUsageBits = 0; haveNativeWindow = ; usingSwRenderer = ; (storingMetadataInDecodedBuffers()) { err = mOMX->storeMetaDataInBuffers( mNode, kPortIndexOutput, OMX_FALSE, &mOutputMetadataType); mOutputMetadataType = kMetadataBufferTypeInvalid; // just in case // TODO: implement adaptive-playback support for bytebuffer mode. // This is done by SW codecs, but most HW codecs don't support it. inputFormat->setInt32("adaptive-playback", ); } (err == OK) { err = mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_FALSE); } (mFlags & kFlagIsGrallocUsageProtected) { // fallback is not supported for protected playback err = PERMISSION_DENIED; } (err == OK) { err = setupVideoDecoder(mime, msg, ); } } } } (usingSwRenderer) { outputFormat->setInt32("using-sw-renderer", 1); } } (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) { int32_t numChannels, sampleRate; (!msg->findInt32("channel-count", &numChannels) || !msg->findInt32("sample-rate", &sampleRate)) { // Since we did not always check for these, leave them optional // and have the decoder figure it all out. err = OK; } { err = setupRawAudioFormat( encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels); } } (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) { int32_t numChannels, sampleRate; (!msg->findInt32("channel-count", &numChannels) || !msg->findInt32("sample-rate", &sampleRate)) { err = INVALID_OPERATION; } { int32_t isADTS, aacProfile; int32_t sbrMode; int32_t maxOutputChannelCount; int32_t pcmLimiterEnable; drcParams_t drc; (!msg->findInt32("is-adts", &isADTS)) { isADTS = 0; } (!msg->findInt32("aac-profile", &aacProfile)) { aacProfile = OMX_AUDIO_AACObjectNull; } (!msg->findInt32("aac-sbr-mode", &sbrMode)) { sbrMode = -1; } (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) { maxOutputChannelCount = -1; } (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) { // value is unknown

                pcmLimiterEnable = -1;
            }
            if (!msg->findInt32("aac-encoded-target-level", &drc.encodedTargetLevel)) {
                // value is unknown
                drc.encodedTargetLevel = -1;
            }
            if (!msg->findInt32("aac-drc-cut-level", &drc.drcCut)) {
                // value is unknown
                drc.drcCut = -1;
            }
            if (!msg->findInt32("aac-drc-boost-level", &drc.drcBoost)) {
                // value is unknown
                drc.drcBoost = -1;
            }
            if (!msg->findInt32("aac-drc-heavy-compression", &drc.heavyCompression)) {
                // value is unknown
                drc.heavyCompression = -1;
            }
            if (!msg->findInt32("aac-target-ref-level", &drc.targetRefLevel)) {
                // value is unknown
                drc.targetRefLevel = -1;
            }

            err = setupAACCodec(
                    encoder, numChannels, sampleRate, bitRate, aacProfile,
                    isADTS != 0, sbrMode, maxOutputChannelCount, drc,
                    pcmLimiterEnable);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
        err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
        err = setupAMRCodec(encoder, true /* isWAMR */, bitRate);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) {
        // These are PCM-like formats with a fixed sample rate but
        // a variable number of channels.

        int32_t numChannels;
        if (!msg->findInt32("channel-count", &numChannels)) {
            err = INVALID_OPERATION;
        } else {
            int32_t sampleRate;
            if (!msg->findInt32("sample-rate", &sampleRate)) {
                sampleRate = 8000;
            }
            err = setupG711Codec(encoder, sampleRate, numChannels);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
        int32_t numChannels = 0, sampleRate = 0, compressionLevel = -1;
        if (encoder &&
                (!msg->findInt32("channel-count", &numChannels)
                        || !msg->findInt32("sample-rate", &sampleRate))) {
            ALOGE("missing channel count or sample rate for FLAC encoder");
            err = INVALID_OPERATION;
        } else {
            if (encoder) {
                if (!msg->findInt32(
                            "complexity", &compressionLevel) &&
                    !msg->findInt32(
                            "flac-compression-level", &compressionLevel)) {
                    compressionLevel = 5; // default FLAC compression level
                } else if (compressionLevel < 0) {
                    ALOGW("compression level %d outside [0..8] range, "
                          "using 0", compressionLevel);
                    compressionLevel = 0;
                } else if (compressionLevel > 8) {
                    ALOGW("compression level %d outside [0..8] range, "
                          "using 8", compressionLevel);
                    compressionLevel = 8;
                }
            }
            err = setupFlacCodec(
                    encoder, numChannels, sampleRate, compressionLevel);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        int32_t numChannels, sampleRate;
        if (encoder

                || !msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
        int32_t numChannels;
        int32_t sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupAC3Codec(encoder, numChannels, sampleRate);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)) {
        int32_t numChannels;
        int32_t sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupEAC3Codec(encoder, numChannels, sampleRate);
        }
    }

    if (err != OK) {
        return err;
    }

    if (!msg->findInt32("encoder-delay", &mEncoderDelay)) {
        mEncoderDelay = 0;
    }

    if (!msg->findInt32("encoder-padding", &mEncoderPadding)) {
        mEncoderPadding = 0;
    }

    if (msg->findInt32("channel-mask", &mChannelMask)) {
        mChannelMaskPresent = true;
    } else {
        mChannelMaskPresent = false;
    }

    int32_t maxInputSize;
    if (msg->findInt32("max-input-size", &maxInputSize)) {
        err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
    } else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) {
        err = setMinBufferSize(kPortIndexInput, 8192);  // XXX

    }

    int32_t priority;
    if (msg->findInt32("priority", &priority)) {
        err = setPriority(priority);
    }

    int32_t rateInt = -1;
    float rateFloat = -1;
    if (!msg->findFloat("operating-rate", &rateFloat)) {
        msg->findInt32("operating-rate", &rateInt);
        rateFloat = (float)rateInt;  // 16MHz (FLINTMAX) is OK for upper bound.
    }
    if (rateFloat > 0) {
        err = setOperatingRate(rateFloat, video);
    }

    mBaseOutputFormat = outputFormat;

    err = getPortFormat(kPortIndexInput, inputFormat);
    if (err == OK) {
        err = getPortFormat(kPortIndexOutput, outputFormat);
        if (err == OK) {
            mInputFormat = inputFormat;
            mOutputFormat = outputFormat;
        }
    }
    return err;
}

Once the component is running, ACodec::ExecutingState::resume() offers every free input buffer to the client:

void ACodec::ExecutingState::resume() {
    submitOutputBuffers();

    // Post all available input buffers
    if (mCodec->mBuffers[kPortIndexInput].size() == 0u) {
        ALOGW("[%s] we don't have any input buffers to resume",
                mCodec->mComponentName.c_str());
    }

    for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
        if (info->mStatus == BufferInfo::OWNED_BY_US) {
            postFillThisBuffer(info);
        }
    }

    mActive = true;
}

void ACodec::BaseState::postFillThisBuffer(BufferInfo *info) {
    if (mCodec->mPortEOS[kPortIndexInput]) {
        return;
    }

    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);

    sp<AMessage> notify = mCodec->mNotify->dup();
    notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
    notify->setInt32("buffer-id", info->mBufferID);

    info->mData->meta()->clear();
    notify->setBuffer("buffer", info->mData);

    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec);
    reply->setInt32("buffer-id", info->mBufferID);

    notify->setMessage("reply", reply);
    notify->post();

    info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
}
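postFillThisBuffer() flips the buffer's owner from ACodec to the upstream client; each buffer's position in the pipeline is tracked by a small ownership enum in ACodec::BufferInfo (summarized from ACodec.h; the comments are my own):

    enum Status {
        OWNED_BY_US,             // held by ACodec, free to hand out
        OWNED_BY_COMPONENT,      // the OMX component is filling/emptying it
        OWNED_BY_UPSTREAM,       // given to the client to fill with input data
        OWNED_BY_DOWNSTREAM,     // output delivered to the renderer/consumer
        OWNED_BY_NATIVE_WINDOW,  // queued on / dequeued from the ANativeWindow
    };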

MediaCodec receives this notification in its kWhatFillThisBuffer case:

case CodecBase::kWhatFillThisBuffer:
{
    // ...
    if (mFlags & kFlagIsAsync) {
        if (!mHaveInputSurface) {
            if (mState == FLUSHED) {
                mHavePendingInputBuffers = true;
            } else {
                onInputBufferAvailable();
            }
        }
    } else if (mFlags & kFlagDequeueInputPending) {
        CHECK(handleDequeueInputBuffer(mDequeueInputReplyID));

        ++mDequeueInputTimeoutGeneration;
        mFlags &= ~kFlagDequeueInputPending;
        mDequeueInputReplyID = 0;
    } else {
        postActivityNotificationIfPossible();
    }
    break;
}

void MediaCodec::onInputBufferAvailable() {
    int32_t index;
    while ((index = dequeuePortBuffer(kPortIndexInput)) >= 0) {
        sp<AMessage> msg = mCallback->dup();
        msg->setInt32("callbackID", CB_INPUT_AVAILABLE);
        msg->setInt32("index", index);
        msg->post();
    }
}

Remember where this mCallback came from?
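mCallback is the AMessage the client handed to MediaCodec::setCallback() before start(); setting it is what switches MediaCodec into async mode. Roughly (a condensed sketch of the kWhatSetCallback handling, not the full code):

    // MediaCodec::onMessageReceived, case kWhatSetCallback (condensed):
    sp<AMessage> callback;
    CHECK(msg->findMessage("callback", &callback));
    mCallback = callback;
    if (mCallback != NULL) {
        mFlags |= kFlagIsAsync;  // with a callback set, dequeue* must not be used
    }

In our RTSP path the callback was installed by NuPlayer::Decoder when it created the MediaCodec, so CB_INPUT_AVAILABLE is delivered to the decoder.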

The CB_INPUT_AVAILABLE callback lands in NuPlayer::Decoder, which tracks the free input slot in handleAnInputBuffer():

bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
    if (isDiscontinuityPending()) {
        return false;
    }

    sp<ABuffer> buffer;
    mCodec->getInputBuffer(index, &buffer);

    if (buffer == NULL) {
        handleError(UNKNOWN_ERROR);
        return false;
    }

    if (index >= mInputBuffers.size()) {
        for (size_t i = mInputBuffers.size(); i <= index; ++i) {
            mInputBuffers.add();
            mMediaBuffers.add();
            mInputBufferIsDequeued.add();
            mMediaBuffers.editItemAt(i) = NULL;
            mInputBufferIsDequeued.editItemAt(i) = false;
        }
    }
    mInputBuffers.editItemAt(index) = buffer;

    //CHECK_LT(bufferIx, mInputBuffers.size());

    if (mMediaBuffers[index] != NULL) {
        mMediaBuffers[index]->release();
        mMediaBuffers.editItemAt(index) = NULL;
    }
    mInputBufferIsDequeued.editItemAt(index) = true;

    if (!mCSDsToSubmit.isEmpty()) {
        sp<AMessage> msg = new AMessage();
        msg->setSize("buffer-ix", index);

        sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
        ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
        msg->setBuffer("buffer", buffer);
        mCSDsToSubmit.removeAt(0);
        CHECK(onInputBufferFetched(msg));
        return true;
    }

    while (!mPendingInputMessages.empty()) {
        sp<AMessage> msg = *mPendingInputMessages.begin();
        if (!onInputBufferFetched(msg)) {
            break;
        }
        mPendingInputMessages.erase(mPendingInputMessages.begin());
    }

    if (!mInputBufferIsDequeued.editItemAt(index)) {
        return true;
    }

    mDequeuedInputBuffers.push_back(index);

    onRequestInputBuffers();
    return true;
}
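The mCSDsToSubmit branch re-queues codec-specific data (CSD), e.g. H.264 SPS/PPS, after a flush so the decoder sees it before any frame. CSD travels in the conventional "csd-0"/"csd-1" format entries; a small sketch of a format carrying them (the NAL buffers are placeholders):

    // spsData/ppsData are hypothetical pointers to SPS/PPS NAL units,
    // e.g. decoded from the SDP's sprop-parameter-sets attribute.
    sp<ABuffer> csd0 = new ABuffer(spsSize);
    memcpy(csd0->data(), spsData, spsSize);
    sp<ABuffer> csd1 = new ABuffer(ppsSize);
    memcpy(csd1->data(), ppsData, ppsSize);

    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setBuffer("csd-0", csd0);
    format->setBuffer("csd-1", csd1);
    // NuPlayer::Decoder extracts these entries into mCSDsToSubmit and, after a
    // flush, resubmits them as ordinary input buffers as shown above.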

void NuPlayer::DecoderBase::onRequestInputBuffers() {
    if (mRequestInputBuffersPending) {
        return;
    }

    // doRequestBuffers() return true if we should request more data
    if (doRequestBuffers()) {
        mRequestInputBuffersPending = true;

        sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
        msg->post(10 * 1000ll);
    }
}

bool NuPlayer::Decoder::doRequestBuffers() {
    // mRenderer is only NULL if we have a legacy widevine source that
    // is not yet ready. In this case we must not fetch input.
    if (isDiscontinuityPending() || mRenderer == NULL) {
        return false;
    }
    status_t err = OK;
    while (err == OK && !mDequeuedInputBuffers.empty()) {
        size_t bufferIx = *mDequeuedInputBuffers.begin();
        sp<AMessage> msg = new AMessage();
        msg->setSize("buffer-ix", bufferIx);
        err = fetchInputData(msg);
        if (err != OK && err != ERROR_END_OF_STREAM) {
            // if EOS, need to queue EOS buffer
            break;
        }
        mDequeuedInputBuffers.erase(mDequeuedInputBuffers.begin());

        if (!mPendingInputMessages.empty()
                || !onInputBufferFetched(msg)) {
            mPendingInputMessages.push_back(msg);
        }
    }

    return err == -EWOULDBLOCK
            && mSource->feedMoreTSData() == OK;
}

status_t NuPlayer::Decoder::fetchInputData(sp<AMessage> &reply) {
    sp<ABuffer> accessUnit;
    bool dropAccessUnit;
    do {
        status_t err = mSource->dequeueAccessUnit(mIsAudio, &accessUnit);

        if (err == -EWOULDBLOCK) {
            return err;
        } else if (err != OK) {
            if (err == INFO_DISCONTINUITY) {
                int32_t type;
                CHECK(accessUnit->meta()->findInt32("discontinuity", &type));

                bool formatChange =
                    (mIsAudio &&
                     (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT))
                    || (!mIsAudio &&
                            (type & ATSParser::DISCONTINUITY_VIDEO_FORMAT));

                bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0;

                ALOGI("%s discontinuity (format=%d, time=%d)",
                        mIsAudio ? "audio" : "video", formatChange, timeChange);

                bool seamlessFormatChange = false;
                sp<AMessage> newFormat = mSource->getFormat(mIsAudio);
                if (formatChange) {
                    seamlessFormatChange =
                        supportsSeamlessFormatChange(newFormat);
                    // treat seamless format change separately
                    formatChange = !seamlessFormatChange;
                }

                // For format or time change, return EOS to queue EOS input,
                // then wait for EOS on output.
                if (formatChange /* not seamless */) {
                    mFormatChangePending = true;
                    err = ERROR_END_OF_STREAM;
                } else if (timeChange) {

                    rememberCodecSpecificData(newFormat);
                    mTimeChangePending = true;
                    err = ERROR_END_OF_STREAM;
                } else if (seamlessFormatChange) {
                    // reuse existing decoder and don't flush
                    rememberCodecSpecificData(newFormat);
                    continue;
                } else {
                    // This stream is unaffected by the discontinuity
                    return -EWOULDBLOCK;
                }
            }

            // reply should only be returned without a buffer set
            // when there is an error (including EOS)
            CHECK(err != OK);

            reply->setInt32("err", err);
            return ERROR_END_OF_STREAM;
        }

        dropAccessUnit = false;
        if (!mIsAudio
                && !mIsSecure
                && mRenderer->getVideoLateByUs() > 100000ll
                && mIsVideoAVC
                && !IsAVCReferenceFrame(accessUnit)) {
            dropAccessUnit = true;
            ++mNumInputFramesDropped;
        }
    } while (dropAccessUnit);

    // ALOGV("returned a valid buffer of %s data", mIsAudio ? "mIsAudio" : "video");
#if 0
    int64_t mediaTimeUs;
    CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
    ALOGV("[%s] feeding input buffer at media time %.3f",
            mIsAudio ? "audio" : "video",
            mediaTimeUs / 1E6);
#endif

    if (mCCDecoder != NULL) {
        mCCDecoder->decode(accessUnit);
    }

    reply->setBuffer("buffer", accessUnit);

    return OK;
}
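The drop decision relies on IsAVCReferenceFrame(): when video is more than 100 ms late, non-reference frames can be thrown away without corrupting anything that decodes later. For H.264 the distinguishing field is nal_ref_idc in the NAL header; a simplified standalone check (assumes a single plain NAL without its start code; real code walks every NAL in the access unit):

    #include <stdint.h>
    #include <stddef.h>

    // nal_ref_idc occupies bits 6..5 of the first NAL byte; zero means the
    // frame is not used as a reference and is safe to drop when late.
    static bool isAVCReferenceNAL(const uint8_t *nal, size_t size) {
        if (size < 1) {
            return false;
        }
        return ((nal[0] >> 5) & 0x3) != 0;
    }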