1. Code analysis

In the company's project, audio on the screen-mirroring sink is decoded and played by passing the data up to the Java layer. This round trip is unnecessary: the whole pipeline can be handled in the native layer.

Reading the code shows that native decoding is in fact already implemented, so why not decode and play directly in the native layer? Let's look at what the original code does:
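The relevant code appeared as screenshots in the original post. Below is a minimal sketch of the pattern it describes — DECODE_AUDIO_IN_JAVA and mAudioCodec are the project's names, while the mime type, looper, and the two helper functions are assumptions for illustration:

// a sketch of the control flow, not the project's literal code
// inside the sink's setup path:
#ifndef DECODE_AUDIO_IN_JAVA
    // native path: the decoder is only created when the macro is NOT defined
    mAudioCodec = MediaCodec::CreateByType(
            mCodecLooper, "audio/mp4a-latm", false /*encoder*/);
#endif

void onAudioAccessUnit(const sp<ABuffer> &accessUnit) {
    if (mAudioCodec == NULL) {
        // DECODE_AUDIO_IN_JAVA: hand the compressed frame up to Java,
        // which decodes it and plays it through AudioTrack
        notifyJavaAudioData(accessUnit);   // hypothetical helper
    } else {
        // decode in the native layer
        feedNativeDecoder(accessUnit);     // hypothetical helper
    }
}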

The macro DECODE_AUDIO_IN_JAVA controls whether the mAudioCodec object is created, and the code then checks whether mAudioCodec is null to decide if the audio data should be handed to the Java layer. In other words, the code already supports decoding in the native layer and passing the decoded PCM back up to be played through AudioTrack. But after changing DECODE_AUDIO_IN_JAVA to enable native decoding, the played audio stutters badly.

2. The fix

It turned out that on the native path the wrong data size was being passed along with the decoded audio, so the player rendered invalid bytes — which is exactly what made playback so choppy.

The fix is to pass info.size — the number of valid bytes the decoder reports for the output buffer (the value highlighted in a red box in the original screenshot, which is not preserved here) — instead of the buffer's full size.
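This is the classic MediaCodec output-size mistake. A sketch of the correct pattern using the public NDK AMediaCodec API (the project itself calls the internal stagefright MediaCodec, which reports the same offset/size pair; codec and writePcm are placeholders):

#include <media/NdkMediaCodec.h>

AMediaCodecBufferInfo info;
ssize_t idx = AMediaCodec_dequeueOutputBuffer(codec, &info, 10000 /*timeout us*/);
if (idx >= 0) {
    size_t capacity = 0;
    uint8_t *out = AMediaCodec_getOutputBuffer(codec, idx, &capacity);
    // Wrong: writing 'capacity' bytes plays stale garbage after every frame -> choppy audio.
    // Right: only info.size bytes, starting at info.offset, are valid PCM.
    writePcm(out + info.offset, info.size, info.presentationTimeUs);
    AMediaCodec_releaseOutputBuffer(codec, idx, false /*render*/);
}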

3. Playing audio in the native layer

With that fix, decoding is native, but playback is still handed back to the Java layer.

We can use OpenSL ES to handle audio playback in the native layer as well.

3.1 Implementation of the OpenSLRender class

#ifndef _OPENSLRENDER_HEAD_
#define _OPENSLRENDER_HEAD_

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <utils/List.h>
#include <utils/Mutex.h>
#include <utils/Thread.h>
#include <pthread.h>

namespace android{

class OpenSLRender : public Thread {
public:
    OpenSLRender(int64_t buffertime, int32_t bufferframes = 5);
    ~OpenSLRender();
    bool init(int32_t chanNum, int rate);
    void stop();
    void setBufferTimes(int64_t buffertime);
    void queueInputBuffer(sp<ABuffer> data);
    void playerCallback();
private:
    SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
    SLObjectItf bqPlayerObject;
    SLPlayItf bqPlayerPlay;
    SLObjectItf outputMixObject;
    SLObjectItf engineObject;
    List<sp<ABuffer>> mList;     // jitter buffer of decoded PCM frames
    int64_t mBufferTimeUs;       // target buffering duration before playback starts
    int32_t mBufferFrames;       // target buffering depth in frames
    int64_t mLastPts;
    bool bFirst;
    pthread_mutex_t startMutex;
    pthread_cond_t startCond;
    Mutex dataMutex;             // guards mList: data is queued and drained on different threads
    bool bRun;
    sp<ABuffer> mMuteData;       // silence frame enqueued when the jitter buffer underruns
    int64_t mlastAudtime;
    int mPlayAudFrames;
    int mDropFrames;
    int32_t muteCounts;
    sp<ABuffer> mRenderData;
    int32_t mOverFlowContinuetimes;
private:
    virtual bool threadLoop();
    sp<ABuffer> intervalOut(int gap);
    sp<ABuffer> dropToMaxBuffer(int gap);
    sp<ABuffer> dropAutoNums();
    sp<ABuffer> getNextBuffer();
    void destroy();
    static void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context);
    DISALLOW_EVIL_CONSTRUCTORS(OpenSLRender);
};

}
#endif
//#define LOG_NDEBUG 0
#define LOG_TAG "OpenSLRender"
#include <utils/Log.h>

#include "OpenSLRender.h"

#define UNUSED(x) ((void)x)
#define AUD_DROP_THRESHOLD 5

namespace android{

OpenSLRender::OpenSLRender(int64_t bufferTime, int32_t bufferFrames):
    bqPlayerBufferQueue(NULL),
    bqPlayerObject(NULL),
    bqPlayerPlay(NULL),
    outputMixObject(NULL),
    engineObject(NULL),
    mBufferTimeUs(bufferTime),
    mBufferFrames(bufferFrames),
    mLastPts(0),
    bFirst(true),
    startMutex(PTHREAD_MUTEX_INITIALIZER),
    startCond(PTHREAD_COND_INITIALIZER),
    bRun(true),
    mMuteData(new ABuffer(2048)),
    mlastAudtime(0),
    mPlayAudFrames(0),
    mDropFrames(0),
    muteCounts(0),
    mRenderData(NULL),
    mOverFlowContinuetimes(0){
    // pre-fill the mute buffer with silence (16-bit PCM zeroes)
    memset(mMuteData->data(), 0, mMuteData->size());
}

OpenSLRender::~OpenSLRender(){
    stop();
    requestExit();
    requestExitAndWait();
}

bool OpenSLRender::init(int32_t chanNum, int rate){
    // engine interfaces
    SLEngineItf engineEngine;
    // output mix interfaces
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;
    // aux effect on the output mix, used by the buffer queue player
    const SLEnvironmentalReverbSettings reverbSettings = SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;
    SLresult result;

    // create the engine
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the engine
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the engine interface, which is needed in order to create other objects
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // create the output mix, with environmental reverb specified as a non-required interface
    const SLInterfaceID mixIds[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean mixReq[1] = {SL_BOOLEAN_FALSE};
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, mixIds, mixReq);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the output mix
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the environmental reverb interface
    // this could fail if the environmental reverb effect is not available,
    // either because the feature is not present, excessive CPU load, or
    // the required MODIFY_AUDIO_SETTINGS permission was not requested and granted
    result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
            &outputMixEnvironmentalReverb);
    if (SL_RESULT_SUCCESS == result) {
        result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                outputMixEnvironmentalReverb, &reverbSettings);
        (void)result;
    }
    // ignore unsuccessful result codes for environmental reverb, as it is optional

    // configure the audio source
    SLuint32 samplesPerSec = SL_SAMPLINGRATE_48;
    if(48000 == rate){
        samplesPerSec = SL_SAMPLINGRATE_48;
    }else if(44100 == rate){
        samplesPerSec = SL_SAMPLINGRATE_44_1;
    }
    SLuint32 audChan = chanNum;
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM,
            (audChan == 0) ? 2 : audChan,
            samplesPerSec,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, // note: stereo mask is hard-coded
            SL_BYTEORDER_LITTLEENDIAN};
    /*
     * Enable fast audio when possible: once we set the same rate as the native one,
     * the fast audio path will be triggered.
     */
    SLDataSource audioSrc = {&loc_bufq, &format_pcm};

    // configure the audio sink
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    /*
     * create the audio player:
     * fast audio is not supported when SL_IID_EFFECTSEND is required, so skip it
     * for the fast audio case
     */
    const SLInterfaceID playerIds[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
    const SLboolean playerReq[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
            2, playerIds, playerReq);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the player
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the play interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
            &bqPlayerBufferQueue);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // register a callback on the buffer queue
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, this);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // set the player's state to playing
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // start the buffering thread that primes the first enqueue
    status_t err = run("opensl buffering", ANDROID_PRIORITY_AUDIO);
    CHECK(err == OK);
    return true;
}

void OpenSLRender::destroy(){
ALOGE("opeslRender destroy ![%s][%d]",__FUNCTION__,__LINE__);
(*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_STOPPED);
// destroy buffer queue audio player object, and invalidate all associated interfaces
if (bqPlayerObject != NULL) {
(*bqPlayerObject)->Destroy(bqPlayerObject);
bqPlayerObject = NULL;
bqPlayerPlay = NULL;
bqPlayerBufferQueue = NULL;
} // destroy output mix object, and invalidate all associated interfaces
if (outputMixObject != NULL) {
(*outputMixObject)->Destroy(outputMixObject);
outputMixObject = NULL;
} // destroy engine object, and invalidate all associated interfaces
if (engineObject != NULL) {
(*engineObject)->Destroy(engineObject);
engineObject = NULL;
}
}
void OpenSLRender::stop(){
    // AutoMutex _l(dataMutex);
    ALOGE("OpenSLRender_stop:[%s%d]", __FUNCTION__, __LINE__);
    if(bRun == true){
        bRun = false;
        destroy();
    }
}

// allow the buffering time to be adjusted at runtime
void OpenSLRender::setBufferTimes(int64_t buffertime){
    AutoMutex _l(dataMutex);
    mBufferTimeUs = buffertime;
}
void OpenSLRender::queueInputBuffer(sp<ABuffer> data){
    // input path: called from the decoder thread, be careful with locking
    AutoMutex _l(dataMutex);
    // sanity-check the pts: drop frames that go backwards in time
    if(!mList.empty()){
        sp<ABuffer> max = *(--mList.end());
        int64_t dataPts = 0, maxPts = 0;
        CHECK(data->meta()->findInt64("timePts", &dataPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        if(dataPts < maxPts){
            ALOGD("[%s%d] pts error data:%lld list:%lld\n", __FUNCTION__, __LINE__,
                    (long long)dataPts, (long long)maxPts);
            return;
        }
    }
#if ENABLE_STATISTICS
    ALOGD("Audio in, %lld remain __%zu__ [%s%d]\n",
            (long long)ALooper::GetNowUs(), mList.size(), __FUNCTION__, __LINE__);
#endif
    mList.push_back(data);
    if(bFirst){
        // before the first frame is rendered, wait until either half of the target
        // buffering time or half of the target frame count has accumulated,
        // then wake up threadLoop() to prime the buffer queue
        sp<ABuffer> min = *mList.begin();
        sp<ABuffer> max = *(--mList.end());
        int64_t minPts = 0, maxPts = 0;
        CHECK(min->meta()->findInt64("timePts", &minPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        //if((maxPts - minPts > mBufferTimeUs) || mList.size() >= mBufferFrames){
        if((maxPts - minPts > mBufferTimeUs/2) || mList.size() >= (size_t)mBufferFrames/2){
            // buffered enough, go!
            pthread_mutex_lock(&startMutex);
            pthread_cond_signal(&startCond);
            pthread_mutex_unlock(&startMutex);
        }
    }
}
void OpenSLRender::playerCallback(){
    AutoMutex _l(dataMutex);
    if(!bRun){
        return;
    }
    int64_t nowUs = ALooper::GetNowUs();
    if(!mList.empty()){
        sp<ABuffer> min = *mList.begin();
        sp<ABuffer> max = *(--mList.end());
        int64_t minPts = 0, maxPts = 0;
        CHECK(min->meta()->findInt64("timePts", &minPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        //if(maxPts - minPts > mBufferTimeUs - timeDuration){
        if(mList.size() >= (size_t)mBufferFrames){
            mOverFlowContinuetimes++;
        }else{
            mOverFlowContinuetimes = 0;
        }
        if(mOverFlowContinuetimes > AUD_DROP_THRESHOLD)
        {
            // the queue has stayed over the target depth for too long:
            // drop frames to pull latency back down
            //data = intervalOut(2);
            //data = dropAutoNums();
            int flowFrames = (int)mList.size() - mBufferFrames;
            if(flowFrames >= mBufferFrames){
                // badly overflowed: drop all the excess frames at once
                sp<ABuffer> data = dropToMaxBuffer(flowFrames);
                mRenderData = getNextBuffer();
            }else{
                // mildly overflowed: drop two frames, then render the next one
                sp<ABuffer> data = dropToMaxBuffer(2);
                mRenderData = getNextBuffer();
            }
            mOverFlowContinuetimes = 0;
        }else{
            // normal case: render frames one by one
            mRenderData = getNextBuffer();
            mPlayAudFrames++;
        }
    }else{
        // underrun: play silence so the buffer queue keeps running
        mRenderData = mMuteData;
        muteCounts++;
        mOverFlowContinuetimes = 0;
    }
    if(mRenderData == NULL){
        // nothing to render, fall back to the mute data
        mRenderData = mMuteData;
        muteCounts++;
    }
    SLresult result;
    // enqueue the next buffer; the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
    // which for this code would indicate a programming error
    result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, mRenderData->data(), mRenderData->size());
    if (SL_RESULT_SUCCESS != result) {
        ALOGE("Enqueue failed: %d [%s%d]", (int)result, __FUNCTION__, __LINE__);
    }
    // once per second, log playback statistics
    if(!mlastAudtime)
    {
        mlastAudtime = nowUs;
    }
    if(nowUs - mlastAudtime >= 1000*1000)
    {
        ALOGE("playback(%d) dropped(%d) muteCounts(%d) frames in one second, QSize:%d",
                mPlayAudFrames, mDropFrames, muteCounts, (int32_t)mList.size());
        mDropFrames = 0;
        mPlayAudFrames = 0;
        mlastAudtime = nowUs;
        muteCounts = 0;
    }
}

void OpenSLRender::bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context){
    UNUSED(bq);
    OpenSLRender *pRender = static_cast<OpenSLRender*>(context);
    if(pRender){
        pRender->playerCallback();
    }
}

sp<ABuffer> OpenSLRender::intervalOut(int gap){
    int count = 0;
    sp<ABuffer> data = NULL;
    while((data = getNextBuffer()) != NULL && ++count < gap){
        // drop intermediate buffers, keep every gap-th one
    }
    return data;
}

sp<ABuffer> OpenSLRender::dropToMaxBuffer(int gap){
    sp<ABuffer> data = NULL;
    int count = 0;
    while((data = getNextBuffer()) != NULL && count++ < gap){
        mDropFrames++;
    }
    return data;
}

sp<ABuffer> OpenSLRender::dropAutoNums(){
    // drop one queued frame for every mute frame that was inserted earlier
    sp<ABuffer> data = NULL;
    while((data = getNextBuffer()) != NULL && muteCounts > 0){
        muteCounts--;
    }
    return data;
}

sp<ABuffer> OpenSLRender::getNextBuffer(){
    if(!mList.empty()){
        sp<ABuffer> data = *mList.begin();
        mList.erase(mList.begin());
        return data;
    }
    return NULL;
}

bool OpenSLRender::threadLoop(){
    if(bFirst){
        // block until queueInputBuffer() signals that enough data has accumulated
        pthread_mutex_lock(&startMutex);
        pthread_cond_wait(&startCond, &startMutex);
        pthread_mutex_unlock(&startMutex);
        ALOGE("[%s%d] start out\n", __FUNCTION__, __LINE__);
        bFirst = false;
    }
    // prime the buffer queue with the first enqueue; after this, OpenSL ES
    // drives playback by invoking bqPlayerCallback each time a buffer completes
    playerCallback();
    // returning false ends the thread: it is only needed to kick things off
    return false;
}
}

3.2 Using the OpenSLRender class

3.2.1 Creating and initializing an OpenSLRender object
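The original code for this step was a screenshot; here is a minimal sketch based on the class interface above. The 300 ms buffering time, the frame count of 8, and the 48 kHz stereo parameters are illustrative assumptions:

// sketch: create the renderer with an assumed jitter buffer of 300 ms / 8 frames
sp<OpenSLRender> mOpenSLRender = new OpenSLRender(300 * 1000LL /*us*/, 8);
// assumed stream parameters: 2 channels at 48 kHz
if (!mOpenSLRender->init(2, 48000)) {
    ALOGE("OpenSLRender init failed");
}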

3.2.2 Feeding data to OpenSLRender for playback
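Also a sketch: each decoded PCM chunk is wrapped in an ABuffer with its pts stored under the "timePts" meta key, which is the key queueInputBuffer() and playerCallback() read. Here pcm, pcmSize, and ptsUs are placeholders for the decoder's output data, its valid byte count (the info.size from section 2), and its timestamp:

// sketch: hand one decoded PCM chunk to the renderer
sp<ABuffer> buf = new ABuffer(pcmSize);   // pcmSize = info.size from the decoder
memcpy(buf->data(), pcm, pcmSize);
buf->meta()->setInt64("timePts", ptsUs);  // the pts key OpenSLRender expects
mOpenSLRender->queueInputBuffer(buf);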

3.2.3 Stopping OpenSLRender
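Stopping is a single call; dropping the strong pointer afterwards lets the destructor join the buffering thread as well:

// sketch: stop playback and release the renderer
mOpenSLRender->stop();
mOpenSLRender.clear();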
