投屏Sink端音频底层解码并用OpenSLES进行播放
一、代码分析
在公司项目中,音频解码及播放是把数据传到Java层进行解码播放的,其实这个步骤没有必要,完全可以在底层进行处理。
通过代码发现其实也做了在底层进行解码,那么为啥不直接使用底层解码播放呢,我们可以看看原先代码怎么做的:



代码中通过定义的宏DECODE_AUDIO_IN_JAVA来控制mAudioCodec对象是否创建,再通过mAudioCodec对象是否为null来控制音频数据是否传给Java层处理。代码原本已经支持在底层解码后再传回上层使用AudioTrack进行播放,于是我修改宏DECODE_AUDIO_IN_JAVA让其在底层进行解码,运行后会发现播放的声音非常卡顿。
二、解决办法
最终发现,原来是使用底层处理时传给播放器的音频数据大小不对,才导致播放的声音非常卡顿。
解决办法就是将下面红框的修改成info.size就可以了。

三、底层播放音频
但是这样还是将音频的播放传给Java层进行播放。
我们可以通过使用OpenSLES来处理底层音频的播放
3.1 OpenSLRender类的实现
#ifndef _OPENSLRENDER_HEAD_
#define _OPENSLRENDER_HEAD_
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
namespace android{
// Plays decoded PCM audio through OpenSL ES with a small jitter buffer.
// Buffers queued via queueInputBuffer() are consumed by the OpenSL buffer-queue
// callback; when the queue runs dry a pre-zeroed mute buffer is played instead,
// and when the queue overflows for several consecutive callbacks frames are
// dropped to catch up. The inherited Thread is used only to prime the first
// Enqueue once enough data has been buffered.
class OpenSLRender : public Thread {
public:
// buffertime: jitter-buffer depth in microseconds; bufferframes: depth in frames.
OpenSLRender(int64_t buffertime,int32_t bufferframes=5);
~OpenSLRender();
// Creates the OpenSL engine / output mix / player for the given channel count
// and sample rate, starts playback, and launches the priming thread.
// Failures abort via CHECK; returns true on success.
bool init(int32_t chanNum,int rate);
// Stops playback and tears down all OpenSL objects (idempotent via bRun).
void stop();
// Adjusts the jitter-buffer depth at runtime (thread-safe).
void setBufferTimes(int64_t buffertime);
// Appends one PCM buffer; its meta must carry an int64 "timePts".
// Buffers with a pts older than the newest queued one are discarded.
void queueInputBuffer(sp<ABuffer> data);
// Body of the OpenSL buffer-queue callback: picks the next buffer (or mute
// data) and enqueues it on the player.
void playerCallback();
private:
SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
SLObjectItf bqPlayerObject;
SLPlayItf bqPlayerPlay;
SLObjectItf outputMixObject;
SLObjectItf engineObject;
List<sp<ABuffer>> mList;
int64_t mBufferTimeUs;
int32_t mBufferFrames;
int64_t mLasPts;
bool bFist; // true until the first buffer has been enqueued (sic: "first")
pthread_mutex_t startMutex;
pthread_cond_t startCond;
Mutex dataMutex; // serializes data in/out across producer and callback threads
bool bRun;
sp<ABuffer> mMuteData; // zero-filled buffer played when the queue is empty
int64_t mlastAudtime;
int mPlayAudFrames; // frames played in the current 1-second stats window
int mDropFrames; // frames dropped in the current 1-second stats window
int32_t muteCounts; // mute buffers played in the current 1-second stats window
sp<ABuffer> mRenderData; // buffer currently handed to OpenSL (kept alive here)
int32_t mOverFlowContinuetimes; // consecutive callbacks with an overflowing queue
private:
virtual bool threadLoop();
sp<ABuffer> intervalOut(int gap);
sp<ABuffer> dropToMaxBuffer(int gap);
sp<ABuffer> dropAutoNums();
sp<ABuffer> getNextBuffer();
void destroy();
static void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context);
DISALLOW_EVIL_CONSTRUCTORS(OpenSLRender);
};
}
#endif
//#define LOG_NDEBUG 0
#define LOG_TAG "OpenSLRender"
#include"OpenSLRender.h"
#define UNUSED(x) ((void)x)
#define AUD_DROP_THRESHOLD 5
namespace android{
// Constructs the renderer with the requested jitter-buffer depth.
// Fixed: the OpenSL handles (bqPlayer*/outputMixObject/engineObject) and
// mLasPts were previously left uninitialized, so destroy() could dereference
// garbage if init() was never called; the init list is now also in declaration
// order, so the members are initialized in the order written (-Wreorder clean).
OpenSLRender::OpenSLRender(int64_t bufferTime,int32_t bufferFrames):
    bqPlayerBufferQueue(NULL),
    bqPlayerObject(NULL),
    bqPlayerPlay(NULL),
    outputMixObject(NULL),
    engineObject(NULL),
    mBufferTimeUs(bufferTime),
    mBufferFrames(bufferFrames),
    mLasPts(0),
    bFist(true),
    startMutex(PTHREAD_MUTEX_INITIALIZER),
    startCond(PTHREAD_COND_INITIALIZER),
    bRun(true),
    mMuteData(new ABuffer(2048)),
    mlastAudtime(0),
    mPlayAudFrames(0),
    mDropFrames(0),
    muteCounts(0),
    mRenderData(NULL),
    mOverFlowContinuetimes(0){
    // Pre-zero the mute buffer so underruns play silence, not noise.
    memset(mMuteData->data(),0,mMuteData->size());
}
// Tears down OpenSL and joins the priming thread.
OpenSLRender::~OpenSLRender(){
    stop();
    // Wake threadLoop() if it is still parked in pthread_cond_wait() waiting
    // for the first buffers; otherwise requestExitAndWait() below would block
    // forever because no more data will ever arrive to signal it.
    pthread_mutex_lock(&startMutex);
    pthread_cond_broadcast(&startCond);
    pthread_mutex_unlock(&startMutex);
    // requestExitAndWait() already implies requestExit(); the extra call the
    // original made was redundant.
    requestExitAndWait();
}
// Builds the OpenSL ES pipeline (engine -> output mix -> buffer-queue player),
// registers the playback callback, starts playing, and launches the priming
// thread. chanNum: channel count (0 falls back to stereo); rate: sample rate
// in Hz (only 44100/48000 are mapped; others fall back to 48 kHz, matching the
// original behavior). All hard failures abort via CHECK; returns true.
bool OpenSLRender::init(int32_t chanNum,int rate){
    SLresult result;
    SLEngineItf engineEngine;
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;
    // Aux effect on the output mix, used by the buffer-queue player.
    const SLEnvironmentalReverbSettings reverbSettings =
            SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;

    // --- engine ---
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    // The engine interface is needed to create every other object.
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    CHECK(SL_RESULT_SUCCESS == result);

    // --- output mix, with environmental reverb as a non-required interface ---
    const SLInterfaceID mixIds[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean mixReq[1] = {SL_BOOLEAN_FALSE};
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, mixIds, mixReq);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    // Reverb may legitimately be unavailable (feature missing, CPU load, or
    // MODIFY_AUDIO_SETTINGS not granted) — it is optional, so failures are ignored.
    result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
            &outputMixEnvironmentalReverb);
    if (SL_RESULT_SUCCESS == result) {
        result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                outputMixEnvironmentalReverb, &reverbSettings);
        (void)result;
    }

    // --- audio source: PCM over an Android simple buffer queue (2 buffers) ---
    SLuint32 samplesPerSec = SL_SAMPLINGRATE_48; // SL rates are in milliHz
    if(44100 == rate){
        samplesPerSec = SL_SAMPLINGRATE_44_1;
    }
    SLuint32 numChannels = (chanNum == 0) ? 2 : (SLuint32)chanNum;
    // Fixed: the speaker mask must match the channel count. The original always
    // used FRONT_LEFT|FRONT_RIGHT, which is invalid for mono sources.
    SLuint32 channelMask = (numChannels == 1)
            ? SL_SPEAKER_FRONT_CENTER
            : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM,
            numChannels,
            samplesPerSec,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_PCMSAMPLEFORMAT_FIXED_16,
            channelMask,
            SL_BYTEORDER_LITTLEENDIAN};
    SLDataSource audioSrc = {&loc_bufq, &format_pcm};
    // --- audio sink: the output mix ---
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    // --- player ---
    // SL_IID_EFFECTSEND / SL_IID_MUTESOLO are skipped: requiring EFFECTSEND
    // disables the fast-audio path. Fixed: the arrays are now sized to the
    // number of interfaces actually requested (the original declared [3] with
    // fewer initializers while passing a count of 2).
    const SLInterfaceID playerIds[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
    const SLboolean playerReq[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
            2, playerIds, playerReq);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
            &bqPlayerBufferQueue);
    CHECK(SL_RESULT_SUCCESS == result);
    // Every consumed buffer re-enters playerCallback() through this callback.
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, this);
    CHECK(SL_RESULT_SUCCESS == result);
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    CHECK(SL_RESULT_SUCCESS == result);

    // Start the priming thread; it performs the very first Enqueue once enough
    // data has been buffered (see threadLoop / queueInputBuffer).
    status_t err = run("opensl buffering", ANDROID_PRIORITY_AUDIO);
    CHECK(err==OK);
    return true;
}
// Stops playback and destroys all OpenSL objects, invalidating their
// associated interfaces. Safe to call even if init() never ran, because the
// constructor NULL-initializes every handle.
void OpenSLRender::destroy(){
    ALOGE("opeslRender destroy ![%s][%d]",__FUNCTION__,__LINE__);
    // Fixed: the original dereferenced bqPlayerPlay unconditionally, crashing
    // when init() had not been called (or destroy() ran twice).
    if (bqPlayerPlay != NULL) {
        (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_STOPPED);
    }
    // Destroy the player object; its interfaces become invalid with it.
    if (bqPlayerObject != NULL) {
        (*bqPlayerObject)->Destroy(bqPlayerObject);
        bqPlayerObject = NULL;
        bqPlayerPlay = NULL;
        bqPlayerBufferQueue = NULL;
    }
    if (outputMixObject != NULL) {
        (*outputMixObject)->Destroy(outputMixObject);
        outputMixObject = NULL;
    }
    if (engineObject != NULL) {
        (*engineObject)->Destroy(engineObject);
        engineObject = NULL;
    }
}
void OpenSLRender::stop(){
// AutoMutex _l(dataMutex);
ALOGE("OpenSLRender_stop:[%s%d]",__FUNCTION__,__LINE__);
if(bRun==true){
bRun=false;
destroy();
}
}
// Runtime adjustment of the jitter-buffer depth (microseconds).
// Thread-safe with respect to queueInputBuffer()/playerCallback().
void OpenSLRender::setBufferTimes(int64_t buffertime){
    AutoMutex lock(dataMutex);
    mBufferTimeUs = buffertime;
}
// Appends one decoded PCM buffer to the jitter queue. The buffer's meta must
// carry an int64 "timePts"; out-of-order buffers (pts older than the newest
// queued one) are discarded. While still priming (bFist), signals the waiting
// threadLoop() once half the configured depth (time or frames) is buffered.
void OpenSLRender::queueInputBuffer(sp<ABuffer> data){
    AutoMutex _l(dataMutex);
    // Enforce monotonically non-decreasing pts.
    if(!mList.empty()){
        sp<ABuffer> max = *(--mList.end());
        int64_t dataPts=0,maxPts=0;
        CHECK(data->meta()->findInt64("timePts", &dataPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        if(dataPts < maxPts){
            // Fixed: use %lld for int64_t (was %ld) and log the values in the
            // order the labels name them (was swapped: "data:" printed maxPts).
            ALOGD("[%s%d] pts error data:%lld list:%lld\n",__FUNCTION__,__LINE__,
                    (long long)dataPts,(long long)maxPts);
            return;
        }
    }
#if ENABLE_STATISTICS
    ALOGD(COMMON_DEBUG," Audio in, %lld remain __%ld__ [%s%d]\n",ALooper::GetNowUs(), mList.size(),__FUNCTION__,__LINE__);
#endif
    mList.push_back(data);
    if(bFist){
        sp<ABuffer> min = *mList.begin();
        sp<ABuffer> max = *(--mList.end());
        int64_t minPts=0,maxPts=0;
        CHECK(min->meta()->findInt64("timePts", &minPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        // Kick off playback at half the configured depth to keep startup
        // latency low; the buffer keeps filling while playback ramps up.
        if((maxPts - minPts > mBufferTimeUs/2) || mList.size()>=mBufferFrames/2){
            pthread_mutex_lock(&startMutex);
            pthread_cond_signal(&startCond);
            pthread_mutex_unlock(&startMutex);
        }
    }
}
void OpenSLRender::playerCallback(){
AutoMutex _l(dataMutex);
if(!bRun){
return;
}
int64_t nowUs = ALooper::GetNowUs();
if(!mList.empty()){
sp<ABuffer> min = *mList.begin();
sp<ABuffer> max = *(--mList.end());
int64_t minPts=0,maxPts=0;
CHECK(min->meta()->findInt64("timePts", &minPts));
CHECK(max->meta()->findInt64("timePts", &maxPts));
//if(maxPts - minPts > mBufferTimeUs -timeDuration){
if(mList.size()>=mBufferFrames) {
mOverFlowContinuetimes++;
}else{
mOverFlowContinuetimes = 0;
}
if(mOverFlowContinuetimes > AUD_DROP_THRESHOLD)
{
//"Break out"
//Take one output to render for every two buffers
//data = intervalOut(2);
//data = dropAutoNums();
int flowFrames = mList.size() - mBufferFrames;
if( flowFrames >= mBufferFrames){
//ALOGD(COMMON_DEBUG,"video jetterbuff dopallflows %d [%s%d] mList.size():%ld \n",flowFrames,__FUNCTION__,__LINE__,mList.size());
sp<ABuffer> data = dropToMaxBuffer(flowFrames);
mRenderData = getNextBuffer();
}else{
//"Break out"
//Take one output to render for every two buffers
sp<ABuffer> data = dropToMaxBuffer(2);
mRenderData = getNextBuffer();
}
mOverFlowContinuetimes = 0;
}else{
//one by one
mRenderData = getNextBuffer();
mPlayAudFrames++;
}
}else{
mRenderData = mMuteData;
muteCounts++;
mOverFlowContinuetimes = 0;
}
if(mRenderData ==NULL){
//just give the mutex data
mRenderData = mMuteData;
muteCounts++;
}
SLresult result;
//enqueue another buffer
result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, mRenderData->data(), mRenderData->size());
// the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
// which for this code example would indicate a programming error
if (SL_RESULT_SUCCESS != result) {
}
if(!mlastAudtime)
{
mlastAudtime = nowUs;
}
if(nowUs - mlastAudtime >= 1000*1000)
{
ALOGE("playback(%d) droped(%d) muteCounts(%d) frames in one second,QSize:%d",mPlayAudFrames,mDropFrames,muteCounts,(int32_t)mList.size());
mDropFrames = 0;
mPlayAudFrames = 0;
mlastAudtime = nowUs;
muteCounts = 0;
}
}
// OpenSL buffer-queue callback trampoline: recovers the renderer instance
// from the opaque context registered in init() and forwards the call.
void OpenSLRender::bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context){
    UNUSED(bq);
    OpenSLRender *self = static_cast<OpenSLRender*>(context);
    if (self != NULL) {
        self->playerCallback();
    }
}
// Alternative drop policy (currently unused): discards gap-1 buffers and
// returns the gap-th, i.e. renders one buffer out of every `gap`.
// Returns NULL if the queue empties first. Caller must hold dataMutex.
sp<ABuffer> OpenSLRender::intervalOut(int gap){
    sp<ABuffer> out = NULL;
    for (int taken = 1; ; ++taken) {
        out = getNextBuffer();
        if (out == NULL || taken >= gap) {
            break;
        }
    }
    return out;
}
// Discards up to `gap` buffers from the head of the queue (counting each in
// mDropFrames) and returns the buffer that follows them, or NULL if the queue
// empties first. Caller must hold dataMutex.
sp<ABuffer> OpenSLRender::dropToMaxBuffer(int gap){
    int removed = 0;
    sp<ABuffer> last = NULL;
    for (;;) {
        last = getNextBuffer();
        if (last == NULL || removed >= gap) {
            break;
        }
        removed++;
        mDropFrames++;
    }
    return last;
}
// Alternative drop policy (currently unused): discards one queued buffer for
// every mute buffer previously played (muteCounts), then returns the next
// buffer, or NULL if the queue empties first. Caller must hold dataMutex.
sp<ABuffer> OpenSLRender::dropAutoNums(){
    sp<ABuffer> cur = getNextBuffer();
    while (cur != NULL && muteCounts > 0) {
        muteCounts--;
        cur = getNextBuffer();
    }
    return cur;
}
// Pops and returns the oldest queued buffer, or NULL when the queue is empty.
// Not internally synchronized — caller must hold dataMutex.
sp<ABuffer> OpenSLRender::getNextBuffer(){
    if (mList.empty()) {
        return NULL;
    }
    List<sp<ABuffer>>::iterator head = mList.begin();
    sp<ABuffer> front = *head;
    mList.erase(head);
    return front;
}
// One-shot priming loop for the buffering thread started in init(): waits
// until queueInputBuffer() signals that enough data is buffered, performs the
// very first playerCallback() (whose Enqueue starts the OpenSL callback
// chain), then returns false so the Thread exits — all subsequent callbacks
// come from OpenSL itself via bqPlayerCallback().
// NOTE(review): the pthread_cond_wait has no predicate loop, so a spurious
// wakeup (or the wake-up issued during teardown) starts playback early with a
// shallow buffer — presumably tolerable here, but worth confirming.
bool OpenSLRender::threadLoop(){
if(bFist){
pthread_mutex_lock(&startMutex);
pthread_cond_wait(&startCond,&startMutex);
pthread_mutex_unlock(&startMutex);
ALOGE("[%s%d]start out\n",__FUNCTION__,__LINE__);
bFist = false;
}
//to start play: the first Enqueue primes the OpenSL buffer queue
playerCallback();
// Returning false ends the thread after this single priming iteration.
return false;
}
}
3.2 OpenSLRender类的使用
3.2.1 创建OpenSLRender对象并初始化

3.2.2 OpenSLRender的数据处理播放

3.2.3 OpenSLRender的停止

投屏Sink端音频底层解码并用OpenSLES进行播放的更多相关文章
- iphone 与 PC端电脑投屏设置
1. iphone端安装: 屏幕投影助手 下载地址 https://itunes.apple.com/cn/app/ping-mu-tou-ying-zhu-shou/id1152332174?mt= ...
- FFMPEG视音频编解码零基础学习方法-b
感谢大神分享,虽然现在还看不懂,留着大家一起看啦 PS:有不少人不清楚“FFmpeg”应该怎么读.它读作“ef ef em peg” 0. 背景知识 本章主要介绍一下FFMPEG都用在了哪里(在这里仅 ...
- [总结]FFMPEG视音频编解码零基础学习方法
在CSDN上的这一段日子,接触到了很多同行业的人,尤其是使用FFMPEG进行视音频编解码的人,有的已经是有多年经验的“大神”,有的是刚开始学习的初学者.在和大家探讨的过程中,我忽然发现了一个问题:在“ ...
- [转载] FFMPEG视音频编解码零基础学习方法
在CSDN上的这一段日子,接触到了很多同行业的人,尤其是使用FFMPEG进行视音频编解码的人,有的已经是有多年经验的“大神”,有的是刚开始学习的初学者.在和大家探讨的过程中,我忽然发现了一个问题:在“ ...
- [总结]FFMPEG视音频编解码零基础学习方法【转】
本文转载自:http://blog.csdn.net/leixiaohua1020/article/details/15811977 在CSDN上的这一段日子,接触到了很多同行业的人,尤其是使用FFM ...
- ios屏幕怎么投屏到电脑显示器
iphone在国内一直都很受欢迎,为什么这么受欢迎呢?其实苹果手机操作系统非常的新颖,让人对手机有了重新的认识.但是ios屏幕怎么投屏到电脑显示器.感兴趣的一起阅读下面的内容吧! 使用工具: 苹果手机 ...
- iphone屏幕镜像怎么用 手机投屏电脑
手机看视频有的时候总会感觉到累,屏幕太小看的不够爽又或者用手一直拿着手机看累得慌.我就就喜欢看电视因为电视屏幕大看的爽,而且现在很多手机视频都可以往电视上投影视频,那么iphone屏幕镜像怎么用? 使 ...
- iphone怎么投屏到电脑屏幕上
随着苹果手机的更显换代,苹果手机的功能越来越强大,其中iphone手机更新了airplay镜像功能,所以想要手机投屏电脑的小伙伴就更加方便了,但是iphone怎么投屏到电脑呢?大家不用着急,下面即将为 ...
- 教你如何开发一个完败Miracast的投屏新功能
手机与电视,是陪伴在我们生活最常见,陪伴最长久的智能设备.迅猛发展的移动终端与通信技术,赋予了手机更广阔多元的应用生态,大屏电视则以大视野和震撼影音,弥补了手里方寸带来的视觉局限.而今,手机的延伸 ...
- CS5265 新出TYPEC转HDMI 4K60 高清投屏转接方案|可替代RTD2172
CS5265是一种高度集成的单芯片,主要用于设计typec转HDMI转接线或者typeC转HDMI转换器,应用在各种手机或者电脑显示端设备当中.用CS5265设计的TYPEC转HDMI 4K高清投屏线 ...
随机推荐
- SUM和IF使用求部分和
GROUP BY可以按照某一列的不同值进行分组,然后将不同组的数据可以利用聚合函数进行汇总取值. --我们可以在老师表里面求解不同班级的老师分别有多少名 SELECT class_id,COUNT(t ...
- Linux NFS挂载报错wrong fs type, bad option, bad superblock
1.故障现象 2.解决方案 1.故障现象 我的测试环境有一个NAS,之前配置都是按照测试需求在/etc/fstab里添加配置挂载选项: vi /etc/fstab 192.168.1.2:/mnt/H ...
- ArrayList中的遍历删除
ArrayList 中的遍历删除 在代码编写过程中经常会遇到这样的要求:遍历一个线性表,要求只遍历一遍(时间复杂度\(O(n)\)),删除符合指定条件的元素,且要求空间复杂度 \(O(1)\). 例如 ...
- JOISC 2018 记录
Day1 T1 Construction of Highway 每一次操作形如查询一条到根的链上的逆序对数,然后将这条链的权值全部修改成同一个权值. 发现这个操作类似于 LCT 的 Access 操作 ...
- Activiti7 多实例子流程
顾名思义,子流程是一个包含其他活动.网关.事件等的活动,这些活动本身形成了一个流程,该流程是更大流程的一部分. 使用子流程确实有一些限制: 一个子流程只能有一个none类型的启动事件,不允许有其他类型 ...
- useEffect与useLayoutEffect
useEffect与useLayoutEffect useEffect与useLayoutEffect可以统称为Effect Hook,Effect Hook可以在函数组件中执行副作用操作,副作用是指 ...
- fgrep命令
fgrep命令 当需要搜索包含很多正则表达式元字符的字符串时,例如$.^等,fgrep很有用,其通过指定搜索字符串包含固定字符,从而无需对每个字符进行转义用反斜杠,如果搜索的字符串包含换行符,则每行将 ...
- thinkphp集成webuploader实战
介绍 最近用了下thinkphp搞自己的博客,期间用到了百度的webuploader上传图片.百度出来的参考质量一言难尽,写教程没有一点追求,千篇一律的复制粘贴,某些个作者自己都没搞清楚就发文,误人又 ...
- zabbix-server.service failed解决方法
1.问题描述 centos7中安装的zabbix server在重启系统后无法启动了,查看状态报错如下: 2.问题原因 selinux没有关闭! 3.解决 永久关闭selinux, 将SELINUX值 ...
- 在Android开发中如何使用OpenSL ES库播放解码后的pcm音频文件?
一.认识OpenSL ES OpenSL ES的全称是Open Sound Library For Embedded Systems,即应用于嵌入式系统的开源音频库.Android从2.3版本起就开始 ...