java 层AudioTrack
legacy method 1:
// Legacy constructor 1: stream-type based. Delegates to legacy constructor 2,
// asking the native layer to allocate a session id.
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode) // 'mode' selects MODE_STATIC vs MODE_STREAM
        throws IllegalArgumentException {
    this(streamType, sampleRateInHz, channelConfig, audioFormat,
            bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    // AUDIO_SESSION_ID_GENERATE means the native side allocates a session id
}
legacy method 2: legacy method1 -> legacy method2 -> attribute way
// Legacy constructor 2: wraps the legacy stream type / format ints into
// AudioAttributes and AudioFormat objects, then delegates to the
// attribute-based constructor. Chain: legacy 1 -> legacy 2 -> attribute way.
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode, int sessionId)
        throws IllegalArgumentException {
    // mState already == STATE_UNINITIALIZED
    this((new AudioAttributes.Builder())
                .setLegacyStreamType(streamType)
                .build(),
            (new AudioFormat.Builder())
                .setChannelMask(channelConfig)
                .setEncoding(audioFormat)
                .setSampleRate(sampleRateInHz)
                .build(),
            bufferSizeInBytes,
            mode, sessionId);
    deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()"); // logs a deprecation warning
}
attribute way 1
// Attribute-based constructor: delegates to the final private constructor with
// the default non-offload, non-encapsulated, non-tuner configuration.
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId)
        throws IllegalArgumentException {
    this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
            ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
}
最终实现方式
// encapsulationMode: indicates metadata encapsulation, 数据中有metadata不是纯净的pcm data
// TunerConfiguration: 说的是 used to convey tuner information
// from the android.media.tv.Tuner API to AudioTrack construction.
// offload 说的是hardware直接处理
// 这三个参数默认(正常情况)分别是 offload=false、encapsulationMode=ENCAPSULATION_MODE_NONE、tunerConfiguration=null
// Final private constructor: all public constructors funnel here.
// NOTE(excerpt): the field initialization that derives sampleRate, mChannelMask,
// mAudioFormat, session, etc. from the parameters is elided in these notes;
// only the native_setup call and its error handling are shown.
private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId, boolean offload, int encapsulationMode,
        @Nullable TunerConfiguration tunerConfiguration)
        throws IllegalArgumentException {
    // native initialization: creates and configures the native AudioTrack;
    // the WeakReference lets native post events without pinning this object.
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
            offload, encapsulationMode, tunerConfiguration,
            getCurrentOpPackageName());
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing AudioTrack.");
        return; // with mState == STATE_UNINITIALIZED
    }
}
JNI level: native_setup
native_setup 创建native 层 AudioTrack 并set 赋值
1. lpTrack = new AudioTrack(opPackageNameStr.c_str());
2. lpTrack->set
frameworks/base/core/jni/android_media_AudioTrack.cpp
// JNI registration table (excerpt): maps the Java native_setup declaration to
// the C++ implementation android_media_AudioTrack_setup.
static const JNINativeMethod gMethods[] = {
    // name, signature, funcPtr
    {"native_setup",
     // signature mirrors the Java parameter list: weak ref, attributes, int[] rate, ...
     "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJZILjava/lang/Object;Ljava/lang/String;)I",
     (void *)android_media_AudioTrack_setup},
}
// JNI entry point for Java native_setup. Two jobs (excerpted here):
//   1. construct the native AudioTrack (lpTrack = new AudioTrack(...))
//   2. configure it via lpTrack->set(...)
// NOTE(excerpt): derivation of sampleRateInHertz/format/nativeChannelMask/
// frameCount/sessionId/lpJniStorage/offloadInfo from the JNI arguments, the
// MODE_STATIC branch, and the function's tail are elided in these notes.
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                           jobject jaa, jintArray jSampleRate,
                                           jint channelPositionMask, jint channelIndexMask,
                                           jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                           jintArray jSession, jlong nativeAudioTrack,
                                           jboolean offload, jint encapsulationMode,
                                           jobject tunerConfiguration, jstring opPackageName) {
    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"
          " nativeAudioTrack=0x%" PRIX64 ", offload=%d encapsulationMode=%d tuner=%p",
          jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
          nativeAudioTrack, offload, encapsulationMode, tunerConfiguration);
    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    sp<AudioTrack> lpTrack;
    if (nativeAudioTrack == 0) {
        // create the native AudioTrack object
        ScopedUtfChars opPackageNameStr(env, opPackageName);
        lpTrack = new AudioTrack(opPackageNameStr.c_str());
        // read the AudioAttributes values from the Java object into a native struct
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
              paa->usage, paa->content_type, paa->flags, paa->tags);
        // initialize the native AudioTrack object
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:
            status = lpTrack->set(AUDIO_STREAM_DEFAULT,
                                  // stream type, but more info conveyed
                                  // in paa (last argument)
                                  sampleRateInHertz,
                                  format, // word length, PCM
                                  nativeChannelMask, offload ? 0 : frameCount,
                                  offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                          : AUDIO_OUTPUT_FLAG_NONE,
                                  audioCallback,
                                  &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                  0, // notificationFrames == 0 since not using EVENT_MORE_DATA
                                     // to feed the AudioTrack
                                  0, // shared mem
                                  true, // thread can call Java
                                  sessionId, // audio session ID
                                  offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                          : AudioTrack::TRANSFER_SYNC,
                                  offload ? &offloadInfo : NULL, -1, -1, // default uid, pid values
                                  paa.get());
            break;
        }
    }
}
Native AudioTrack
AudioTrack构造函数
frameworks/av/media/libaudioclient/AudioTrack.cpp
// Native AudioTrack constructor: only records the op package name and resets
// state; the real configuration happens later in set().
AudioTrack::AudioTrack(const std::string& opPackageName)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mOpPackageName(opPackageName),
      mPauseTimeRealUs(0),
      mAudioTrackCallback(new AudioTrackCallback())
{
    // NOTE(review): mShootDetectWrapper / isAppSupported4D look like vendor
    // additions, not AOSP — confirm against the device tree.
    mShootDetectWrapper = NULL;
    isAppSupported4D = false;
    // Default attributes: "unknown" everywhere until set() supplies real ones.
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
AudioTrack::set
// AudioTrack::set — configures the track. Two key steps shown in this excerpt:
//   1. spawn the AudioTrackThread when a callback is supplied;
//   2. create the server-side IAudioTrack via createTrack_l().
// NOTE(excerpt): parameter validation and member initialization between the
// log and the thread creation are elided; the function tail is elided too.
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,              // event callback; non-NULL enables the callback thread
        void* user,                  // opaque cookie passed back to cbf
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer, // non-null => MODE_STATIC shared memory
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;
    ALOGD("set(sessionId=%d)", sessionId);
    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
    ALOGD("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          __func__,
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);
    // AudioTrack Thread: only created when the client registered a callback.
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }
    // create the IAudioTrack (binder object served by AudioFlinger)
    {
        AutoMutex lock(mLock);
        status = createTrack_l();
    }
}
AudioTrackThread
// Callback-delivery thread; starts paused and only runs after AudioTrack::start().
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
    : Thread(true /* bCanCallJava */) // binder recursion on restoreTrack_l() may call Java.
    , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}
bool AudioTrack::AudioTrackThread::threadLoop() // the thread's run body (elided in these notes)
{
}
createTrack_l()
根据输入参数 CreateTrackInput 得到 CreateTrackOutput
// createTrack_l — packs the client's request into a CreateTrackInput and asks
// AudioFlinger for an IAudioTrack; AudioFlinger answers via CreateTrackOutput.
// NOTE(excerpt): 'status' declaration and output handling are elided.
status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    IAudioFlinger::CreateTrackInput input;
    // Attributes win over the legacy stream type unless a concrete stream
    // type was requested.
    if (mStreamType != AUDIO_STREAM_DEFAULT) {
        input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
    } else {
        input.attr = mAttributes;
    }
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.config.offload_info = mOffloadInfoCopy;
    input.clientInfo.clientUid = mClientUid;
    input.clientInfo.clientPid = mClientPid;
    input.clientInfo.clientTid = -1;
    input.flags = mFlags;
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;
    input.audioTrackCallback = mAudioTrackCallback;
    input.opPackageName = mOpPackageName;
    IAudioFlinger::CreateTrackOutput output;
    // Binder call into AudioFlinger; 'track' is the server-side track handle.
    sp<IAudioTrack> track = audioFlinger->createTrack(input,
                                                      output,
                                                      &status);
}
/* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
* when calling createTrack() including arguments that will be updated by AudioFlinger
* and returned in CreateTrackOutput object
*/
class CreateTrackInput : public Parcelable {
    /* input */
    audio_attributes_t attr;          // what the client is playing (usage/content)
    audio_config_t config;            // sample rate, channel mask, format, offload info
    AudioClient clientInfo;           // uid/pid/tid of the client
    sp<IMemory> sharedBuffer;         // non-null for MODE_STATIC
    uint32_t notificationsPerBuffer;
    float speed;
    sp<media::IAudioTrackCallback> audioTrackCallback;
    std::string opPackageName;
    /* input/output — AudioFlinger may adjust these and echo them in CreateTrackOutput */
    audio_output_flags_t flags;
    size_t frameCount;
    size_t notificationFrameCount;
    audio_port_handle_t selectedDeviceId; // (mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE))
    audio_session_t sessionId;
};
/* CreateTrackOutput contains all output arguments returned by AudioFlinger to AudioTrack
* when calling createTrack() including arguments that were passed as I/O for update by
* CreateTrackInput.
*/
class CreateTrackOutput : public Parcelable {
    /* input/output — possibly-adjusted copies of the CreateTrackInput fields */
    audio_output_flags_t flags;
    size_t frameCount;
    size_t notificationFrameCount;
    audio_port_handle_t selectedDeviceId; // (output.selectedDeviceId = input.selectedDeviceId;)
    audio_session_t sessionId;
    /* output — values decided by AudioFlinger ("af" = audio flinger mixer thread) */
    uint32_t sampleRate;
    size_t afFrameCount;
    uint32_t afSampleRate;
    uint32_t afLatencyMs;
    audio_io_handle_t outputId;       // which output (mixer thread) the track was attached to
    audio_port_handle_t portId;       // unique id for this client on the output
};
audioFlinger->createTrack
AudioTrack 最终调用到 audioFlinger IBinder的createTrack
audioFlinger client: createTrack
// Client-side binder proxy: marshals CreateTrackInput, performs the
// CREATE_TRACK transaction, and unmarshals the IAudioTrack + CreateTrackOutput.
class BpAudioFlinger : public BpInterface<IAudioFlinger>
{
    virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
                                        CreateTrackOutput& output,
                                        status_t *status)
    {
        Parcel data, reply;
        sp<IAudioTrack> track;
        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
        // Guard against a null status out-parameter; returns a null track.
        if (status == nullptr) {
            return track;
        }
        input.writeToParcel(&data);
        status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
        if (lStatus != NO_ERROR) {
            ALOGE("createTrack transaction error %d", lStatus);
            *status = DEAD_OBJECT;
            return track;
        }
        // Reply layout: status, then the IAudioTrack binder, then the output struct.
        *status = reply.readInt32();
        if (*status != NO_ERROR) {
            ALOGE("createTrack returned error %d", *status);
            return track;
        }
        track = interface_cast<IAudioTrack>(reply.readStrongBinder());
        if (track == 0) {
            ALOGE("createTrack returned an NULL IAudioTrack with status OK");
            *status = DEAD_OBJECT;
            return track;
        }
        output.readFromParcel(&reply);
        return track;
    }
}
AudioFlinger Service: createTrack
// Server-side dispatch: unpacks the CREATE_TRACK parcel, calls the concrete
// createTrack() implementation, and writes status + binder + output back.
// NOTE(excerpt): other case labels and the function tail are elided.
status_t BnAudioFlinger::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
    case CREATE_TRACK: {
        CHECK_INTERFACE(IAudioFlinger, data, reply);
        CreateTrackInput input;
        if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
            reply->writeInt32(DEAD_OBJECT);
            return NO_ERROR;
        }
        status_t status;
        CreateTrackOutput output;
        sp<IAudioTrack> track= createTrack(input,
                                           output,
                                           &status);
        // Invariant: a non-null track if and only if status == NO_ERROR.
        LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
        reply->writeInt32(status);
        if (status != NO_ERROR) {
            return NO_ERROR;
        }
        reply->writeStrongBinder(IInterface::asBinder(track));
        output.writeToParcel(reply);
        return NO_ERROR;
    } break;
    }
AudioFlinger service createTrack的实现
frameworks/av/services/audioflinger/AudioFlinger.cpp
// AudioFlinger's createTrack implementation (excerpt): allocates the session
// id if needed, then asks the policy layer which output to route to.
// NOTE(excerpt): localAttr/clientPid/clientUid setup and the remainder of the
// function (thread lookup, Track creation, TrackHandle) are elided.
sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
                                          CreateTrackOutput& output,
                                          status_t *status)
{
    sp<PlaybackThread::Track> track;
    sp<TrackHandle> trackHandle;
    sp<Client> client;
    status_t lStatus;
    audio_stream_type_t streamType;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    std::vector<audio_io_handle_t> secondaryOutputs;
    // session id generation: allocate a fresh unique id when the client asked
    // for AUDIO_SESSION_ALLOCATE (AUDIO_SESSION_ID_GENERATE on the Java side)
    audio_session_t sessionId = input.sessionId;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    }
    // AudioSystem::getOutputForAttr — policy decides outputId / device / portId
    output.sessionId = sessionId;
    output.outputId = AUDIO_IO_HANDLE_NONE;
    output.selectedDeviceId = input.selectedDeviceId;
    lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType,
                                            clientPid, clientUid, &input.config, input.flags,
                                            &output.selectedDeviceId, &portId, &secondaryOutputs);
}
AudioSystem::getOutputForAttr
frameworks/av/media/libaudioclient/AudioSystem.cpp
// Thin forwarding shim: resolves the AudioPolicyService binder proxy and
// delegates the output-selection request to it unchanged.
// Returns NO_INIT when the policy service is not (yet) reachable.
status_t AudioSystem::getOutputForAttr(audio_attributes_t *attr,
                                       audio_io_handle_t *output,
                                       audio_session_t session,
                                       audio_stream_type_t *stream,
                                       pid_t pid,
                                       uid_t uid,
                                       const audio_config_t *config,
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
                                       audio_port_handle_t *portId,
                                       std::vector<audio_io_handle_t> *secondaryOutputs)
{
    const sp<IAudioPolicyService>& policyService = AudioSystem::get_audio_policy_service();
    if (policyService == 0) {
        return NO_INIT; // policy service not started / binder not available
    }
    return policyService->getOutputForAttr(attr, output, session, stream, pid, uid, config,
                                           flags, selectedDeviceId, portId, secondaryOutputs);
}
AudioPolicyService client 接口getOutputForAttr
frameworks/av/media/libaudioclient/IAudioPolicyService.cpp
// Binder proxy for IAudioPolicyService::getOutputForAttr: marshals the request,
// performs the GET_OUTPUT_FOR_ATTR transaction, and unmarshals the (possibly
// updated) attributes, output handle, stream, device, portId and secondary outputs.
// Returns the transaction status or the service-reported status on failure.
status_t getOutputForAttr(audio_attributes_t *attr,
                          audio_io_handle_t *output,
                          audio_session_t session,
                          audio_stream_type_t *stream,
                          pid_t pid,
                          uid_t uid,
                          const audio_config_t *config,
                          audio_output_flags_t flags,
                          audio_port_handle_t *selectedDeviceId,
                          audio_port_handle_t *portId,
                          std::vector<audio_io_handle_t> *secondaryOutputs) override
{
    Parcel data, reply;
    data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
    data.write(attr, sizeof(audio_attributes_t));
    data.writeInt32(session);
    // Optional stream: presence flag followed by the value.
    if (stream == NULL) {
        data.writeInt32(0);
    } else {
        data.writeInt32(1);
        data.writeInt32(*stream);
    }
    data.writeInt32(pid);
    data.writeInt32(uid);
    data.write(config, sizeof(audio_config_t));
    data.writeInt32(static_cast <uint32_t>(flags));
    data.writeInt32(*selectedDeviceId);
    data.writeInt32(*portId);
    status_t status = remote()->transact(GET_OUTPUT_FOR_ATTR, data, &reply);
    if (status != NO_ERROR) {
        return status;
    }
    status = (status_t)reply.readInt32();
    if (status != NO_ERROR) {
        return status;
    }
    // BUGFIX: read the returned attributes into the caller's struct (*attr).
    // The original passed '&attr', which copied sizeof(audio_attributes_t)
    // bytes over the 8-byte pointer variable itself — stack corruption.
    status = (status_t)reply.read(attr, sizeof(audio_attributes_t));
    if (status != NO_ERROR) {
        return status;
    }
    *output = (audio_io_handle_t)reply.readInt32();
    audio_stream_type_t lStream = (audio_stream_type_t)reply.readInt32();
    if (stream != NULL) {
        *stream = lStream;
    }
    *selectedDeviceId = (audio_port_handle_t)reply.readInt32();
    *portId = (audio_port_handle_t)reply.readInt32();
    // Secondary outputs come back as a count followed by raw handles.
    secondaryOutputs->resize(reply.readInt32());
    return reply.read(secondaryOutputs->data(),
                      secondaryOutputs->size() * sizeof(audio_io_handle_t));
}
AudioPolicyService service 接口getOutputForAttr实现
// Server-side dispatch for GET_OUTPUT_FOR_ATTR: unmarshals the request parcel
// (mirroring the proxy's write order), sanitizes the attributes, calls the
// concrete getOutputForAttr(), and writes the results back.
// NOTE(excerpt): the enclosing 'switch (code)' header and other case labels
// are elided in these notes.
status_t BnAudioPolicyService::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    case GET_OUTPUT_FOR_ATTR: {
        CHECK_INTERFACE(IAudioPolicyService, data, reply);
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        status_t status = data.read(&attr, sizeof(audio_attributes_t));
        if (status != NO_ERROR) {
            return status;
        }
        audio_session_t session = (audio_session_t)data.readInt32();
        audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
        // Optional stream: presence flag then value (matches proxy encoding).
        bool hasStream = data.readInt32() != 0;
        if (hasStream) {
            stream = (audio_stream_type_t)data.readInt32();
        }
        pid_t pid = (pid_t)data.readInt32();
        uid_t uid = (uid_t)data.readInt32();
        audio_config_t config;
        memset(&config, 0, sizeof(audio_config_t));
        data.read(&config, sizeof(audio_config_t));
        audio_output_flags_t flags =
                static_cast <audio_output_flags_t>(data.readInt32());
        audio_port_handle_t selectedDeviceId = data.readInt32();
        audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
        audio_io_handle_t output = 0;
        std::vector<audio_io_handle_t> secondaryOutputs;
        // "68953950" is the security-bug id tagged by the sanitizer.
        status = AudioSanitizer::sanitizeAudioAttributes(&attr, "68953950");
        if (status == NO_ERROR) {
            status = getOutputForAttr(&attr,
                                      &output, session, &stream, pid, uid,
                                      &config,
                                      flags, &selectedDeviceId, &portId, &secondaryOutputs);
        }
        // Reply layout: status, attributes, output, stream, device, portId,
        // secondary-output count, raw secondary-output handles.
        reply->writeInt32(status);
        status = reply->write(&attr, sizeof(audio_attributes_t));
        if (status != NO_ERROR) {
            return status;
        }
        reply->writeInt32(output);
        reply->writeInt32(stream);
        reply->writeInt32(selectedDeviceId);
        reply->writeInt32(portId);
        reply->writeInt32(secondaryOutputs.size());
        return reply->write(secondaryOutputs.data(),
                            secondaryOutputs.size() * sizeof(audio_io_handle_t));
    } break;
}
// AudioPolicyService::getOutputForAttr (excerpt): enforces caller identity and
// permissions, then delegates the actual routing decision to
// AudioPolicyManager, and registers the playback client on success.
// NOTE(excerpt): 'result' is not declared in this excerpt; error-path details
// are elided.
status_t AudioPolicyService::getOutputForAttr(audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              pid_t pid,
                                              uid_t uid,
                                              const audio_config_t *config,
                                              audio_output_flags_t flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId,
                                              std::vector<audio_io_handle_t> *secondaryOutputs)
{
    ALOGV("%s()", __func__);
    // Snapshot the effects helper under the lock, then call it unlocked.
    // NOTE(review): audioPolicyEffects is not null-checked before use here —
    // looks like a vendor change; confirm mAudioPolicyEffects can't be null.
    mLock.lock();
    sp<AudioPolicyEffects> audioPolicyEffects = mAudioPolicyEffects;
    mLock.unlock();
    audioPolicyEffects -> createGlobalEffects();
    Mutex::Autolock _l(mLock);
    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    // Only audioserver/mediaserver may act on behalf of another uid; everyone
    // else gets their uid overwritten with the real calling uid.
    if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) {
        ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
                 "%s uid %d tried to pass itself off as %d", __func__, callingUid, uid);
        uid = callingUid;
    }
    // Mark the stream as non-capturable if the app opted out of projection.
    if (!mPackageManager.allowPlaybackCapture(uid)) {
        attr->flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION;
    }
    // Strip bypass flags the caller is not privileged to use.
    if (((attr->flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
            && !bypassInterruptionPolicyAllowed(pid, uid)) {
        attr->flags &= ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE);
    }
    AutoCallerClear acc;
    AudioPolicyInterface::output_type_t outputType;
    // The real routing decision: delegate to AudioPolicyManager.
    result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
                                                   config,
                                                   &flags, selectedDeviceId, portId,
                                                   secondaryOutputs,
                                                   &outputType);
    // FIXME: Introduce a way to check for the the telephony device before opening the output
    if (result == NO_ERROR) {
        // enforce permission (if any) required for each type of input
        switch (outputType) {
        case AudioPolicyInterface::API_OUTPUT_LEGACY:
            break;
        case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
            if (!modifyPhoneStateAllowed(pid, uid)) {
                ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
                      __func__, uid);
                result = PERMISSION_DENIED;
            }
            break;
        case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
            if (!modifyAudioRoutingAllowed(pid, uid)) {
                ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
                      __func__, uid);
                result = PERMISSION_DENIED;
            }
            break;
        case AudioPolicyInterface::API_OUTPUT_INVALID:
        default:
            LOG_ALWAYS_FATAL("%s() encountered an invalid output type %d",
                             __func__, (int)outputType);
        }
    }
    // Bookkeeping: remember this playback client, keyed by its port id.
    if (result == NO_ERROR) {
        sp <AudioPlaybackClient> client =
                new AudioPlaybackClient(*attr, *output, uid, pid, session, *portId, *selectedDeviceId, *stream);
        mAudioPlaybackClients.add(*portId, client);
    }
    return result;
}
AudioPolicyService getOutputForAttr 由APM 实现
frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
// AudioPolicyManager::getOutputForAttr: validates the request, runs the core
// routing logic (getOutputForAttrInt), then registers a TrackClientDescriptor
// on the chosen output and hands back output/device/port ids.
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              uid_t uid,
                                              const audio_config_t *config,
                                              audio_output_flags_t *flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId,
                                              std::vector<audio_io_handle_t> *secondaryOutputs,
                                              output_type_t *outputType)
{
    // The supplied portId must be AUDIO_PORT_HANDLE_NONE
    if (*portId != AUDIO_PORT_HANDLE_NONE) {
        return INVALID_OPERATION;
    }
    const audio_port_handle_t requestedPortId = *selectedDeviceId;
    audio_attributes_t resultAttr;
    bool isRequestedDeviceForExclusiveUse = false;
    std::vector<sp<AudioPolicyMix>> secondaryMixes;
    const sp<DeviceDescriptor> requestedDevice =
            mAvailableOutputDevices.getDeviceFromId(requestedPortId);
    // Prevent from storing invalid requested device id in clients
    const audio_port_handle_t sanitizedRequestedPortId =
            requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE;
    *selectedDeviceId = sanitizedRequestedPortId;
    // NOTE(review): callingAppuid/callingAppName/getAppMaskByName look like
    // vendor additions (per-app routing masks) — not AOSP.
    callingAppuid = uid;
    callingAppName = getPackageName(uid);
    audio_app_type_f appType = getAppMaskByName(callingAppName);
    // Core policy: picks the device set and the output for these attributes.
    status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
                                          config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse,
                                          secondaryOutputs != nullptr ? &secondaryMixes : nullptr, outputType);
    if (status != NO_ERROR) {
        return status;
    }
    // Collect io handles of any secondary (dynamic policy mix) outputs.
    std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryOutputDescs;
    if (secondaryOutputs != nullptr) {
        for (auto &secondaryMix : secondaryMixes) {
            sp<SwAudioOutputDescriptor> outputDesc = secondaryMix->getOutput();
            if (outputDesc != nullptr &&
                    outputDesc->mIoHandle != AUDIO_IO_HANDLE_NONE) {
                secondaryOutputs->push_back(outputDesc->mIoHandle);
                weakSecondaryOutputDescs.push_back(outputDesc);
            }
        }
    }
    audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
                                        .channel_mask = config->channel_mask,
                                        .format = config->format,
                                       };
    // Allocate the client's unique port id and register it on the output.
    *portId = PolicyAudioPort::getNextUniqueId();
    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
    sp<TrackClientDescriptor> clientDesc =
            new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
                                      sanitizedRequestedPortId, *stream,
                                      mEngine->getProductStrategyForAttributes(resultAttr),
                                      toVolumeSource(resultAttr),
                                      *flags, isRequestedDeviceForExclusiveUse,
                                      std::move(weakSecondaryOutputDescs),
                                      outputDesc->mPolicyMix);
    outputDesc->addClient(clientDesc);
    clientDesc->setAppName(String8(callingAppName));
    outputDesc->setAppMask(appType); //output
    clientDesc->setAppMask(appType); //output
    // Vendor hook: tell the HAL when a telephony ringtone starts.
    if ((attr->content_type == AUDIO_CONTENT_TYPE_SONIFICATION) &&
            (attr->usage == AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE) &&
            (*stream == AUDIO_STREAM_RING)) {
        mpClientInterface->setParameters(outputDesc->mIoHandle, String8("output_for_sing"));
    }
    ALOGD("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__,
          *output, requestedPortId, *selectedDeviceId, *portId);
    return NO_ERROR;
}
AudioPolicyManager::getOutputForAttrInt
只关注getOutputForAttrInt部分,这是最终选择device的关键部分,policy的最终体现
// getOutputForAttrInt — the heart of the routing policy:
//   [1] map attributes to a stream type (engine),
//   [2] map attributes to a set of candidate output devices (engine),
//   [3] pick the actual output for those devices (getOutputForDevices).
// NOTE(excerpt): MSD (multi-stream decoder) routing and error handling are
// elided, which is why the 'if (*output == AUDIO_IO_HANDLE_NONE)' test right
// after the assignment is trivially true here.
status_t AudioPolicyManager::getOutputForAttrInt(
        audio_attributes_t *resultAttr,
        audio_io_handle_t *output,
        audio_session_t session,
        const audio_attributes_t *attr,
        audio_stream_type_t *stream,
        uid_t uid,
        const audio_config_t *config,
        audio_output_flags_t *flags,
        audio_port_handle_t *selectedDeviceId,
        bool *isRequestedDeviceForExclusiveUse,
        std::vector<sp<AudioPolicyMix>> *secondaryMixes,
        output_type_t *outputType)
{
    DeviceVector outputDevices;
    const audio_port_handle_t requestedPortId = *selectedDeviceId;
    DeviceVector msdDevices = getMsdAudioOutDevices();
    const sp<DeviceDescriptor> requestedDevice =
            mAvailableOutputDevices.getDeviceFromId(requestedPortId);
    *outputType = API_OUTPUT_INVALID;
    status_t status = getAudioAttributes(resultAttr, attr, *stream);
    // [1]: mEngine->getStreamTypeForAttributes — attributes drive the stream type
    *stream = mEngine->getStreamTypeForAttributes(*resultAttr);
    // log the incoming request parameters
    ALOGD("%s()+ attributes=%s stream=%s session %d output %d selectedDeviceId %d device %s sampling rate %d format %#x"
          "channel mask %#x flags %#x stream %s uid %d",
          __func__, toString(*resultAttr).c_str(), toString(*stream).c_str(), session, *output,
          requestedPortId, outputDevices.toString().c_str(), config->sample_rate, config->format,
          config->channel_mask, *flags, toString(*stream).c_str(), uid);
    // [2]: mEngine->getOutputDevicesForAttributes — pick candidate devices
    outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false);
    // a set of candidate devices has been selected
    ALOGV("%s() device %s, sampling rate %d, format %#x, channel mask %#x, flags %#x stream %s",
          __func__, outputDevices.toString().c_str(), config->sample_rate, config->format,
          config->channel_mask, *flags, toString(*stream).c_str());
    *output = AUDIO_IO_HANDLE_NONE;
    if (*output == AUDIO_IO_HANDLE_NONE) {
        // [3] getOutputForDevices — pick the concrete output for the device set
        *output = getOutputForDevices(outputDevices, session, *stream, config,
                                      flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
    }
    *selectedDeviceId = getFirstDeviceId(outputDevices);
    // a single device has been selected
    ALOGD("%s()- attributes=%s stream=%s session %d output %d selectedDeviceId %d device %s sampling rate %d format %#x"
          "channel mask %#x flags %#x stream %s uid %d",
          __func__, toString(*resultAttr).c_str(), toString(*stream).c_str(), session, *output,
          requestedPortId, outputDevices.toString().c_str(), config->sample_rate, config->format,
          config->channel_mask, *flags, toString(*stream).c_str(), uid);
    return NO_ERROR;
}
mEngine->getStreamTypeForAttributes
// Resolves the legacy stream type for a set of audio attributes by consulting
// the engine's product-strategy table.
audio_stream_type_t EngineBase::getStreamTypeForAttributes(const audio_attributes_t &attr) const
{
    const auto &strategies = mProductStrategies;
    return strategies.getStreamTypeForAttributes(attr);
}
// Returns the stream type of the first attributes entry of this strategy that
// matches the client attributes, falling back to AUDIO_STREAM_MUSIC when the
// matched entry carries no specific stream type.
audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(
        const audio_attributes_t &attr) const
{
    const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
                                    [&attr](const auto &supportedAttr) {
        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr); });
    // BUGFIX: std::find_if returns end() when nothing matches; the original
    // dereferenced it unconditionally (undefined behavior). Treat "no match"
    // like an AUDIO_STREAM_DEFAULT match and fall back to MUSIC.
    if (iter == end(mAttributesVector)) {
        return AUDIO_STREAM_MUSIC;
    }
    audio_stream_type_t streamType = iter->mStream;
    return streamType != AUDIO_STREAM_DEFAULT ? streamType : AUDIO_STREAM_MUSIC;
}
// Predicate: does a client's attributes tuple match a strategy's reference
// attributes? Each reference field acts as a wildcard when it holds its
// "unknown"/"none"/empty value; otherwise the client field must agree
// (for flags, every reference bit must be set in the client flags).
bool AudioProductStrategy::attributesMatches(const audio_attributes_t refAttributes,
                                             const audio_attributes_t clientAttritubes)
{
    // usage must match unless the reference accepts any usage
    if (refAttributes.usage != AUDIO_USAGE_UNKNOWN &&
            clientAttritubes.usage != refAttributes.usage) {
        return false;
    }
    // content type must match unless the reference accepts any content type
    if (refAttributes.content_type != AUDIO_CONTENT_TYPE_UNKNOWN &&
            clientAttritubes.content_type != refAttributes.content_type) {
        return false;
    }
    // all reference flag bits must be present in the client flags
    if (refAttributes.flags != AUDIO_FLAG_NONE) {
        if (clientAttritubes.flags == AUDIO_FLAG_NONE ||
                (clientAttritubes.flags & refAttributes.flags) != refAttributes.flags) {
            return false;
        }
    }
    // tags must compare equal unless the reference has no tags
    if (strlen(refAttributes.tags) != 0 &&
            std::strcmp(clientAttritubes.tags, refAttributes.tags) != 0) {
        return false;
    }
    return true;
}
getOutputDevicesForAttributes
根据Attributes 得到Devices有两种方式:1. findPreferredDevice,
2. getDevicesForProductStrategy (这里忽略了mDevicesForStrategies.at(strategy))
findPreferredDevice没有看懂,这里主要分析 getDevicesForProductStrategy
// Maps attributes to a set of candidate output devices. Precedence:
//   1. an explicitly requested (preferred) device,
//   2. the preferred route of the active clients of the same strategy,
//   3. cached per-strategy devices, or a fresh getDevicesForProductStrategy().
DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
                                                   const sp<DeviceDescriptor> &preferredDevice,
                                                   bool fromCache) const
{
    // First check for explicit routing device (client asked for a device by id)
    if (preferredDevice != nullptr) {
        ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
        return DeviceVector(preferredDevice);
    }
    product_strategy_t strategy = getProductStrategyForAttributes(attributes);
    const DeviceVector availableOutputDevices =
            getApmObserver()->getAvailableOutputDevices();
    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
    //
    // Honor explicit routing requests only if all active clients have a preferred route in which
    // case the last active client route is used
    sp<DeviceDescriptor> device = findPreferredDevice(outputs,
                                                      strategy, availableOutputDevices);
    if (device != nullptr) {
        return DeviceVector(device);
    }
    // fromCache: reuse the devices computed at the last policy update;
    // otherwise recompute from the strategy rules.
    return fromCache?
            mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
}
findPreferredDevice
没看懂怎么找到 preferredDevice的
// Sorted collection of device descriptors (the "candidate devices" type).
class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
{
}
// Describes a device port (one entry of a module's declared devices).
class DeviceDescriptor : public DeviceDescriptorBase,
                         public PolicyAudioPort, public PolicyAudioPortConfig
{
private:
    std::string mTagName; // Unique human readable identifier for a device port found in conf file.
    FormatVector mEncodedFormats;
    audio_format_t mCurrentEncodedFormat;
}
// Map from io handle to the software-output descriptor currently opened on it.
class SwAudioOutputCollection :
        public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
{
}
// Audio output driven by a software mixer in audio flinger.
class SwAudioOutputDescriptor: public AudioOutputDescriptor
{
}
// Audio output driven by an input device directly.
class HwAudioOutputDescriptor: public AudioOutputDescriptor
{
}
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
class AudioOutputDescriptor: public AudioPortConfig,
                             public PolicyAudioPortConfig,
                             public AudioIODescriptorInterface,
                             public ClientMapHandler<TrackClientDescriptor>
{
}
getDevicesForProductStrategy
getDevicesForProductStrategy 有两种方式:1. 直接从getPreferredDeviceForStrategy得到
2. getDevicesForStrategyInt 根据strategy类型去选择
// Resolves the device set for one product strategy:
//   [1] a configured preferred device for the strategy, if currently available;
//   [2] otherwise the legacy per-strategy rules (getDevicesForStrategyInt).
DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t strategy) const {
    DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
    //[1] check if this strategy has a preferred device that is available,
    // if yes, give priority to it
    AudioDeviceTypeAddr preferredStrategyDevice;
    const status_t status = getPreferredDeviceForStrategy(strategy, preferredStrategyDevice);
    if (status == NO_ERROR) {
        // there is a preferred device, is it available?
        sp<DeviceDescriptor> preferredAvailableDevDescr = availableOutputDevices.getDevice(
                preferredStrategyDevice.mType,
                String8(preferredStrategyDevice.mAddress.c_str()),
                AUDIO_FORMAT_DEFAULT);
        if (preferredAvailableDevDescr != nullptr) {
            ALOGVV("%s using pref device 0x%08x/%s for strategy %u",
                   __func__, preferredStrategyDevice.mType,
                   preferredStrategyDevice.mAddress.c_str(), strategy);
            return DeviceVector(preferredAvailableDevDescr);
        }
    }
    //[2] legacyStrategy: translate the product strategy to the legacy enum
    // and apply the hard-coded per-strategy selection rules.
    DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
    const SwAudioOutputCollection& outputs = getApmObserver()->getOutputs();
    auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
            mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
    return getDevicesForStrategyInt(legacyStrategy,
                                    availableOutputDevices,
                                    availableInputDevices, outputs);
}
[1] getPreferredDeviceForStrategy(strategy, preferredStrategyDevice)
// EngineBase::setPreferredDeviceForStrategy
// (product_strategy_t strategy, const AudioDeviceTypeAddr &device)
// 策略和Device的对应关系
// Looks up the preferred device registered for a strategy (the counterpart of
// setPreferredDeviceForStrategy). Returns BAD_VALUE for an unknown strategy,
// NAME_NOT_FOUND when no preference is registered, NO_ERROR otherwise.
status_t EngineBase::getPreferredDeviceForStrategy(product_strategy_t strategy,
                                                   AudioDeviceTypeAddr &device) const
{
    // Reject strategies that are not declared at all.
    if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
        ALOGE("%s unknown strategy %u", __func__, strategy);
        return BAD_VALUE;
    }
    // Known strategy — does it carry a preferred-device entry?
    const auto preferredIt = mProductStrategyPreferredDevices.find(strategy);
    if (preferredIt == mProductStrategyPreferredDevices.end()) {
        ALOGV("%s no preferred device for strategy %u", __func__, strategy);
        return NAME_NOT_FOUND;
    }
    device = preferredIt->second;
    return NO_ERROR;
}
// Strategy -> preferred device mapping used by get/setPreferredDeviceForStrategy.
class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t, AudioDeviceTypeAddr>
{
public:
    void dump(String8 *dst, int spaces = 0) const;
};
//[2] getDevicesForStrategyInt :mLegacyStrategyMap
// Table mapping product-strategy names (from configuration) to legacy strategy ids.
struct legacy_strategy_map { const char *name; legacy_strategy id; };
static const std::vector<legacy_strategy_map>& getLegacyStrategy() {
    static const std::vector<legacy_strategy_map> legacyStrategy = {
        { "STRATEGY_NONE", STRATEGY_NONE },
        { "STRATEGY_MEDIA", STRATEGY_MEDIA },
        { "STRATEGY_PHONE", STRATEGY_PHONE },
        { "STRATEGY_SONIFICATION", STRATEGY_SONIFICATION },
        { "STRATEGY_SONIFICATION_RESPECTFUL", STRATEGY_SONIFICATION_RESPECTFUL },
        { "STRATEGY_DTMF", STRATEGY_DTMF },
        { "STRATEGY_ENFORCED_AUDIBLE", STRATEGY_ENFORCED_AUDIBLE },
        { "STRATEGY_TRANSMITTED_THROUGH_SPEAKER", STRATEGY_TRANSMITTED_THROUGH_SPEAKER },
        { "STRATEGY_ACCESSIBILITY", STRATEGY_ACCESSIBILITY },
        { "STRATEGY_REROUTING", STRATEGY_REROUTING },
        { "STRATEGY_PATCH", STRATEGY_REROUTING }, // boiler to manage stream patch volume
        { "STRATEGY_CALL_ASSISTANT", STRATEGY_CALL_ASSISTANT },
    };
    return legacyStrategy;
}
getDevicesForStrategyInt:
代码的逻辑很简单switch 遍历strategy, 通过getDevicesFromType 找到devices
// Legacy per-strategy device selection: switch over the strategy and probe
// available device types in priority order via getDevicesFromType.
// NOTE(excerpt): only a fragment of the STRATEGY_MEDIA case is shown (the
// devices2 -> devices handoff and other strategies are elided), so 'devices'
// is never populated from 'devices2' in this view.
DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy,
                                              DeviceVector availableOutputDevices,
                                              DeviceVector availableInputDevices,
                                              const SwAudioOutputCollection &outputs) const
{
    DeviceVector devices;
    switch (strategy) {
    case STRATEGY_MEDIA: {
        DeviceVector devices2;
        // Hearing aid wins unless A2DP is force-disabled for media.
        if ((devices2.isEmpty()) &&
                (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
            devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
        }
        // Forced-speaker routing for media.
        if ((devices2.isEmpty()) &&
                (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
            devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
        }
    }
    // Last resort: fall back to the configured default output device.
    if (devices.isEmpty()) {
        ALOGV("getDevicesForStrategy() no device found for strategy %d", strategy);
        sp<DeviceDescriptor> defaultOutputDevice = getApmObserver()->getDefaultOutputDevice();
        if (defaultOutputDevice != nullptr) {
            devices.add(defaultOutputDevice);
        }
        ALOGE_IF(devices.isEmpty(),
                 "getDevicesForStrategy() no default device defined");
    }
    // log the chosen devices
    ALOGV("getDevices ForStrategy() strategy %d, device %s",
          strategy, dumpDeviceTypes(devices.types()).c_str());
    return devices;
}
上面我们根据attribute已经找到一组 devices, 还没有使用具体的信息如sample rate, format等等
getOutputForDevices 再结合 sample rate、format、flags 等具体参数, 从这组 devices 可达的已打开 outputs 中选出最合适的 output (audio_io_handle_t)。
getOutputForDevices
// Selects the output stream (audio_io_handle_t) best suited to reach `devices`
// for the given stream type and config. Mutates *flags in several ordered
// steps (offload/HW-A/V-sync imply DIRECT; deep-buffer defaulting for music;
// TTS/VoIP overrides), then tries a direct output first and only falls back
// to a mixed (linear-PCM) output. Returns AUDIO_IO_HANDLE_NONE on failure.
audio_io_handle_t AudioPolicyManager::getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
audio_stream_type_t stream,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
// [1] Flag/channel-mask adjustments before any output lookup.
// Discard haptic channel mask when forcing muting haptic channels.
audio_channel_mask_t channelMask = forceMutingHaptic
? (config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL) : config->channel_mask;
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
// this should normally be set appropriately in the policy configuration file
if ((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// HW A/V sync also requires a direct stream (timestamps travel with the data).
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
} else if (/* stream == AUDIO_STREAM_MUSIC && */
*flags == AUDIO_OUTPUT_FLAG_NONE &&
property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
// use DEEP_BUFFER as default output for music stream type
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
}
// TTS streams get the dedicated TTS flag; PCM voice-call playback (that is not
// in-call music) is rerouted to the VoIP RX direct path.
if (stream == AUDIO_STREAM_TTS) {
*flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
audio_is_linear_pcm(config->format) &&
(*flags & AUDIO_OUTPUT_FLAG_INCALL_MUSIC) == 0) {
*flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
}
// [2] Always attempt a direct output first, even without the DIRECT flag:
// openDirectOutput decides applicability itself. NAME_NOT_FOUND means "no
// direct output applies, keep going"; any other status (success or a real
// error) is final and is returned as-is.
audio_config_t directConfig = *config;
directConfig.channel_mask = channelMask;
status_t status = openDirectOutput(stream, session, &directConfig, *flags, devices, &output);
if (status != NAME_NOT_FOUND) {
return output;
}
// A request for HW A/V sync cannot fallback to a mixed output because time
// stamps are embedded in audio data
if ((*flags & (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)) != 0) {
return AUDIO_IO_HANDLE_NONE;
}
// ignoring channel mask due to downmix capability in mixer
// open a non direct output
// [3] for non direct outputs, only PCM is supported
if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
}
ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
"sampling rate %d, format %#x, channels %#x, flags %#x",
stream, config->sample_rate, config->format, channelMask, *flags);
return output;
}
这里主要关注 audio_is_linear_pcm 分支: 它先调用 getOutputsForDevices 得到候选 outputs, 再调用 selectOutput 从中选出最佳 output。
SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
getOutputsForDevices
从 openOutputs (已打开的输出集合) 中筛选出支持给定 devices 的 audio_io_handle_t 列表。
// Collects the handles of all already-opened outputs that can reach every
// device in `devices` and support the devices' encoded formats.
// @param devices     target device set selected by the policy engine
// @param openOutputs all currently opened software outputs
// @return sorted list of matching audio_io_handle_t (possibly empty)
SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevices(
const DeviceVector &devices,
const SwAudioOutputCollection& openOutputs)
{
SortedVector<audio_io_handle_t> outputs;
ALOGVV("%s() devices %s", __func__, devices.toString().c_str());
for (size_t idx = 0; idx < openOutputs.size(); idx++) {
const sp<SwAudioOutputDescriptor>& desc = openOutputs.valueAt(idx);
ALOGVV("output %zu isDuplicated=%d device=%s",
idx, desc->isDuplicated(),
desc->supportedDevices().toString().c_str());
// Keep the format check behind the device check (short-circuit),
// matching the original evaluation order.
if (desc->supportsAllDevices(devices)
&& desc->devicesSupportEncodedFormats(devices.types())) {
ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(idx));
outputs.add(openOutputs.keyAt(idx));
}
}
return outputs;
}
selectOutput
根据 flags, format, channel_mask, samplingRate 从候选 audio_io_handle 中找到最佳匹配 (bestOutput)。
// Picks the single best output among `outputs` for the requested flags,
// format, channel mask and sampling rate, using a lexicographic comparison
// of eight prioritized criteria (see the list below). Vendor addition at the
// end: non-CTS apps are steered off the ULL (FAST|RAW) output onto the
// primary output.
// Precondition (fatal): format is AUDIO_FORMAT_INVALID or linear PCM.
audio_io_handle_t AudioPolicyManager::selectOutput(
const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format,
audio_channel_mask_t channelMask,
uint32_t samplingRate)
{
audio_io_handle_t primary_output = 0;
// NOTE(review): callingAppName / getAppMaskByName come from outside this
// function — presumably the binder caller's package name; confirm how it is
// set and whether it is safe to read here.
bool isCts = getAppMaskByName(callingAppName) & APP_TYPE_CTS_AUDIOPRO ? true : false;
LOG_ALWAYS_FATAL_IF(!(format == AUDIO_FORMAT_INVALID || audio_is_linear_pcm(format)),
"%s called with format %#x", __func__, format);
// Flags disqualifying an output: the match must happen before calling selectOutput()
static const audio_output_flags_t kExcludedFlags = (audio_output_flags_t)
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
// Flags expressing a functional request: must be honored in priority over
// other criteria
static const audio_output_flags_t kFunctionalFlags = (audio_output_flags_t)
(AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_INCALL_MUSIC |
AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM | AUDIO_OUTPUT_FLAG_VIRTUAL_DEEP_BUFFER);
// Flags expressing a performance request: have lower priority than serving
// requested sampling rate or channel mask
static const audio_output_flags_t kPerformanceFlags = (audio_output_flags_t)
(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER |
AUDIO_OUTPUT_FLAG_RAW | AUDIO_OUTPUT_FLAG_SYNC);
const audio_output_flags_t functionalFlags =
(audio_output_flags_t)(flags & kFunctionalFlags);
const audio_output_flags_t performanceFlags =
(audio_output_flags_t)(flags & kPerformanceFlags);
// Default to the first candidate so an answer exists even if every candidate
// is skipped by the filters below.
audio_io_handle_t bestOutput = (outputs.size() == 0) ? AUDIO_IO_HANDLE_NONE : outputs[0];
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously build by getOutputsForDevices()).
// The priority is as follows:
// 1: the output supporting haptic playback when requesting haptic playback
// 2: the output with the highest number of requested functional flags
// 3: the output supporting the exact channel mask
// 4: the output with a higher channel count than requested
// 5: the output with a higher sampling rate than requested
// 6: the output with the highest number of requested performance flags
// 7: the output with the bit depth the closest to the requested one
// 8: the primary output
// 9: the first output in the list
// matching criteria values in priority order for best matching output so far
std::vector<uint32_t> bestMatchCriteria(8, 0);
const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(
channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
// matching criteria values in priority order for current output
std::vector<uint32_t> currentMatchCriteria(8, 0);
// Duplicated outputs are never selected directly.
if (outputDesc->isDuplicated()) {
continue;
}
// Direct / HW-A/V-sync / MMAP outputs must have been matched earlier.
if ((kExcludedFlags & outputDesc->mFlags) != 0) {
continue;
}
// If haptic channel is specified, use the haptic output if present.
// When using haptic output, same audio format and sample rate are required.
const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask(
outputDesc->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
// Haptic capability must agree in kind: a haptic request never matches a
// non-haptic output, and vice versa.
if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) {
continue;
}
if (outputHapticChannelCount >= hapticChannelCount
&& format == outputDesc->getFormat()
&& samplingRate == outputDesc->getSamplingRate()) {
currentMatchCriteria[0] = outputHapticChannelCount;
}
// functional flags match
currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
// channel mask and channel count match
uint32_t outputChannelCount = audio_channel_count_from_out_mask(
outputDesc->getChannelMask());
if (channelMask != AUDIO_CHANNEL_NONE && channelCount > 2 &&
channelCount <= outputChannelCount) {
if ((audio_channel_mask_get_representation(channelMask) ==
audio_channel_mask_get_representation(outputDesc->getChannelMask())) &&
((channelMask & outputDesc->getChannelMask()) == channelMask)) {
currentMatchCriteria[2] = outputChannelCount;
}
currentMatchCriteria[3] = outputChannelCount;
}
// sampling rate match
if (samplingRate > SAMPLE_RATE_HZ_DEFAULT &&
samplingRate <= outputDesc->getSamplingRate()) {
currentMatchCriteria[4] = outputDesc->getSamplingRate();
}
// performance flags match
currentMatchCriteria[5] = popcount(outputDesc->mFlags & performanceFlags);
// format match
if (format != AUDIO_FORMAT_INVALID) {
currentMatchCriteria[6] =
PolicyAudioPort::kFormatDistanceMax -
PolicyAudioPort::formatDistance(format, outputDesc->getFormat());
}
// primary output match
currentMatchCriteria[7] = outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY;
// Remember the primary output for the vendor CTS/ULL fixup below.
if (currentMatchCriteria[7])
primary_output = output;
// compare match criteria by priority then value: lexicographical_compare
// returns true when the current candidate beats the best so far.
if (std::lexicographical_compare(bestMatchCriteria.begin(), bestMatchCriteria.end(),
currentMatchCriteria.begin(), currentMatchCriteria.end())) {
bestMatchCriteria = currentMatchCriteria;
bestOutput = output;
std::stringstream result;
std::copy(bestMatchCriteria.begin(), bestMatchCriteria.end(),
std::ostream_iterator<int>(result, " "));
ALOGV("%s new bestOutput %d criteria %s",
__func__, bestOutput, result.str().c_str());
}
}
// Vendor addition: only CTS (audio-pro) apps may use the ULL output; every
// other app is redirected to the primary output when the winner is FAST|RAW.
{
//only cts use ull, the others use primary output
//if do not have ull profile(audio_policy_configuration.xml), cannot create ull desc.
//so it must create ull output first
sp<SwAudioOutputDescriptor> outputDescPrimary = mOutputs.valueFor(bestOutput);
if (outputDescPrimary != nullptr && (outputDescPrimary->mFlags == (AUDIO_OUTPUT_FLAG_FAST|AUDIO_OUTPUT_FLAG_RAW))
&& !isCts && primary_output != 0) {
bestOutput = primary_output;
}
}
return bestOutput;
}