Hi all,
I've run into an issue with the AUGraph API when using a Bluetooth headset on a
device running iOS 10. When I start the graph, the input callback gets
called, but AudioUnitRender only renders zeros (silence).
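For context, the audio session is configured for play-and-record with Bluetooth
input allowed; a simplified sketch of that setup (the options in our real code
may differ slightly):

    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord
             withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                   error:&error];
    [session setActive:YES error:&error];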
Here’s how I set up the graph:
- (void)setupGraph
{
    // ================= create a new AUGraph =================
    LogOnError(NewAUGraph(&_graph), @"creating au graph");
    if (unlikely(_graph == NULL))
    {
        return;
    }

    // ================= descriptions =================
    AudioComponentDescription outputDesc;
    AudioComponentDescription iPodTimeDesc;
    AudioComponentDescription inConverterDesc;
    AudioComponentDescription outConverterDesc;

    // remote I/O
    memset(&outputDesc, 0, sizeof(AudioComponentDescription));
    outputDesc.componentType = kAudioUnitType_Output;
    if (kHasMicrophone == self.micStatus)
    {
        // use Apple's voice unit: this gets us AGC and echo cancellation
        outputDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    }
    else
    {
        // raw audio unit, no system processing
        outputDesc.componentSubType = kAudioUnitSubType_RemoteIO;
    }
    outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
    // input converter
    memset(&inConverterDesc, 0, sizeof(AudioComponentDescription));
    inConverterDesc.componentType = kAudioUnitType_FormatConverter;
    inConverterDesc.componentSubType = kAudioUnitSubType_AUConverter;
    inConverterDesc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // iPodTime -- this one sounds much better than kAudioUnitSubType_AUiPodTime;
    // it is iOS 5+ only
    memset(&iPodTimeDesc, 0, sizeof(AudioComponentDescription));
    iPodTimeDesc.componentType = kAudioUnitType_FormatConverter;
    iPodTimeDesc.componentSubType = kAudioUnitSubType_AUiPodTimeOther;
    iPodTimeDesc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // output converter
    memset(&outConverterDesc, 0, sizeof(AudioComponentDescription));
    outConverterDesc.componentType = kAudioUnitType_FormatConverter;
    outConverterDesc.componentSubType = kAudioUnitSubType_AUConverter;
    outConverterDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
    // ================= create nodes and add to the graph =================
    AUNode outputNode;
    AUNode outConverterNode;
    AUNode iPodTimeNode;
    AUNode inConverterNode;
    LogOnError(AUGraphAddNode(_graph, &outputDesc, &outputNode),
               @"adding output Node");
    LogOnError(AUGraphAddNode(_graph, &outConverterDesc, &outConverterNode),
               @"adding output converter Node");
    LogOnError(AUGraphAddNode(_graph, &iPodTimeDesc, &iPodTimeNode),
               @"adding iPod Node");
    LogOnError(AUGraphAddNode(_graph, &inConverterDesc, &inConverterNode),
               @"adding input converter Node");

    // ====== connect nodes: converter -> iPodTime -> converter -> output ======
    LogOnError(AUGraphConnectNodeInput(_graph, inConverterNode, 0, iPodTimeNode, 0),
               @"connecting nodes");
    LogOnError(AUGraphConnectNodeInput(_graph, iPodTimeNode, 0, outConverterNode, 0),
               @"connecting nodes");
    LogOnError(AUGraphConnectNodeInput(_graph, outConverterNode, 0, outputNode, 0),
               @"connecting nodes");
    // ================= open graph and grab the AUs =================
    LogOnError(AUGraphOpen(_graph), @"opening the graph");
    LogOnError(AUGraphNodeInfo(_graph, outputNode, NULL, &_outputAU),
               @"getting AU instance");
    LogOnError(AUGraphNodeInfo(_graph, outConverterNode, NULL, &_outConverterAU),
               @"getting AU instance");
    LogOnError(AUGraphNodeInfo(_graph, iPodTimeNode, NULL, &_iPodTimeAU),
               @"getting AU instance");
    LogOnError(AUGraphNodeInfo(_graph, inConverterNode, NULL, &_inConverterAU),
               @"getting AU instance");
    // ================= setup callbacks =================
    AURenderCallbackStruct rcbs;
    rcbs.inputProc = &VXAudioGraphRender; // our "input" callback that generates
                                          // the audio we play
    rcbs.inputProcRefCon = (__bridge void *)(self);

    // set a callback on the first AU in the chain to feed it our input
    LogOnError(AUGraphSetNodeInputCallback(_graph, inConverterNode, 0, &rcbs),
               @"adding input callback");

    // set our remote I/O input callback so we can grab the data from the
    // device's input. No sense firing up the input bus if the device has no
    // microphone.
    if (kHasMicrophone == self.micStatus)
    {
        // Enable IO for recording (input element 1) and playback (output element 0)
        UInt32 enabled = 1;
        LogOnError(AudioUnitSetProperty(_outputAU, kAudioOutputUnitProperty_EnableIO,
                                        kAudioUnitScope_Input, 1,
                                        &enabled, sizeof(enabled)),
                   @"enabling remote io recording");
        LogOnError(AudioUnitSetProperty(_outputAU, kAudioOutputUnitProperty_EnableIO,
                                        kAudioUnitScope_Output, 0,
                                        &enabled, sizeof(enabled)),
                   @"enabling remote io playback");

        // Set input callback
        AURenderCallbackStruct callbackStruct;
        callbackStruct.inputProc = VXAudioInputFromDevice;
        callbackStruct.inputProcRefCon = (__bridge void *)(self);
        LogOnError(AudioUnitSetProperty(_outputAU, kAudioOutputUnitProperty_SetInputCallback,
                                        kAudioUnitScope_Global, 1,
                                        &callbackStruct, sizeof(callbackStruct)),
                   @"setting remote io input callback");
    }
    // ================= set the stream formats =================
    // According to Apple (http://developer.apple.com/library/ios/#qa/qa1717/_index.html),
    // to use the iPod AU we need to use the "AUCanonical" fixed-point sample format...
    AudioStreamBasicDescription audioFormat;
    [self configFormat:&audioFormat];
    AudioStreamBasicDescription audioFormatCanonical;
    [self configAUCanonicalFormat:&audioFormatCanonical];
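    // (For reference, a sketch of what our configAUCanonicalFormat: helper
    // produces, per QA1717 -- 8.24 fixed point; the real helper may differ:
    //     fmt->mFormatID        = kAudioFormatLinearPCM;
    //     fmt->mFormatFlags     = kAudioFormatFlagsAudioUnitCanonical;
    //     fmt->mBitsPerChannel  = 8 * sizeof(AudioUnitSampleType);
    //     fmt->mFramesPerPacket = 1;
    //     fmt->mBytesPerFrame   = fmt->mBytesPerPacket = sizeof(AudioUnitSampleType);
    //     ...plus sample rate and channel count matching audioFormat.)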
    const UInt32 kMaxFramesPerSlice = 2048;
    LogOnError(AudioUnitSetProperty(_inConverterAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &audioFormat, sizeof(audioFormat)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_inConverterAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &audioFormatCanonical, sizeof(audioFormatCanonical)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_inConverterAU, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &kMaxFramesPerSlice, sizeof(kMaxFramesPerSlice)),
               @"setting max frames per slice");
    LogOnError(AudioUnitSetProperty(_iPodTimeAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &audioFormatCanonical, sizeof(audioFormatCanonical)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_iPodTimeAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &audioFormatCanonical, sizeof(audioFormatCanonical)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_iPodTimeAU, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &kMaxFramesPerSlice, sizeof(kMaxFramesPerSlice)),
               @"setting max frames per slice");
    LogOnError(AudioUnitSetProperty(_outConverterAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &audioFormatCanonical, sizeof(audioFormatCanonical)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_outConverterAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &audioFormat, sizeof(audioFormat)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_outConverterAU, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &kMaxFramesPerSlice, sizeof(kMaxFramesPerSlice)),
               @"setting max frames per slice");
    // For remote I/O, our app-side format goes on the input scope of the
    // output bus (0) and the output scope of the input bus (1) -- i.e., the
    // scopes that face our code rather than the hardware.
    LogOnError(AudioUnitSetProperty(_outputAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &audioFormat, sizeof(audioFormat)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_outputAU, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 1,
                                    &audioFormat, sizeof(audioFormat)),
               @"setting stream format");
    LogOnError(AudioUnitSetProperty(_outputAU, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &kMaxFramesPerSlice, sizeof(kMaxFramesPerSlice)),
               @"setting max frames per slice");
    // ================= set the voice processing AU options =================
    if (outputDesc.componentSubType == kAudioUnitSubType_VoiceProcessingIO)
    {
        UInt32 flag = 1; // on
        LogOnError(AudioUnitSetProperty(_outputAU,
                                        kAUVoiceIOProperty_VoiceProcessingEnableAGC,
                                        kAudioUnitScope_Global,
                                        1,
                                        &flag,
                                        sizeof(UInt32)), @"turning AGC on");
    }

    // Set up the iPodTime speed. We set the ivar directly because subclasses
    // might override the setter to cache the value ("non-live" vs. "live", for
    // example), and we don't want them to treat this as a legitimate speed
    // change coming from the user.
    _timeRate = 1;

    if (gAudioLogging && (_graph != NULL))
        CAShow(_graph);
}
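For completeness, this is roughly how the graph is brought up afterwards
(simplified sketch; the real code wraps these calls in more error handling):

- (void)startGraph
{
    LogOnError(AUGraphInitialize(_graph), @"initializing the graph");
    LogOnError(AUGraphStart(_graph), @"starting the graph");
}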
And this is what the input render callback looks like:
OSStatus VXAudioInputFromDevice(void                       *inRefCon,
                                AudioUnitRenderActionFlags *ioActionFlags,
                                const AudioTimeStamp       *inTimeStamp,
                                UInt32                      inBusNumber,
                                UInt32                      inNumberFrames,
                                AudioBufferList            *ioData)
{
    OSStatus status = noErr;
    VXAudioEngine *SELF = (__bridge VXAudioEngine *)inRefCon;
    if (SELF->_inputBuffer.mNumberBuffers)
    {
        SELF->_audioCallBackInvoked = YES;
        UInt32 dataSize = inNumberFrames * 2; // 2 bytes per frame (16-bit mono)
        if (likely(dataSize <= sizeof(SELF->_audioBuffer)))
        {
            SELF->_inputBuffer.mBuffers[0].mDataByteSize = dataSize;
            status = AudioUnitRender(SELF->_outputAU, ioActionFlags, inTimeStamp,
                                     inBusNumber, inNumberFrames, &SELF->_inputBuffer);
            NSData *data = [NSData dataWithBytesNoCopy:SELF->_inputBuffer.mBuffers[0].mData
                                                length:SELF->_inputBuffer.mBuffers[0].mDataByteSize
                                          freeWhenDone:NO];
            VXLogAlways(@"%@", data);
            // let subclass process the input
            //VXLogAlways(@"record callback > numFrames = %d mDataByteSize= %lu copying= %d timestamp = %f",
            //            (unsigned int)inNumberFrames, inNumberFrames * 2,
            //            (unsigned int)SELF->_inputBuffer.mBuffers[0].mDataByteSize,
            //            inTimeStamp->mSampleTime);
            [SELF processInputBuffer];
        }
        else
        {
            // dataSize is larger than our fixed audio buffer. Handle this case
            // to avoid overwriting memory in the audio engine: setting mData to
            // NULL lets AudioUnitRender supply its own buffer for this slice.
            SELF->_inputBuffer.mBuffers[0].mDataByteSize = dataSize;
            SELF->_inputBuffer.mBuffers[0].mData = NULL;
            status = AudioUnitRender(SELF->_outputAU, ioActionFlags, inTimeStamp,
                                     inBusNumber, inNumberFrames, &SELF->_inputBuffer);
            [SELF processInputBuffer];

            // Since we set the buffer to NULL and don't know whether the buffer
            // will overflow again on the next iteration, reset it to the default.
            SELF->_inputBuffer.mBuffers[0].mData = SELF->_audioBuffer;

            int currentOverflowCount = __sync_add_and_fetch(&SELF->_bufferOverflowId, 1);
            if (unlikely(currentOverflowCount == 1)) {
                dispatch_async(SELF->_bufferOverflowLoggingQueue, ^{
                    VXLogAlways(@"DV: Audio buffer overflowing for the first time. "
                                @"Available buffer size is %lu, bytesToProcess is %lu",
                                (unsigned long)sizeof(SELF->_audioBuffer),
                                (unsigned long)dataSize);
                });
            }
        }
    }
    return status;
}
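For reference, _inputBuffer is a single-buffer AudioBufferList backed by a
fixed array of 16-bit samples, set up roughly like this (simplified sketch;
the actual buffer size and ivar declarations in our code may differ):

    // sketch of the buffer setup the callback relies on
    SInt16 _audioBuffer[4096]; // fixed backing store, 16-bit mono

    _inputBuffer.mNumberBuffers = 1;
    _inputBuffer.mBuffers[0].mNumberChannels = 1;
    _inputBuffer.mBuffers[0].mData = _audioBuffer;
    _inputBuffer.mBuffers[0].mDataByteSize = sizeof(_audioBuffer);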
This same issue doesn’t occur on iOS 9.
Any help would be appreciated.
Thanks