Title: [294102] trunk
Revision: 294102
Author: you...@apple.com
Date: 2022-05-12 06:56:19 -0700 (Thu, 12 May 2022)

Log Message

Add a better mock for audio units used by CoreAudioSharedUnit
https://bugs.webkit.org/show_bug.cgi?id=240231

Reviewed by Eric Carlson.

Source/WebCore:

Move CoreAudioSharedUnit to its own file, CoreAudioSharedUnit.h/CoreAudioSharedUnit.cpp.
Introduce an internal unit to abstract out CoreAudioSharedUnit from actual CoreAudio units.
Implement a CoreAudio based internal unit in CoreAudioSharedUnit.cpp.
Implement a Mock based internal unit in MockAudioSharedUnit.h/MockAudioSharedUnit.mm.
Add input/output sample rate validation (debug assert, and error returned in release).
Add a test that uses this validation.
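
In outline, the new seam looks like this (a minimal sketch for orientation only;
the real interface, CoreAudioSharedUnit::InternalUnit, is in the
CoreAudioSharedUnit.cpp diff below, and the mock class name here is illustrative):

    // Sketch: the production unit and the mock implement one interface,
    // so CoreAudioSharedUnit never talks to an AudioUnit directly.
    class InternalUnit {
    public:
        virtual ~InternalUnit() = default;
        virtual OSStatus initialize() = 0;
        virtual OSStatus start() = 0;
        virtual OSStatus stop() = 0;
        // ...plus set/get/render and default-device lookups, as in the diff.
    };
    class CoreAudioSharedInternalUnit final : public InternalUnit { /* wraps a VPIO AudioUnit */ };
    class MockAudioInternalUnit final : public InternalUnit { /* feeds canned samples in tests */ };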

Test: fast/mediastream/audio-unit-reconfigure.html

* SourcesCocoa.txt:
* WebCore.xcodeproj/project.pbxproj:
* platform/mediastream/mac/BaseAudioSharedUnit.cpp:
* platform/mediastream/mac/BaseAudioSharedUnit.h:
* platform/mediastream/mac/CoreAudioCaptureSource.cpp:
* platform/mediastream/mac/CoreAudioSharedUnit.cpp: Added.
* platform/mediastream/mac/CoreAudioSharedUnit.h: Added.
* platform/mediastream/mac/MockAudioSharedUnit.h:
* platform/mediastream/mac/MockAudioSharedUnit.mm:

LayoutTests:

* fast/mediastream/audio-unit-reconfigure-expected.txt: Added.
* fast/mediastream/audio-unit-reconfigure.html: Added.

Diff

Modified: trunk/LayoutTests/ChangeLog (294101 => 294102)


--- trunk/LayoutTests/ChangeLog	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/LayoutTests/ChangeLog	2022-05-12 13:56:19 UTC (rev 294102)
@@ -1,3 +1,13 @@
+2022-05-12  Youenn Fablet  <you...@apple.com>
+
+        Add a better mock for audio units used by CoreAudioSharedUnit
+        https://bugs.webkit.org/show_bug.cgi?id=240231
+
+        Reviewed by Eric Carlson.
+
+        * fast/mediastream/audio-unit-reconfigure-expected.txt: Added.
+        * fast/mediastream/audio-unit-reconfigure.html: Added.
+
 2022-05-11  John Cunningham  <johncunning...@apple.com>
 
         [GPU Process] webrtc/vp8-then-h264-gpu-process-crash.html flaky fails after turning DOM in GPUP on by default

Added: trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure-expected.txt (0 => 294102)


--- trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure-expected.txt	                        (rev 0)
+++ trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure-expected.txt	2022-05-12 13:56:19 UTC (rev 294102)
@@ -0,0 +1,5 @@
+
+
+PASS Reconfigure audio sample rate
+PASS Reconfigure audio unit when being in render-only mode
+

Added: trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure.html (0 => 294102)


--- trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure.html	                        (rev 0)
+++ trunk/LayoutTests/fast/mediastream/audio-unit-reconfigure.html	2022-05-12 13:56:19 UTC (rev 294102)
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+    <script src="../../resources/testharness.js"></script>
+    <script src="../../resources/testharnessreport.js"></script>
+ </head>
+<body>
+<video id="video1" autoplay playsInline controls></video>
+<video id="video2" autoplay playsInline controls></video>
+<script>
+promise_test(async (test) => {
+    const context = new AudioContext();
+    const oscillator = context.createOscillator();
+    const streamDestination = context.createMediaStreamDestination();
+    oscillator.connect(streamDestination);
+    video1.srcObject = streamDestination.stream;
+    // video1 will be played through VPIO, which will fix the rate to a single value.
+    await video1.play();
+    oscillator.start();
+
+    const stream = await navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 48000 }});
+    stream.getAudioTracks()[0].onended = () => assert_not_reached("should not end");
+    await new Promise(resolve => setTimeout(resolve, 100));
+    stream.getAudioTracks()[0].stop();
+
+    const stream2 = await navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 44100 }});
+    stream2.getAudioTracks()[0].onended = () => assert_not_reached("should not end");
+    await new Promise(resolve => setTimeout(resolve, 100));
+}, "Reconfigure audio sample rate");
+
+promise_test(async (test) => {
+    const context = new AudioContext();
+    const oscillator = context.createOscillator();
+    const streamDestination = context.createMediaStreamDestination();
+    oscillator.connect(streamDestination);
+    video1.srcObject = streamDestination.stream;
+    await video1.play();
+    oscillator.start();
+
+    const defaultAudioConstraints = {
+        sampleRate: 48000,
+        sampleSize: 16,
+        channelCount: 1,
+        echoCancellation: true,
+        noiseSuppression: true
+    };
+    const musicModeAudioConstraints = {
+        sampleRate: 48000,
+        sampleSize: 16,
+        channelCount: 2,
+        echoCancellation: false,
+        noiseSuppression: false
+    };
+
+    const stream1 = await navigator.mediaDevices.getUserMedia({ audio: defaultAudioConstraints });
+    video2.srcObject = stream1;
+    await video2.play();
+    await new Promise(resolve => setTimeout(resolve, 1000));
+
+    // By stopping the capture audio track, the VPIO unit will go into render-only mode, as it still needs to play video1.
+    stream1.getAudioTracks()[0].stop();
+
+    const stream2 = await navigator.mediaDevices.getUserMedia({ audio: musicModeAudioConstraints });
+    stream2.getAudioTracks()[0].onended = () => log.innerHTML += 'track ended';
+    await new Promise(resolve => setTimeout(resolve, 1000));
+}, "Reconfigure audio unit when being in render-only mode");
+</script>
+</body>
+</html>
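
The first test above exercises the new input/output sample-rate validation: capturing
at 48000 and then at 44100 must reconfigure the shared unit without firing onended.
A sketch of the kind of check involved (hedged: the function name and the use of
kAudio_ParamError are illustrative, not the committed code):

    // Hypothetical sketch of "debug assert, and error returned in release".
    static OSStatus validateSampleRates(double inputSampleRate, double outputSampleRate)
    {
        ASSERT(inputSampleRate == outputSampleRate); // trips in debug builds
        if (inputSampleRate != outputSampleRate)
            return kAudio_ParamError; // surfaces as a reconfiguration error in release builds
        return noErr;
    }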

Modified: trunk/Source/WebCore/ChangeLog (294101 => 294102)


--- trunk/Source/WebCore/ChangeLog	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/ChangeLog	2022-05-12 13:56:19 UTC (rev 294102)
@@ -1,3 +1,29 @@
+2022-05-12  Youenn Fablet  <you...@apple.com>
+
+        Add a better mock for audio units used by CoreAudioSharedUnit
+        https://bugs.webkit.org/show_bug.cgi?id=240231
+
+        Reviewed by Eric Carlson.
+
+        Move CoreAudioSharedUnit to its own file, CoreAudioSharedUnit.h/CoreAudioSharedUnit.cpp.
+        Introduce an internal unit to abstract out CoreAudioSharedUnit from actual CoreAudio units.
+        Implement a CoreAudio based internal unit in CoreAudioSharedUnit.cpp.
+        Implement a Mock based internal unit in MockAudioSharedUnit.h/MockAudioSharedUnit.mm.
+        Add input/output sample rate validation (debug assert, and error returned in release).
+        Add a test that uses this validation.
+
+        Test: fast/mediastream/audio-unit-reconfigure.html
+
+        * SourcesCocoa.txt:
+        * WebCore.xcodeproj/project.pbxproj:
+        * platform/mediastream/mac/BaseAudioSharedUnit.cpp:
+        * platform/mediastream/mac/BaseAudioSharedUnit.h:
+        * platform/mediastream/mac/CoreAudioCaptureSource.cpp:
+        * platform/mediastream/mac/CoreAudioSharedUnit.cpp: Added.
+        * platform/mediastream/mac/CoreAudioSharedUnit.h: Added.
+        * platform/mediastream/mac/MockAudioSharedUnit.h:
+        * platform/mediastream/mac/MockAudioSharedUnit.mm:
+
 2022-05-12  Oriol Brufau  <obru...@igalia.com>
 
         [cssom] Serialize computed '-webkit-text-combine: none'

Modified: trunk/Source/WebCore/SourcesCocoa.txt (294101 => 294102)


--- trunk/Source/WebCore/SourcesCocoa.txt	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/SourcesCocoa.txt	2022-05-12 13:56:19 UTC (rev 294102)
@@ -565,6 +565,7 @@
 platform/mediastream/mac/CoreAudioCaptureDevice.cpp
 platform/mediastream/mac/CoreAudioCaptureDeviceManager.cpp
 platform/mediastream/mac/CoreAudioCaptureSource.cpp
+platform/mediastream/mac/CoreAudioSharedUnit.cpp
 platform/mediastream/mac/DisplayCaptureManagerCocoa.cpp
 platform/mediastream/mac/MediaStreamTrackAudioSourceProviderCocoa.cpp
 platform/mediastream/mac/MockAudioSharedUnit.mm

Modified: trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj (294101 => 294102)


--- trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj	2022-05-12 13:56:19 UTC (rev 294102)
@@ -8567,6 +8567,8 @@
 		410A8EF724F8F3F6004F9070 /* TextEncoderStreamEncoder.idl */ = {isa = PBXFileReference; lastKnownFileType = text; path = TextEncoderStreamEncoder.idl; sourceTree = "<group>"; };
 		410A8EF824F8F47A004F9070 /* TextEncoderStreamEncoder.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = TextEncoderStreamEncoder.cpp; sourceTree = "<group>"; };
 		410A8EF924F8F47B004F9070 /* TextEncoderStreamEncoder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TextEncoderStreamEncoder.h; sourceTree = "<group>"; };
+		410AD2DE2822BBF6009C93C8 /* CoreAudioSharedUnit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CoreAudioSharedUnit.h; sourceTree = "<group>"; };
+		410AD2DF2822BBF6009C93C8 /* CoreAudioSharedUnit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CoreAudioSharedUnit.cpp; sourceTree = "<group>"; };
 		410B7E711045FAB000D8224F /* JSMessageEventCustom.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSMessageEventCustom.cpp; sourceTree = "<group>"; };
 		410BA1312570FE57002E2F8A /* LibWebRTCRtpTransformableFrame.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LibWebRTCRtpTransformableFrame.cpp; path = libwebrtc/LibWebRTCRtpTransformableFrame.cpp; sourceTree = "<group>"; };
 		410BCF5326F0CD8B0040B124 /* RTCLocalSessionDescriptionInit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RTCLocalSessionDescriptionInit.h; sourceTree = "<group>"; };
@@ -18813,6 +18815,8 @@
 				3F8020341E9E381D00DEC61D /* CoreAudioCaptureDeviceManager.h */,
 				3F3BB5821E709EE400C701F2 /* CoreAudioCaptureSource.cpp */,
 				3F3BB5831E709EE400C701F2 /* CoreAudioCaptureSource.h */,
+				410AD2DF2822BBF6009C93C8 /* CoreAudioSharedUnit.cpp */,
+				410AD2DE2822BBF6009C93C8 /* CoreAudioSharedUnit.h */,
 				07A6D8481FEB700B006441DE /* DisplayCaptureManagerCocoa.cpp */,
 				07A6D8491FEB700C006441DE /* DisplayCaptureManagerCocoa.h */,
 				4174E9202535DCDD00FE4202 /* MediaStreamTrackAudioSourceProviderCocoa.cpp */,

Modified: trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.cpp (294101 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.cpp	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.cpp	2022-05-12 13:56:19 UTC (rev 294102)
@@ -113,11 +113,6 @@
     return 0;
 }
 
-void BaseAudioSharedUnit::resetSampleRate()
-{
-    m_sampleRate = AudioSession::sharedSession().sampleRate();
-}
-
 void BaseAudioSharedUnit::prepareForNewCapture()
 {
     m_volume = 1;

Modified: trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.h (294101 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.h	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/platform/mediastream/mac/BaseAudioSharedUnit.h	2022-05-12 13:56:19 UTC (rev 294102)
@@ -83,6 +83,8 @@
     void whenAudioCaptureUnitIsNotRunning(Function<void()>&&);
     bool isRenderingAudio() const { return m_isRenderingAudio; }
 
+    const String& persistentIDForTesting() const { return m_capturingDevice ? m_capturingDevice->first : emptyString(); }
+
 protected:
     void forEachClient(const Function<void(CoreAudioCaptureSource&)>&) const;
     bool hasClients() const { return !m_clients.isEmpty(); }
@@ -92,7 +94,7 @@
     virtual OSStatus startInternal() = 0;
     virtual void stopInternal() = 0;
     virtual OSStatus reconfigureAudioUnit() = 0;
-    virtual void resetSampleRate();
+    virtual void resetSampleRate() = 0;
     virtual void captureDeviceChanged() = 0;
 
     void setSuspended(bool value) { m_suspended = value; }

Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp (294101 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp	2022-05-12 13:56:19 UTC (rev 294102)
@@ -31,9 +31,9 @@
 #include "AudioSampleBufferList.h"
 #include "AudioSampleDataSource.h"
 #include "AudioSession.h"
-#include "BaseAudioSharedUnit.h"
 #include "CoreAudioCaptureDevice.h"
 #include "CoreAudioCaptureDeviceManager.h"
+#include "CoreAudioSharedUnit.h"
 #include "Logging.h"
 #include "PlatformMediaSessionManager.h"
 #include "Timer.h"
@@ -51,9 +51,6 @@
 #include <wtf/NeverDestroyed.h>
 #include <wtf/Scope.h>
 
-#include <pal/cf/AudioToolboxSoftLink.h>
-#include <pal/cf/CoreMediaSoftLink.h>
-
 #if PLATFORM(IOS_FAMILY)
 #include "AVAudioSessionCaptureDevice.h"
 #include "AVAudioSessionCaptureDeviceManager.h"
@@ -60,6 +57,9 @@
 #include "CoreAudioCaptureSourceIOS.h"
 #endif
 
+#include <pal/cf/AudioToolboxSoftLink.h>
+#include <pal/cf/CoreMediaSoftLink.h>
+
 namespace WebCore {
 
 #if PLATFORM(MAC)
@@ -70,614 +70,6 @@
 }
 #endif
 
-const UInt32 outputBus = 0;
-const UInt32 inputBus = 1;
-
-class CoreAudioSharedUnit final : public BaseAudioSharedUnit {
-public:
-    static CoreAudioSharedUnit& unit();
-    static BaseAudioSharedUnit& singleton()  { return unit(); }
-    CoreAudioSharedUnit();
-
-    void registerSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer&);
-    void unregisterSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer&);
-    bool isRunning() const { return m_ioUnitStarted; }
-
-private:
-    static size_t preferredIOBufferSize();
-
-    CapabilityValueOrRange sampleRateCapacities() const final { return CapabilityValueOrRange(8000, 96000); }
-    const CAAudioStreamDescription& microphoneFormat() const { return m_microphoneProcFormat; }
-
-    bool hasAudioUnit() const final { return m_ioUnit; }
-    void captureDeviceChanged() final;
-    OSStatus reconfigureAudioUnit() final;
-
-    OSStatus setupAudioUnit();
-    void cleanupAudioUnit() final;
-
-    OSStatus startInternal() final;
-    void stopInternal() final;
-    bool isProducingData() const final { return m_ioUnitStarted; }
-    void isProducingMicrophoneSamplesChanged() final;
-    void validateOutputDevice(uint32_t deviceID) final;
-    int actualSampleRate() const final;
-
-    OSStatus configureSpeakerProc(int sampleRate);
-    OSStatus configureMicrophoneProc(int sampleRate);
-    OSStatus defaultOutputDevice(uint32_t*);
-    OSStatus defaultInputDevice(uint32_t*);
-
-    static OSStatus microphoneCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
-    OSStatus processMicrophoneSamples(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
-
-    static OSStatus speakerCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
-    OSStatus provideSpeakerData(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList&);
-
-    void unduck();
-
-    void verifyIsCapturing();
-
-    Seconds verifyCaptureInterval() { return isProducingMicrophoneSamples() ? 10_s : 2_s; }
-
-    AudioUnit m_ioUnit { nullptr };
-
-    // Only read/modified from the IO thread.
-    Vector<Ref<AudioSampleDataSource>> m_activeSources;
-
-    CAAudioStreamDescription m_microphoneProcFormat;
-    RefPtr<AudioSampleBufferList> m_microphoneSampleBuffer;
-    uint64_t m_latestMicTimeStamp { 0 };
-
-    CAAudioStreamDescription m_speakerProcFormat;
-
-    double m_DTSConversionRatio { 0 };
-
-    bool m_ioUnitInitialized { false };
-    bool m_ioUnitStarted { false };
-
-    mutable std::unique_ptr<RealtimeMediaSourceCapabilities> m_capabilities;
-    mutable std::optional<RealtimeMediaSourceSettings> m_currentSettings;
-
-#if !LOG_DISABLED
-    void checkTimestamps(const AudioTimeStamp&, uint64_t, double);
-
-    String m_ioUnitName;
-#endif
-
-    uint64_t m_microphoneProcsCalled { 0 };
-    uint64_t m_microphoneProcsCalledLastTime { 0 };
-    Timer m_verifyCapturingTimer;
-
-    bool m_isReconfiguring { false };
-    mutable Lock m_speakerSamplesProducerLock;
-    CoreAudioSpeakerSamplesProducer* m_speakerSamplesProducer WTF_GUARDED_BY_LOCK(m_speakerSamplesProducerLock) { nullptr };
-};
-
-CoreAudioSharedUnit& CoreAudioSharedUnit::unit()
-{
-    static NeverDestroyed<CoreAudioSharedUnit> singleton;
-    return singleton;
-}
-
-CoreAudioSharedUnit::CoreAudioSharedUnit()
-    : m_verifyCapturingTimer(*this, &CoreAudioSharedUnit::verifyIsCapturing)
-{
-}
-
-void CoreAudioSharedUnit::captureDeviceChanged()
-{
-#if PLATFORM(MAC)
-    reconfigureAudioUnit();
-#else
-    AVAudioSessionCaptureDeviceManager::singleton().setPreferredAudioSessionDeviceUID(persistentID());
-#endif
-}
-
-size_t CoreAudioSharedUnit::preferredIOBufferSize()
-{
-    return AudioSession::sharedSession().bufferSize();
-}
-
-OSStatus CoreAudioSharedUnit::setupAudioUnit()
-{
-    if (m_ioUnit)
-        return 0;
-
-    ASSERT(hasClients());
-
-    mach_timebase_info_data_t timebaseInfo;
-    mach_timebase_info(&timebaseInfo);
-    m_DTSConversionRatio = 1e-9 * static_cast<double>(timebaseInfo.numer) / static_cast<double>(timebaseInfo.denom);
-
-    AudioComponentDescription ioUnitDescription = { kAudioUnitType_Output, kAudioUnitSubType_VoiceProcessingIO, kAudioUnitManufacturer_Apple, 0, 0 };
-    AudioComponent ioComponent = PAL::AudioComponentFindNext(nullptr, &ioUnitDescription);
-    ASSERT(ioComponent);
-    if (!ioComponent) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to find vpio unit component", this);
-        return -1;
-    }
-
-#if !LOG_DISABLED
-    CFStringRef name = nullptr;
-    PAL::AudioComponentCopyName(ioComponent, &name);
-    if (name) {
-        m_ioUnitName = name;
-        CFRelease(name);
-        RELEASE_LOG(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) created \"%s\" component", this, m_ioUnitName.utf8().data());
-    }
-#endif
-
-    auto err = PAL::AudioComponentInstanceNew(ioComponent, &m_ioUnit);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to open vpio unit, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    if (!enableEchoCancellation()) {
-        uint32_t param = 0;
-        err = PAL::AudioUnitSetProperty(m_ioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, kAudioUnitScope_Global, inputBus, &param, sizeof(param));
-        if (err) {
-            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio automatic gain control, error %d (%.4s)", this, (int)err, (char*)&err);
-            return err;
-        }
-        param = 1;
-        err = PAL::AudioUnitSetProperty(m_ioUnit, kAUVoiceIOProperty_BypassVoiceProcessing, kAudioUnitScope_Global, inputBus, &param, sizeof(param));
-        if (err) {
-            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit echo cancellation, error %d (%.4s)", this, (int)err, (char*)&err);
-            return err;
-        }
-    }
-
-#if PLATFORM(IOS_FAMILY)
-    uint32_t param = 1;
-    err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputBus, &param, sizeof(param));
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to enable vpio unit input, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-#else
-    auto deviceID = captureDeviceID();
-    if (!deviceID) {
-        err = defaultInputDevice(&deviceID);
-        if (err)
-            return err;
-    }
-
-    err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, &deviceID, sizeof(deviceID));
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit capture device ID %d, error %d (%.4s)", this, (int)deviceID, (int)err, (char*)&err);
-        return err;
-    }
-
-    uint32_t defaultOutputDeviceID;
-    err = defaultOutputDevice(&defaultOutputDeviceID);
-    if (!err) {
-        err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, outputBus, &defaultOutputDeviceID, sizeof(defaultOutputDeviceID));
-        RELEASE_LOG_ERROR_IF(err, WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit output device ID %d, error %d (%.4s)", this, (int)defaultOutputDeviceID, (int)err, (char*)&err);
-    }
-    setOutputDeviceID(!err ? defaultOutputDeviceID : 0);
-#endif
-
-    // FIXME: Add support for different speaker/microphone sample rates.
-    int actualSampleRate = this->actualSampleRate();
-    err = configureMicrophoneProc(actualSampleRate);
-    if (err)
-        return err;
-
-    err = configureSpeakerProc(actualSampleRate);
-    if (err)
-        return err;
-
-    err = PAL::AudioUnitInitialize(m_ioUnit);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) AudioUnitInitialize() failed, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-    m_ioUnitInitialized = true;
-
-    unduck();
-
-    return err;
-}
-
-void CoreAudioSharedUnit::unduck()
-{
-    uint32_t outputDevice;
-    if (!defaultOutputDevice(&outputDevice))
-        AudioDeviceDuck(outputDevice, 1.0, nullptr, 0);
-}
-
-int CoreAudioSharedUnit::actualSampleRate() const
-{
-    Locker locker { m_speakerSamplesProducerLock };
-    return m_speakerSamplesProducer ? m_speakerSamplesProducer->format().streamDescription().mSampleRate : sampleRate();
-}
-
-OSStatus CoreAudioSharedUnit::configureMicrophoneProc(int sampleRate)
-{
-    ASSERT(isMainThread());
-
-    AURenderCallbackStruct callback = { microphoneCallback, this };
-    auto err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, inputBus, &callback, sizeof(callback));
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set vpio unit mic proc, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    AudioStreamBasicDescription microphoneProcFormat = { };
-
-    UInt32 size = sizeof(microphoneProcFormat);
-    err = PAL::AudioUnitGetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &microphoneProcFormat, &size);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to get output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    microphoneProcFormat.mSampleRate = sampleRate;
-    err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &microphoneProcFormat, size);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    m_microphoneSampleBuffer = AudioSampleBufferList::create(microphoneProcFormat, preferredIOBufferSize() * 2);
-    m_microphoneProcFormat = microphoneProcFormat;
-
-    return err;
-}
-
-OSStatus CoreAudioSharedUnit::configureSpeakerProc(int sampleRate)
-{
-    ASSERT(isMainThread());
-
-    AURenderCallbackStruct callback = { speakerCallback, this };
-    auto err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, outputBus, &callback, sizeof(callback));
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to set vpio unit speaker proc, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    AudioStreamBasicDescription speakerProcFormat;
-    UInt32 size = sizeof(speakerProcFormat);
-    {
-        Locker locker { m_speakerSamplesProducerLock };
-        if (m_speakerSamplesProducer) {
-            speakerProcFormat = m_speakerSamplesProducer->format().streamDescription();
-            ASSERT(speakerProcFormat.mSampleRate == sampleRate);
-        } else {
-            err = PAL::AudioUnitGetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, &size);
-            if (err) {
-                RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
-                return err;
-            }
-        }
-    }
-    speakerProcFormat.mSampleRate = sampleRate;
-
-    err = PAL::AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, size);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    m_speakerProcFormat = speakerProcFormat;
-
-    return err;
-}
-
-#if !LOG_DISABLED
-void CoreAudioSharedUnit::checkTimestamps(const AudioTimeStamp& timeStamp, uint64_t sampleTime, double hostTime)
-{
-    if (!timeStamp.mSampleTime || sampleTime == m_latestMicTimeStamp || !hostTime)
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::checkTimestamps: unusual timestamps, sample time = %lld, previous sample time = %lld, hostTime %f", sampleTime, m_latestMicTimeStamp, hostTime);
-}
-#endif
-
-OSStatus CoreAudioSharedUnit::provideSpeakerData(AudioUnitRenderActionFlags& flags, const AudioTimeStamp& timeStamp, UInt32 /*inBusNumber*/, UInt32 inNumberFrames, AudioBufferList& ioData)
-{
-    if (m_isReconfiguring || !m_speakerSamplesProducerLock.tryLock()) {
-        AudioSampleBufferList::zeroABL(ioData, static_cast<size_t>(inNumberFrames * m_speakerProcFormat.bytesPerFrame()));
-        flags = kAudioUnitRenderAction_OutputIsSilence;
-        return noErr;
-    }
-
-    Locker locker { AdoptLock, m_speakerSamplesProducerLock };
-    if (!m_speakerSamplesProducer) {
-        AudioSampleBufferList::zeroABL(ioData, static_cast<size_t>(inNumberFrames * m_speakerProcFormat.bytesPerFrame()));
-        flags = kAudioUnitRenderAction_OutputIsSilence;
-        return noErr;
-    }
-    return m_speakerSamplesProducer->produceSpeakerSamples(inNumberFrames, ioData, timeStamp.mSampleTime, timeStamp.mHostTime, flags);
-}
-
-OSStatus CoreAudioSharedUnit::speakerCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
-{
-    ASSERT(ioActionFlags);
-    ASSERT(inTimeStamp);
-    auto dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
-    return dataSource->provideSpeakerData(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, *ioData);
-}
-
-OSStatus CoreAudioSharedUnit::processMicrophoneSamples(AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& timeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* /*ioData*/)
-{
-    ++m_microphoneProcsCalled;
-
-    if (m_isReconfiguring)
-        return false;
-
-    // Pull through the vpio unit to our mic buffer.
-    m_microphoneSampleBuffer->reset();
-    AudioBufferList& bufferList = m_microphoneSampleBuffer->bufferList();
-    auto err = AudioUnitRender(m_ioUnit, &ioActionFlags, &timeStamp, inBusNumber, inNumberFrames, &bufferList);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::processMicrophoneSamples(%p) AudioUnitRender failed with error %d (%.4s), bufferList size %d, inNumberFrames %d ", this, (int)err, (char*)&err, (int)bufferList.mBuffers[0].mDataByteSize, (int)inNumberFrames);
-        if (err == kAudio_ParamError) {
-            // Our buffer might be too small, the preferred buffer size or sample rate might have changed.
-            callOnMainThread([] {
-                CoreAudioSharedUnit::singleton().reconfigure();
-            });
-        }
-        // We return early so that if this error happens, we do not increment m_microphoneProcsCalled and fail the capture once timer kicks in.
-        return err;
-    }
-
-    if (!isProducingMicrophoneSamples())
-        return noErr;
-
-    double adjustedHostTime = m_DTSConversionRatio * timeStamp.mHostTime;
-    uint64_t sampleTime = timeStamp.mSampleTime;
-#if !LOG_DISABLED
-    checkTimestamps(timeStamp, sampleTime, adjustedHostTime);
-#endif
-    m_latestMicTimeStamp = sampleTime;
-    m_microphoneSampleBuffer->setTimes(adjustedHostTime, sampleTime);
-
-    if (volume() != 1.0)
-        m_microphoneSampleBuffer->applyGain(volume());
-
-    audioSamplesAvailable(MediaTime(sampleTime, m_microphoneProcFormat.sampleRate()), m_microphoneSampleBuffer->bufferList(), m_microphoneProcFormat, inNumberFrames);
-    return noErr;
-}
-
-OSStatus CoreAudioSharedUnit::microphoneCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
-{
-    ASSERT(ioActionFlags);
-    ASSERT(inTimeStamp);
-    CoreAudioSharedUnit* dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
-    return dataSource->processMicrophoneSamples(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, ioData);
-}
-
-void CoreAudioSharedUnit::cleanupAudioUnit()
-{
-    if (m_ioUnitInitialized) {
-        ASSERT(m_ioUnit);
-        auto err = AudioUnitUninitialize(m_ioUnit);
-        if (err)
-            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::cleanupAudioUnit(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
-        m_ioUnitInitialized = false;
-    }
-
-    if (m_ioUnit) {
-        PAL::AudioComponentInstanceDispose(m_ioUnit);
-        m_ioUnit = nullptr;
-    }
-
-    m_microphoneSampleBuffer = nullptr;
-#if !LOG_DISABLED
-    m_ioUnitName = emptyString();
-#endif
-}
-
-OSStatus CoreAudioSharedUnit::reconfigureAudioUnit()
-{
-    ASSERT(isMainThread());
-    OSStatus err;
-    if (!hasAudioUnit())
-        return 0;
-
-    m_isReconfiguring = true;
-    auto scope = makeScopeExit([this] { m_isReconfiguring = false; });
-
-    if (m_ioUnitStarted) {
-        err = PAL::AudioOutputUnitStop(m_ioUnit);
-        if (err) {
-            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::reconfigureAudioUnit(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
-            return err;
-        }
-    }
-
-    cleanupAudioUnit();
-    err = setupAudioUnit();
-    if (err)
-        return err;
-
-    if (m_ioUnitStarted) {
-        err = PAL::AudioOutputUnitStart(m_ioUnit);
-        if (err) {
-            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::reconfigureAudioUnit(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
-            return err;
-        }
-    }
-    return err;
-}
-
-OSStatus CoreAudioSharedUnit::startInternal()
-{
-    ASSERT(isMainThread());
-
-    setIsProducingMicrophoneSamples(true);
-
-    OSStatus err;
-    if (!m_ioUnit) {
-        err = setupAudioUnit();
-        if (err) {
-            cleanupAudioUnit();
-            ASSERT(!m_ioUnit);
-            return err;
-        }
-        ASSERT(m_ioUnit);
-    }
-
-    unduck();
-
-    {
-        Locker locker { m_speakerSamplesProducerLock };
-        if (m_speakerSamplesProducer)
-            m_speakerSamplesProducer->captureUnitIsStarting();
-    }
-
-    err = PAL::AudioOutputUnitStart(m_ioUnit);
-    if (err) {
-        {
-            Locker locker { m_speakerSamplesProducerLock };
-            if (m_speakerSamplesProducer)
-                m_speakerSamplesProducer->captureUnitHasStopped();
-        }
-
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::start(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
-        cleanupAudioUnit();
-        ASSERT(!m_ioUnit);
-        return err;
-    }
-
-    m_ioUnitStarted = true;
-
-    m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
-    m_microphoneProcsCalled = 0;
-    m_microphoneProcsCalledLastTime = 0;
-
-    return 0;
-}
-
-void CoreAudioSharedUnit::isProducingMicrophoneSamplesChanged()
-{
-    if (!isProducingData())
-        return;
-    m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
-}
-
-void CoreAudioSharedUnit::validateOutputDevice(uint32_t currentOutputDeviceID)
-{
-#if PLATFORM(MAC)
-    uint32_t currentDefaultOutputDeviceID = 0;
-    if (auto err = defaultOutputDevice(&currentDefaultOutputDeviceID))
-        return;
-
-    if (!currentDefaultOutputDeviceID || currentOutputDeviceID == currentDefaultOutputDeviceID)
-        return;
-
-    reconfigure();
-#else
-    UNUSED_PARAM(currentOutputDeviceID);
-#endif
-}
-
-void CoreAudioSharedUnit::verifyIsCapturing()
-{
-    if (m_microphoneProcsCalledLastTime != m_microphoneProcsCalled) {
-        m_microphoneProcsCalledLastTime = m_microphoneProcsCalled;
-        return;
-    }
-
-    RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::verifyIsCapturing - no audio received in %d seconds, failing", static_cast<int>(m_verifyCapturingTimer.repeatInterval().value()));
-    captureFailed();
-}
-
-void CoreAudioSharedUnit::stopInternal()
-{
-    ASSERT(isMainThread());
-
-    m_verifyCapturingTimer.stop();
-
-    if (!m_ioUnit || !m_ioUnitStarted)
-        return;
-
-    auto err = PAL::AudioOutputUnitStop(m_ioUnit);
-    if (err) {
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::stop(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
-        return;
-    }
-    {
-        Locker locker { m_speakerSamplesProducerLock };
-        if (m_speakerSamplesProducer)
-            m_speakerSamplesProducer->captureUnitHasStopped();
-    }
-
-    m_ioUnitStarted = false;
-}
-
-OSStatus CoreAudioSharedUnit::defaultInputDevice(uint32_t* deviceID)
-{
-    ASSERT(m_ioUnit);
-
-    UInt32 propertySize = sizeof(*deviceID);
-    auto err = PAL::AudioUnitGetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, deviceID, &propertySize);
-    if (err)
-        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::defaultInputDevice(%p) unable to get default input device ID, error %d (%.4s)", this, (int)err, (char*)&err);
-
-    return err;
-}
-
-OSStatus CoreAudioSharedUnit::defaultOutputDevice(uint32_t* deviceID)
-{
-    OSErr err = -1;
-#if PLATFORM(MAC)
-    AudioObjectPropertyAddress address = {
-        kAudioHardwarePropertyDefaultOutputDevice,
-        kAudioObjectPropertyScopeGlobal,
-#if HAVE(AUDIO_OBJECT_PROPERTY_ELEMENT_MAIN)
-        kAudioObjectPropertyElementMain
-#else
-        ALLOW_DEPRECATED_DECLARATIONS_BEGIN
-        kAudioObjectPropertyElementMaster
-        ALLOW_DEPRECATED_DECLARATIONS_END
-#endif
-    };
-
-    if (AudioObjectHasProperty(kAudioObjectSystemObject, &address)) {
-        UInt32 propertySize = sizeof(AudioDeviceID);
-        err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &address, 0, nullptr, &propertySize, deviceID);
-    }
-#else
-    UNUSED_PARAM(deviceID);
-#endif
-    return err;
-}
-
-void CoreAudioSharedUnit::registerSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer& producer)
-{
-    ASSERT(isMainThread());
-
-    setIsRenderingAudio(true);
-
-    CoreAudioSpeakerSamplesProducer* oldProducer;
-    {
-        Locker locker { m_speakerSamplesProducerLock };
-        oldProducer = m_speakerSamplesProducer;
-        m_speakerSamplesProducer = &producer;
-    }
-    if (oldProducer && oldProducer != &producer)
-        oldProducer->captureUnitHasStopped();
-
-    if (hasAudioUnit() && producer.format() != m_speakerProcFormat)
-        reconfigure();
-}
-
-void CoreAudioSharedUnit::unregisterSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer& producer)
-{
-    ASSERT(isMainThread());
-
-    {
-        Locker locker { m_speakerSamplesProducerLock };
-        if (m_speakerSamplesProducer != &producer)
-            return;
-        m_speakerSamplesProducer = nullptr;
-    }
-
-    setIsRenderingAudio(false);
-}
-
 static CaptureSourceOrError initializeCoreAudioCaptureSource(Ref<CoreAudioCaptureSource>&& source, const MediaConstraints* constraints)
 {
     if (constraints) {
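
The new CoreAudioSharedUnit.cpp below also threads a creation hook through
setupAudioUnit() (m_creationCallback), which is what lets the mock substitute its
own InternalUnit. A minimal sketch of how a test harness might install one (the
setter name and mock type are hypothetical; the Expected/UniqueRef shape mirrors
CoreAudioSharedInternalUnit::create() below):

    // Sketch only: setInternalUnitCreationCallback and MockAudioInternalUnit are
    // hypothetical names; m_creationCallback is consulted in setupAudioUnit().
    CoreAudioSharedUnit::unit().setInternalUnitCreationCallback(
        []() -> Expected<UniqueRef<CoreAudioSharedUnit::InternalUnit>, OSStatus> {
            return makeUniqueRef<MockAudioInternalUnit>();
        });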

Copied: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.cpp (from rev 294101, trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp) (0 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.cpp	                        (rev 0)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.cpp	2022-05-12 13:56:19 UTC (rev 294102)
@@ -0,0 +1,652 @@
+/*
+ * Copyright (C) 2017-2022 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CoreAudioSharedUnit.h"
+
+#if ENABLE(MEDIA_STREAM)
+
+#include "AudioSampleBufferList.h"
+#include "AudioSession.h"
+#include "CoreAudioCaptureSource.h"
+#include "Logging.h"
+#include <AudioToolbox/AudioConverter.h>
+#include <AudioUnit/AudioUnit.h>
+#include <CoreMedia/CMSync.h>
+#include <mach/mach_time.h>
+#include <pal/avfoundation/MediaTimeAVFoundation.h>
+#include <pal/spi/cf/CoreAudioSPI.h>
+#include <sys/time.h>
+#include <wtf/Algorithms.h>
+#include <wtf/Lock.h>
+#include <wtf/MainThread.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/Scope.h>
+
+#if PLATFORM(IOS_FAMILY)
+#include "AVAudioSessionCaptureDeviceManager.h"
+#endif
+
+#include <pal/cf/AudioToolboxSoftLink.h>
+#include <pal/cf/CoreMediaSoftLink.h>
+
+namespace WebCore {
+
+const UInt32 outputBus = 0;
+const UInt32 inputBus = 1;
+
+class CoreAudioSharedInternalUnit : public CoreAudioSharedUnit::InternalUnit {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    static Expected<UniqueRef<InternalUnit>, OSStatus> create();
+    explicit CoreAudioSharedInternalUnit(AudioUnit);
+    ~CoreAudioSharedInternalUnit() final;
+
+private:
+    OSStatus initialize() final;
+    OSStatus uninitialize() final;
+    OSStatus start() final;
+    OSStatus stop() final;
+    OSStatus set(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, const void*, UInt32) final;
+    OSStatus get(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, void*, UInt32*) final;
+    OSStatus render(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*) final;
+    OSStatus defaultInputDevice(uint32_t*) final;
+    OSStatus defaultOutputDevice(uint32_t*) final;
+
+    AudioUnit m_ioUnit { nullptr };
+};
+
+Expected<UniqueRef<CoreAudioSharedUnit::InternalUnit>, OSStatus> CoreAudioSharedInternalUnit::create()
+{
+    AudioComponentDescription ioUnitDescription = { kAudioUnitType_Output, kAudioUnitSubType_VoiceProcessingIO, kAudioUnitManufacturer_Apple, 0, 0 };
+    AudioComponent ioComponent = PAL::AudioComponentFindNext(nullptr, &ioUnitDescription);
+    ASSERT(ioComponent);
+    if (!ioComponent) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedInternalUnit unable to find vpio unit component");
+        return makeUnexpected(-1);
+    }
+
+#if !LOG_DISABLED
+    CFStringRef name = nullptr;
+    PAL::AudioComponentCopyName(ioComponent, &name);
+    if (name) {
+        String ioUnitName = name;
+        CFRelease(name);
+        RELEASE_LOG(WebRTC, "CoreAudioSharedInternalUnit created \"%{private}s\" component", ioUnitName.utf8().data());
+    }
+#endif
+
+    AudioUnit ioUnit;
+    auto err = PAL::AudioComponentInstanceNew(ioComponent, &ioUnit);
+    if (err) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedInternalUnit unable to open vpio unit, error %d (%.4s)", (int)err, (char*)&err);
+        return makeUnexpected(err);
+    }
+
+    RELEASE_LOG(WebRTC, "Successfully created a CoreAudioSharedInternalUnit");
+
+    UniqueRef<CoreAudioSharedUnit::InternalUnit> result = makeUniqueRef<CoreAudioSharedInternalUnit>(ioUnit);
+    return result;
+}
+
+CoreAudioSharedInternalUnit::CoreAudioSharedInternalUnit(AudioUnit ioUnit)
+    : m_ioUnit(ioUnit)
+{
+}
+
+CoreAudioSharedInternalUnit::~CoreAudioSharedInternalUnit()
+{
+    PAL::AudioComponentInstanceDispose(m_ioUnit);
+}
+
+OSStatus CoreAudioSharedInternalUnit::initialize()
+{
+    return PAL::AudioUnitInitialize(m_ioUnit);
+}
+
+OSStatus CoreAudioSharedInternalUnit::uninitialize()
+{
+    return AudioUnitUninitialize(m_ioUnit);
+}
+
+OSStatus CoreAudioSharedInternalUnit::start()
+{
+    return PAL::AudioOutputUnitStart(m_ioUnit);
+}
+
+OSStatus CoreAudioSharedInternalUnit::stop()
+{
+    return PAL::AudioOutputUnitStop(m_ioUnit);
+}
+
+OSStatus CoreAudioSharedInternalUnit::set(AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element, const void* value, UInt32 size)
+{
+    return PAL::AudioUnitSetProperty(m_ioUnit, propertyID, scope, element, value, size);
+}
+
+OSStatus CoreAudioSharedInternalUnit::get(AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element, void* value, UInt32* size)
+{
+    return PAL::AudioUnitGetProperty(m_ioUnit, propertyID, scope, element, value, size);
+}
+
+OSStatus CoreAudioSharedInternalUnit::render(AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* list)
+{
+    return AudioUnitRender(m_ioUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, list);
+}
+
+OSStatus CoreAudioSharedInternalUnit::defaultInputDevice(uint32_t* deviceID)
+{
+#if PLATFORM(MAC)
+    UInt32 propertySize = sizeof(*deviceID);
+    auto err = get(kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, deviceID, &propertySize);
+    RELEASE_LOG_ERROR_IF(err, WebRTC, "CoreAudioSharedInternalUnit unable to get default input device ID, error %d (%.4s)", (int)err, (char*)&err);
+    return err;
+#else
+    UNUSED_PARAM(deviceID);
+    return -1;
+#endif
+}
+
+OSStatus CoreAudioSharedInternalUnit::defaultOutputDevice(uint32_t* deviceID)
+{
+#if PLATFORM(MAC)
+    AudioObjectPropertyAddress address = {
+        kAudioHardwarePropertyDefaultOutputDevice,
+        kAudioObjectPropertyScopeGlobal,
+#if HAVE(AUDIO_OBJECT_PROPERTY_ELEMENT_MAIN)
+        kAudioObjectPropertyElementMain
+#else
+        ALLOW_DEPRECATED_DECLARATIONS_BEGIN
+        kAudioObjectPropertyElementMaster
+        ALLOW_DEPRECATED_DECLARATIONS_END
+#endif
+    };
+
+    if (AudioObjectHasProperty(kAudioObjectSystemObject, &address)) {
+        UInt32 propertySize = sizeof(AudioDeviceID);
+        return AudioObjectGetPropertyData(kAudioObjectSystemObject, &address, 0, nullptr, &propertySize, deviceID);
+    }
+#else
+    UNUSED_PARAM(deviceID);
+#endif
+    return -1;
+}
+
+CoreAudioSharedUnit& CoreAudioSharedUnit::unit()
+{
+    static NeverDestroyed<CoreAudioSharedUnit> singleton;
+    return singleton;
+}
+
+CoreAudioSharedUnit::CoreAudioSharedUnit()
+    : m_sampleRateCapabilities(8000, 96000)
+    , m_verifyCapturingTimer(*this, &CoreAudioSharedUnit::verifyIsCapturing)
+{
+}
+
+void CoreAudioSharedUnit::resetSampleRate()
+{
+    setSampleRate(m_getSampleRateCallback ? m_getSampleRateCallback() : AudioSession::sharedSession().sampleRate());
+}
+
+void CoreAudioSharedUnit::captureDeviceChanged()
+{
+#if PLATFORM(MAC)
+    reconfigureAudioUnit();
+#else
+    AVAudioSessionCaptureDeviceManager::singleton().setPreferredAudioSessionDeviceUID(persistentID());
+#endif
+}
+
+size_t CoreAudioSharedUnit::preferredIOBufferSize()
+{
+    return AudioSession::sharedSession().bufferSize();
+}
+
+OSStatus CoreAudioSharedUnit::setupAudioUnit()
+{
+    if (m_ioUnit)
+        return 0;
+
+    ASSERT(hasClients());
+
+    mach_timebase_info_data_t timebaseInfo;
+    mach_timebase_info(&timebaseInfo);
+    m_DTSConversionRatio = 1e-9 * static_cast<double>(timebaseInfo.numer) / static_cast<double>(timebaseInfo.denom);
+
+    auto result = m_creationCallback ? m_creationCallback() : CoreAudioSharedInternalUnit::create();
+    if (!result.has_value())
+        return result.error();
+
+    m_ioUnit = WTFMove(result.value()).moveToUniquePtr();
+    if (!enableEchoCancellation()) {
+        uint32_t param = 0;
+        if (auto err = m_ioUnit->set(kAUVoiceIOProperty_VoiceProcessingEnableAGC, kAudioUnitScope_Global, inputBus, &param, sizeof(param))) {
+            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio automatic gain control, error %d (%.4s)", this, (int)err, (char*)&err);
+            return err;
+        }
+        param = 1;
+        if (auto err = m_ioUnit->set(kAUVoiceIOProperty_BypassVoiceProcessing, kAudioUnitScope_Global, inputBus, &param, sizeof(param))) {
+            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit echo cancellation, error %d (%.4s)", this, (int)err, (char*)&err);
+            return err;
+        }
+    }
+
+#if PLATFORM(IOS_FAMILY)
+    uint32_t param = 1;
+    if (auto err = m_ioUnit->set(kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputBus, &param, sizeof(param))) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to enable vpio unit input, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+#else
+    auto deviceID = captureDeviceID();
+    // FIXME: We probably want to handle default input/output devices.
+    if (!deviceID) {
+        if (auto err = m_ioUnit->defaultInputDevice(&deviceID))
+            return err;
+    }
+
+    if (auto err = m_ioUnit->set(kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, &deviceID, sizeof(deviceID))) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit capture device ID %d, error %d (%.4s)", this, (int)deviceID, (int)err, (char*)&err);
+        return err;
+    }
+
+    uint32_t defaultOutputDeviceID;
+    auto err = m_ioUnit->defaultOutputDevice(&defaultOutputDeviceID);
+    if (!err) {
+        err = m_ioUnit->set(kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, outputBus, &defaultOutputDeviceID, sizeof(defaultOutputDeviceID));
+        RELEASE_LOG_ERROR_IF(err, WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) unable to set vpio unit output device ID %d, error %d (%.4s)", this, (int)defaultOutputDeviceID, (int)err, (char*)&err);
+    }
+    setOutputDeviceID(!err ? defaultOutputDeviceID : 0);
+#endif
+
+    // FIXME: Add support for different speaker/microphone sample rates.
+    int actualSampleRate = this->actualSampleRate();
+    if (auto err = configureMicrophoneProc(actualSampleRate))
+        return err;
+
+    if (auto err = configureSpeakerProc(actualSampleRate))
+        return err;
+
+    if (auto err = m_ioUnit->initialize()) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::setupAudioUnit(%p) AudioUnitInitialize() failed, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+    m_ioUnitInitialized = true;
+
+    unduck();
+
+    return 0;
+}
+
+void CoreAudioSharedUnit::unduck()
+{
+    uint32_t outputDevice;
+    if (m_ioUnit && !m_ioUnit->defaultOutputDevice(&outputDevice))
+        AudioDeviceDuck(outputDevice, 1.0, nullptr, 0);
+}
+
+int CoreAudioSharedUnit::actualSampleRate() const
+{
+    Locker locker { m_speakerSamplesProducerLock };
+    return m_speakerSamplesProducer ? m_speakerSamplesProducer->format().streamDescription().mSampleRate : sampleRate();
+}
+
+OSStatus CoreAudioSharedUnit::configureMicrophoneProc(int sampleRate)
+{
+    ASSERT(isMainThread());
+
+    AURenderCallbackStruct callback = { microphoneCallback, this };
+    if (auto err = m_ioUnit->set(kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, inputBus, &callback, sizeof(callback))) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set vpio unit mic proc, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+    AudioStreamBasicDescription microphoneProcFormat = { };
+
+    UInt32 size = sizeof(microphoneProcFormat);
+    if (auto err = m_ioUnit->get(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &microphoneProcFormat, &size)) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to get output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+    microphoneProcFormat.mSampleRate = sampleRate;
+    if (auto err = m_ioUnit->set(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &microphoneProcFormat, size)) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+    m_microphoneSampleBuffer = AudioSampleBufferList::create(microphoneProcFormat, preferredIOBufferSize() * 2);
+    m_microphoneProcFormat = microphoneProcFormat;
+
+    return noErr;
+}
+
+OSStatus CoreAudioSharedUnit::configureSpeakerProc(int sampleRate)
+{
+    ASSERT(isMainThread());
+
+    AURenderCallbackStruct callback = { speakerCallback, this };
+    if (auto err = m_ioUnit->set(kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, outputBus, &callback, sizeof(callback))) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to set vpio unit speaker proc, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+    AudioStreamBasicDescription speakerProcFormat;
+    UInt32 size = sizeof(speakerProcFormat);
+    {
+        Locker locker { m_speakerSamplesProducerLock };
+        if (m_speakerSamplesProducer) {
+            speakerProcFormat = m_speakerSamplesProducer->format().streamDescription();
+            ASSERT(speakerProcFormat.mSampleRate == sampleRate);
+        } else {
+            if (auto err = m_ioUnit->get(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, &size)) {
+                RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+                return err;
+            }
+        }
+    }
+    speakerProcFormat.mSampleRate = sampleRate;
+
+    if (auto err = m_ioUnit->set(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, size)) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+    m_speakerProcFormat = speakerProcFormat;
+
+    return noErr;
+}
+
+#if !LOG_DISABLED
+void CoreAudioSharedUnit::checkTimestamps(const AudioTimeStamp& timeStamp, uint64_t sampleTime, double hostTime)
+{
+    if (!timeStamp.mSampleTime || sampleTime == m_latestMicTimeStamp || !hostTime)
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::checkTimestamps: unusual timestamps, sample time = %lld, previous sample time = %lld, hostTime %f", sampleTime, m_latestMicTimeStamp, hostTime);
+}
+#endif
+
+OSStatus CoreAudioSharedUnit::provideSpeakerData(AudioUnitRenderActionFlags& flags, const AudioTimeStamp& timeStamp, UInt32 /*inBusNumber*/, UInt32 inNumberFrames, AudioBufferList& ioData)
+{
+    if (m_isReconfiguring || !m_speakerSamplesProducerLock.tryLock()) {
+        AudioSampleBufferList::zeroABL(ioData, static_cast<size_t>(inNumberFrames * m_speakerProcFormat.bytesPerFrame()));
+        flags = kAudioUnitRenderAction_OutputIsSilence;
+        return noErr;
+    }
+
+    Locker locker { AdoptLock, m_speakerSamplesProducerLock };
+    if (!m_speakerSamplesProducer) {
+        AudioSampleBufferList::zeroABL(ioData, static_cast<size_t>(inNumberFrames * m_speakerProcFormat.bytesPerFrame()));
+        flags = kAudioUnitRenderAction_OutputIsSilence;
+        return noErr;
+    }
+    return m_speakerSamplesProducer->produceSpeakerSamples(inNumberFrames, ioData, timeStamp.mSampleTime, timeStamp.mHostTime, flags);
+}
+
+OSStatus CoreAudioSharedUnit::speakerCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
+{
+    ASSERT(ioActionFlags);
+    ASSERT(inTimeStamp);
+    auto dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
+    return dataSource->provideSpeakerData(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, *ioData);
+}
+
+OSStatus CoreAudioSharedUnit::processMicrophoneSamples(AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& timeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* /*ioData*/)
+{
+    ++m_microphoneProcsCalled;
+
+    if (m_isReconfiguring)
+        return false;
+
+    // Pull through the vpio unit to our mic buffer.
+    m_microphoneSampleBuffer->reset();
+    AudioBufferList& bufferList = m_microphoneSampleBuffer->bufferList();
+    if (auto err = m_ioUnit->render(&ioActionFlags, &timeStamp, inBusNumber, inNumberFrames, &bufferList)) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::processMicrophoneSamples(%p) AudioUnitRender failed with error %d (%.4s), bufferList size %d, inNumberFrames %d ", this, (int)err, (char*)&err, (int)bufferList.mBuffers[0].mDataByteSize, (int)inNumberFrames);
+        if (err == kAudio_ParamError) {
+            // Our buffer might be too small, the preferred buffer size or sample rate might have changed.
+            callOnMainThread([] {
+                CoreAudioSharedUnit::singleton().reconfigure();
+            });
+        }
+        // We return early so that if this error happens, we do not increment m_microphoneProcsCalled and fail the capture once timer kicks in.
+        return err;
+    }
+
+    if (!isProducingMicrophoneSamples())
+        return noErr;
+
+    double adjustedHostTime = m_DTSConversionRatio * timeStamp.mHostTime;
+    uint64_t sampleTime = timeStamp.mSampleTime;
+#if !LOG_DISABLED
+    checkTimestamps(timeStamp, sampleTime, adjustedHostTime);
+#endif
+    m_latestMicTimeStamp = sampleTime;
+    m_microphoneSampleBuffer->setTimes(adjustedHostTime, sampleTime);
+
+    if (volume() != 1.0)
+        m_microphoneSampleBuffer->applyGain(volume());
+
+    audioSamplesAvailable(MediaTime(sampleTime, m_microphoneProcFormat.sampleRate()), m_microphoneSampleBuffer->bufferList(), m_microphoneProcFormat, inNumberFrames);
+    return noErr;
+}
+
+OSStatus CoreAudioSharedUnit::microphoneCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
+{
+    ASSERT(ioActionFlags);
+    ASSERT(inTimeStamp);
+    CoreAudioSharedUnit* dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
+    return dataSource->processMicrophoneSamples(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, ioData);
+}
+
+void CoreAudioSharedUnit::cleanupAudioUnit()
+{
+    if (m_ioUnitInitialized) {
+        ASSERT(m_ioUnit);
+        if (auto err = m_ioUnit->uninitialize())
+            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::cleanupAudioUnit(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        m_ioUnitInitialized = false;
+    }
+
+    m_ioUnit = nullptr;
+
+    m_microphoneSampleBuffer = nullptr;
+}
+
+void CoreAudioSharedUnit::delaySamples(Seconds seconds)
+{
+    if (m_ioUnit)
+        m_ioUnit->delaySamples(seconds);
+}
+
+OSStatus CoreAudioSharedUnit::reconfigureAudioUnit()
+{
+    ASSERT(isMainThread());
+    if (!hasAudioUnit())
+        return 0;
+
+    m_isReconfiguring = true;
+    auto scope = makeScopeExit([this] { m_isReconfiguring = false; });
+
+    if (m_ioUnitStarted) {
+        if (auto err = m_ioUnit->stop()) {
+            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::reconfigureAudioUnit(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
+            return err;
+        }
+    }
+
+    cleanupAudioUnit();
+    if (auto err = setupAudioUnit())
+        return err;
+
+    if (m_ioUnitStarted) {
+        if (auto err = m_ioUnit->start()) {
+            RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::reconfigureAudioUnit(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
+            return err;
+        }
+    }
+    return noErr;
+}
+
+OSStatus CoreAudioSharedUnit::startInternal()
+{
+    ASSERT(isMainThread());
+
+    setIsProducingMicrophoneSamples(true);
+
+    if (!m_ioUnit) {
+        if (auto err = setupAudioUnit()) {
+            cleanupAudioUnit();
+            ASSERT(!m_ioUnit);
+            return err;
+        }
+        ASSERT(m_ioUnit);
+    }
+
+    unduck();
+
+    {
+        Locker locker { m_speakerSamplesProducerLock };
+        if (m_speakerSamplesProducer)
+            m_speakerSamplesProducer->captureUnitIsStarting();
+    }
+
+    if (auto err = m_ioUnit->start()) {
+        {
+            Locker locker { m_speakerSamplesProducerLock };
+            if (m_speakerSamplesProducer)
+                m_speakerSamplesProducer->captureUnitHasStopped();
+        }
+
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::start(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        cleanupAudioUnit();
+        ASSERT(!m_ioUnit);
+        return err;
+    }
+
+    m_ioUnitStarted = true;
+
+    m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
+    m_microphoneProcsCalled = 0;
+    m_microphoneProcsCalledLastTime = 0;
+
+    return noErr;
+}
+
+void CoreAudioSharedUnit::isProducingMicrophoneSamplesChanged()
+{
+    if (!isProducingData())
+        return;
+    m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
+}
+
+void CoreAudioSharedUnit::validateOutputDevice(uint32_t currentOutputDeviceID)
+{
+#if PLATFORM(MAC)
+    if (!m_ioUnit)
+        return;
+
+    uint32_t currentDefaultOutputDeviceID = 0;
+    if (auto err = m_ioUnit->defaultOutputDevice(&currentDefaultOutputDeviceID))
+        return;
+
+    if (!currentDefaultOutputDeviceID || currentOutputDeviceID == currentDefaultOutputDeviceID)
+        return;
+
+    reconfigure();
+#else
+    UNUSED_PARAM(currentOutputDeviceID);
+#endif
+}
+
+void CoreAudioSharedUnit::verifyIsCapturing()
+{
+    if (m_microphoneProcsCalledLastTime != m_microphoneProcsCalled) {
+        m_microphoneProcsCalledLastTime = m_microphoneProcsCalled;
+        return;
+    }
+
+    RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::verifyIsCapturing - no audio received in %d seconds, failing", static_cast<int>(m_verifyCapturingTimer.repeatInterval().value()));
+    captureFailed();
+}
+
+void CoreAudioSharedUnit::stopInternal()
+{
+    ASSERT(isMainThread());
+
+    m_verifyCapturingTimer.stop();
+
+    if (!m_ioUnit || !m_ioUnitStarted)
+        return;
+
+    if (auto err = m_ioUnit->stop()) {
+        RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::stop(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        return;
+    }
+    {
+        Locker locker { m_speakerSamplesProducerLock };
+        if (m_speakerSamplesProducer)
+            m_speakerSamplesProducer->captureUnitHasStopped();
+    }
+
+    m_ioUnitStarted = false;
+}
+
+void CoreAudioSharedUnit::registerSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer& producer)
+{
+    ASSERT(isMainThread());
+
+    setIsRenderingAudio(true);
+
+    CoreAudioSpeakerSamplesProducer* oldProducer;
+    {
+        Locker locker { m_speakerSamplesProducerLock };
+        oldProducer = m_speakerSamplesProducer;
+        m_speakerSamplesProducer = &producer;
+    }
+    if (oldProducer && oldProducer != &producer)
+        oldProducer->captureUnitHasStopped();
+
+    if (hasAudioUnit() && producer.format() != m_speakerProcFormat)
+        reconfigure();
+}
+
+void CoreAudioSharedUnit::unregisterSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer& producer)
+{
+    ASSERT(isMainThread());
+
+    {
+        Locker locker { m_speakerSamplesProducerLock };
+        if (m_speakerSamplesProducer != &producer)
+            return;
+        m_speakerSamplesProducer = nullptr;
+    }
+
+    setIsRenderingAudio(false);
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_STREAM)

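A note on the callback wiring above: microphoneCallback and speakerCallback are plain AURenderCallback functions that recover the shared unit from inRefCon. The sketch below shows how such callbacks are conventionally attached through the InternalUnit::set() interface; setupAudioUnit() is not part of this hunk, so the scopes and bus numbers used here (input bus 1 for the microphone, output bus 0 for the speaker) are assumptions based on the usual VoiceProcessingIO configuration, not a quote of the actual implementation.

    // Sketch only: assumed wiring inside CoreAudioSharedUnit::setupAudioUnit().
    AURenderCallbackStruct micCallback { &CoreAudioSharedUnit::microphoneCallback, this };
    m_ioUnit->set(kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 1, &micCallback, sizeof(micCallback));

    AURenderCallbackStruct speakerCallback { &CoreAudioSharedUnit::speakerCallback, this };
    m_ioUnit->set(kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &speakerCallback, sizeof(speakerCallback));

The mock internal unit further below accepts exactly these two properties and invokes the stored procs from its render timer, which is how it drives the same code paths as a real audio unit.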
Added: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.h (0 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.h	                        (rev 0)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.h	2022-05-12 13:56:19 UTC (rev 294102)
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2017-2022 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MEDIA_STREAM)
+
+#include "AudioSampleDataSource.h"
+#include "BaseAudioSharedUnit.h"
+#include "CAAudioStreamDescription.h"
+#include "Timer.h"
+
+typedef UInt32 AudioUnitPropertyID;
+typedef UInt32 AudioUnitScope;
+typedef UInt32 AudioUnitElement;
+typedef UInt32 AudioUnitRenderActionFlags;
+
+struct AudioBufferList;
+struct AudioTimeStamp;
+
+namespace WebCore {
+
+class CoreAudioSpeakerSamplesProducer;
+
+class CoreAudioSharedUnit final : public BaseAudioSharedUnit {
+public:
+    class InternalUnit {
+    public:
+        virtual ~InternalUnit() = default;
+        virtual OSStatus initialize() = 0;
+        virtual OSStatus uninitialize() = 0;
+        virtual OSStatus start() = 0;
+        virtual OSStatus stop() = 0;
+        virtual OSStatus set(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, const void*, UInt32) = 0;
+        virtual OSStatus get(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, void*, UInt32*) = 0;
+        virtual OSStatus render(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*) = 0;
+        virtual OSStatus defaultInputDevice(uint32_t*) = 0;
+        virtual OSStatus defaultOutputDevice(uint32_t*) = 0;
+        virtual void delaySamples(Seconds) { }
+    };
+
+    static CoreAudioSharedUnit& unit();
+    static BaseAudioSharedUnit& singleton() { return unit(); }
+    CoreAudioSharedUnit();
+
+    using CreationCallback = Function<Expected<UniqueRef<InternalUnit>, OSStatus>()>;
+    void setInternalUnitCreationCallback(CreationCallback&& callback) { m_creationCallback = WTFMove(callback); }
+    using GetSampleRateCallback = Function<int()>;
+    void setInternalUnitGetSampleRateCallback(GetSampleRateCallback&& callback) { m_getSampleRateCallback = WTFMove(callback); }
+
+    void registerSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer&);
+    void unregisterSpeakerSamplesProducer(CoreAudioSpeakerSamplesProducer&);
+    bool isRunning() const { return m_ioUnitStarted; }
+    void setSampleRateRange(CapabilityValueOrRange range) { m_sampleRateCapabilities = range; }
+
+private:
+    static size_t preferredIOBufferSize();
+
+    CapabilityValueOrRange sampleRateCapacities() const final { return m_sampleRateCapabilities; }
+    const CAAudioStreamDescription& microphoneFormat() const { return m_microphoneProcFormat; }
+
+    bool hasAudioUnit() const final { return !!m_ioUnit; }
+    void captureDeviceChanged() final;
+    OSStatus reconfigureAudioUnit() final;
+    void delaySamples(Seconds) final;
+
+    OSStatus setupAudioUnit();
+    void cleanupAudioUnit() final;
+
+    OSStatus startInternal() final;
+    void stopInternal() final;
+    bool isProducingData() const final { return m_ioUnitStarted; }
+    void isProducingMicrophoneSamplesChanged() final;
+    void validateOutputDevice(uint32_t deviceID) final;
+    int actualSampleRate() const final;
+    void resetSampleRate();
+
+    OSStatus configureSpeakerProc(int sampleRate);
+    OSStatus configureMicrophoneProc(int sampleRate);
+
+    static OSStatus microphoneCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
+    OSStatus processMicrophoneSamples(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
+
+    static OSStatus speakerCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
+    OSStatus provideSpeakerData(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList&);
+
+    void unduck();
+
+    void verifyIsCapturing();
+
+    Seconds verifyCaptureInterval() { return isProducingMicrophoneSamples() ? 10_s : 2_s; }
+
+    CreationCallback m_creationCallback;
+    GetSampleRateCallback m_getSampleRateCallback;
+    std::unique_ptr<InternalUnit> m_ioUnit;
+
+    // Only read/modified from the IO thread.
+    Vector<Ref<AudioSampleDataSource>> m_activeSources;
+
+    CAAudioStreamDescription m_microphoneProcFormat;
+    RefPtr<AudioSampleBufferList> m_microphoneSampleBuffer;
+    uint64_t m_latestMicTimeStamp { 0 };
+
+    CAAudioStreamDescription m_speakerProcFormat;
+
+    double m_DTSConversionRatio { 0 };
+
+    bool m_ioUnitInitialized { false };
+    bool m_ioUnitStarted { false };
+
+    mutable std::unique_ptr<RealtimeMediaSourceCapabilities> m_capabilities;
+    mutable std::optional<RealtimeMediaSourceSettings> m_currentSettings;
+
+#if !LOG_DISABLED
+    void checkTimestamps(const AudioTimeStamp&, uint64_t, double);
+
+    String m_ioUnitName;
+#endif
+
+    CapabilityValueOrRange m_sampleRateCapabilities;
+
+    uint64_t m_microphoneProcsCalled { 0 };
+    uint64_t m_microphoneProcsCalledLastTime { 0 };
+    Timer m_verifyCapturingTimer;
+
+    bool m_isReconfiguring { false };
+    mutable Lock m_speakerSamplesProducerLock;
+    CoreAudioSpeakerSamplesProducer* m_speakerSamplesProducer WTF_GUARDED_BY_LOCK(m_speakerSamplesProducerLock) { nullptr };
+};
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_STREAM)

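The InternalUnit interface is what decouples CoreAudioSharedUnit from CoreAudio proper: the shared unit only ever calls initialize/start/stop/set/get/render, so a test can substitute any implementation through the creation callback. A minimal sketch of plugging in a custom unit, where MyInternalUnit is a hypothetical subclass (MockAudioSharedUnit below uses this exact registration pattern):

    class MyInternalUnit final : public CoreAudioSharedUnit::InternalUnit {
        // Override all pure virtuals: initialize(), uninitialize(), start(),
        // stop(), set(), get(), render(), defaultInputDevice(), defaultOutputDevice().
    };

    CoreAudioSharedUnit::unit().setInternalUnitCreationCallback([] {
        UniqueRef<CoreAudioSharedUnit::InternalUnit> result = makeUniqueRef<MyInternalUnit>();
        return result;
    });
    CoreAudioSharedUnit::unit().setInternalUnitGetSampleRateCallback([] { return 48000; });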
Modified: trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.h (294101 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.h	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.h	2022-05-12 13:56:19 UTC (rev 294102)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2022 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,73 +27,16 @@
 
 #if ENABLE(MEDIA_STREAM)
 
-#include "BaseAudioSharedUnit.h"
-#include <CoreAudio/CoreAudioTypes.h>
-#include <wtf/RunLoop.h>
-#include <wtf/Vector.h>
-#include <wtf/WorkQueue.h>
+#include "CoreAudioSharedUnit.h"
 
-OBJC_CLASS AVAudioPCMBuffer;
-typedef struct OpaqueCMClock* CMClockRef;
-typedef const struct opaqueCMFormatDescription* CMFormatDescriptionRef;
-
 namespace WebCore {
 
-class WebAudioBufferList;
-class WebAudioSourceProviderCocoa;
+namespace MockAudioSharedUnit {
 
-class MockAudioSharedUnit final : public BaseAudioSharedUnit {
-public:
-    WEBCORE_EXPORT static MockAudioSharedUnit& singleton();
-    MockAudioSharedUnit();
+CoreAudioSharedUnit& singleton();
 
-    void setDeviceID(const String& deviceID) { setCaptureDevice(String { deviceID }, 0); }
+}
 
-private:
-    bool hasAudioUnit() const final;
-    void captureDeviceChanged() final;
-    OSStatus reconfigureAudioUnit() final;
-    void resetSampleRate() final;
-
-    void cleanupAudioUnit() final;
-    OSStatus startInternal() final;
-    void stopInternal() final;
-    bool isProducingData() const final;
-
-    void delaySamples(Seconds) final;
-
-    void start();
-    CapabilityValueOrRange sampleRateCapacities() const final { return CapabilityValueOrRange(44100, 48000); }
-
-    void tick();
-
-    void render(MonotonicTime);
-    void emitSampleBuffers(uint32_t frameCount);
-    void reconfigure();
-
-    static Seconds renderInterval() { return 20_ms; }
-
-    std::unique_ptr<WebAudioBufferList> m_audioBufferList;
-
-    uint32_t m_maximiumFrameCount;
-    uint64_t m_samplesEmitted { 0 };
-    uint64_t m_samplesRendered { 0 };
-
-    RetainPtr<CMFormatDescriptionRef> m_formatDescription;
-    AudioStreamBasicDescription m_streamFormat;
-
-    Vector<float> m_bipBopBuffer;
-    bool m_hasAudioUnit { false };
-    bool m_isProducingData { false };
-
-    RunLoop::Timer<MockAudioSharedUnit> m_timer;
-    MonotonicTime m_lastRenderTime { MonotonicTime::nan() };
-    MonotonicTime m_delayUntil;
-
-    Ref<WorkQueue> m_workQueue;
-    unsigned m_channelCount { 2 };
-};
-
 } // namespace WebCore
 
 #endif // ENABLE(MEDIA_STREAM)

Modified: trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.mm (294101 => 294102)


--- trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.mm	2022-05-12 13:49:11 UTC (rev 294101)
+++ trunk/Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.mm	2022-05-12 13:56:19 UTC (rev 294102)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2022 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -43,6 +43,9 @@
 #import <AVFoundation/AVAudioBuffer.h>
 #import <AudioToolbox/AudioConverter.h>
 #import <CoreAudio/CoreAudioTypes.h>
+#include <wtf/RunLoop.h>
+#include <wtf/Vector.h>
+#include <wtf/WorkQueue.h>
 
 #import <pal/cf/AudioToolboxSoftLink.h>
 #import <pal/cf/CoreMediaSoftLink.h>
@@ -90,67 +93,111 @@
     if (!device)
         return { "No mock microphone device"_s };
 
-    MockAudioSharedUnit::singleton().setDeviceID(deviceID);
+    MockAudioSharedUnit::singleton().setCaptureDevice(String { deviceID }, 0);
     return CoreAudioCaptureSource::createForTesting(WTFMove(deviceID), WTFMove(name), WTFMove(hashSalt), constraints, MockAudioSharedUnit::singleton(), pageIdentifier);
 }
 
-MockAudioSharedUnit& MockAudioSharedUnit::singleton()
-{
-    static NeverDestroyed<MockAudioSharedUnit> singleton;
-    return singleton;
-}
+class MockAudioSharedInternalUnit : public CoreAudioSharedUnit::InternalUnit {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    MockAudioSharedInternalUnit();
+    ~MockAudioSharedInternalUnit();
 
-MockAudioSharedUnit::MockAudioSharedUnit()
-    : m_timer(RunLoop::current(), this, &MockAudioSharedUnit::start)
-    , m_workQueue(WorkQueue::create("MockAudioSharedUnit Capture Queue", WorkQueue::QOS::UserInteractive))
-{
-}
+private:
+    OSStatus initialize() final;
+    OSStatus uninitialize() final { return 0; }
+    OSStatus start() final;
+    OSStatus stop() final;
+    OSStatus set(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, const void*, UInt32) final;
+    OSStatus get(AudioUnitPropertyID, AudioUnitScope, AudioUnitElement, void*, UInt32*) final;
+    OSStatus render(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*) final;
+    OSStatus defaultInputDevice(uint32_t*) final;
+    OSStatus defaultOutputDevice(uint32_t*) final;
+    void delaySamples(Seconds) final;
 
-void MockAudioSharedUnit::resetSampleRate()
+    int sampleRate() const { return m_streamFormat.mSampleRate; }
+    void tick();
+
+    void generateSampleBuffers(MonotonicTime);
+    void emitSampleBuffers(uint32_t frameCount);
+    void reconfigure();
+
+    static Seconds renderInterval() { return 20_ms; }
+
+    std::unique_ptr<WebAudioBufferList> m_audioBufferList;
+
+    uint32_t m_maximiumFrameCount;
+    uint64_t m_samplesEmitted { 0 };
+    uint64_t m_samplesRendered { 0 };
+
+    RetainPtr<CMFormatDescriptionRef> m_formatDescription;
+    AudioStreamBasicDescription m_outputStreamFormat;
+    AudioStreamBasicDescription m_streamFormat;
+
+    Vector<float> m_bipBopBuffer;
+    bool m_hasAudioUnit { false };
+    bool m_isProducingData { false };
+    bool m_enableEchoCancellation { true };
+    RunLoop::Timer<MockAudioSharedInternalUnit> m_timer;
+    MonotonicTime m_lastRenderTime { MonotonicTime::nan() };
+    MonotonicTime m_delayUntil;
+
+    Ref<WorkQueue> m_workQueue;
+    unsigned m_channelCount { 2 };
+
+    AURenderCallbackStruct m_microphoneCallback;
+    AURenderCallbackStruct m_speakerCallback;
+};
+
+CoreAudioSharedUnit& MockAudioSharedUnit::singleton()
 {
-    if (auto device = MockRealtimeMediaSourceCenter::mockDeviceWithPersistentID(persistentID()))
-        setSampleRate(std::get<MockMicrophoneProperties>(device->properties).defaultSampleRate);
+    static NeverDestroyed<CoreAudioSharedUnit> unit;
+    static std::once_flag onceFlag;
+    std::call_once(onceFlag, [&] () {
+        unit->setSampleRateRange(CapabilityValueOrRange(44100, 48000));
+        unit->setInternalUnitCreationCallback([] {
+            UniqueRef<CoreAudioSharedUnit::InternalUnit> result = makeUniqueRef<MockAudioSharedInternalUnit>();
+            return result;
+        });
+        unit->setInternalUnitGetSampleRateCallback([] { return 44100; });
+    });
+    return unit;
 }
 
-bool MockAudioSharedUnit::hasAudioUnit() const
+static AudioStreamBasicDescription createAudioFormat(Float64 sampleRate, UInt32 channelCount)
 {
-    return m_hasAudioUnit;
+    AudioStreamBasicDescription format;
+    const int bytesPerFloat = sizeof(Float32);
+    const int bitsPerByte = 8;
+    const bool isFloat = true;
+    const bool isBigEndian = false;
+    const bool isNonInterleaved = true;
+    FillOutASBDForLPCM(format, sampleRate, channelCount, bitsPerByte * bytesPerFloat, bitsPerByte * bytesPerFloat, isFloat, isBigEndian, isNonInterleaved);
+    return format;
 }
 
-void MockAudioSharedUnit::captureDeviceChanged()
+MockAudioSharedInternalUnit::MockAudioSharedInternalUnit()
+    : m_timer(RunLoop::current(), [this] { this->start(); })
+    , m_workQueue(WorkQueue::create("MockAudioSharedInternalUnit Capture Queue", WorkQueue::QOS::UserInteractive))
 {
-    reconfigureAudioUnit();
+    m_streamFormat = m_outputStreamFormat = createAudioFormat(44100, 2);
 }
 
-OSStatus MockAudioSharedUnit::reconfigureAudioUnit()
+MockAudioSharedInternalUnit::~MockAudioSharedInternalUnit()
 {
-    if (!hasAudioUnit())
-        return 0;
-
-    m_lastRenderTime = MonotonicTime::nan();
-    m_workQueue->dispatch([this] {
-        reconfigure();
-        callOnMainThread([this] {
-            startInternal();
-        });
-    });
-    return 0;
+    ASSERT(!m_isProducingData);
 }
 
-void MockAudioSharedUnit::cleanupAudioUnit()
+OSStatus MockAudioSharedInternalUnit::initialize()
 {
-    m_hasAudioUnit = false;
-    m_isProducingData = false;
-    m_lastRenderTime = MonotonicTime::nan();
-}
+    ASSERT(m_outputStreamFormat.mSampleRate == m_streamFormat.mSampleRate);
+    if (m_outputStreamFormat.mSampleRate != m_streamFormat.mSampleRate)
+        return -1;
 
-OSStatus MockAudioSharedUnit::startInternal()
-{
-    start();
     return 0;
 }
 
-void MockAudioSharedUnit::start()
+OSStatus MockAudioSharedInternalUnit::start()
 {
     if (!m_hasAudioUnit)
         m_hasAudioUnit = true;
@@ -158,30 +205,29 @@
     m_lastRenderTime = MonotonicTime::now();
     m_isProducingData = true;
     m_workQueue->dispatch([this, renderTime = m_lastRenderTime] {
-        render(renderTime);
+        generateSampleBuffers(renderTime);
     });
+    return 0;
 }
 
-void MockAudioSharedUnit::stopInternal()
+OSStatus MockAudioSharedInternalUnit::stop()
 {
     m_isProducingData = false;
-    if (!m_hasAudioUnit)
-        return;
-    m_lastRenderTime = MonotonicTime::nan();
-}
+    if (m_hasAudioUnit)
+        m_lastRenderTime = MonotonicTime::nan();
 
-bool MockAudioSharedUnit::isProducingData() const
-{
-    return m_isProducingData;
+    m_workQueue->dispatchSync([] { });
+
+    return 0;
 }
 
-void MockAudioSharedUnit::delaySamples(Seconds delta)
+void MockAudioSharedInternalUnit::delaySamples(Seconds delta)
 {
-    stopInternal();
+    stop();
     m_timer.startOneShot(delta);
 }
 
-void MockAudioSharedUnit::reconfigure()
+void MockAudioSharedInternalUnit::reconfigure()
 {
     ASSERT(!isMainThread());
 
@@ -191,14 +237,6 @@
     m_maximiumFrameCount = WTF::roundUpToPowerOfTwo(renderInterval().seconds() * rate * 2);
     ASSERT(m_maximiumFrameCount);
 
-    const int bytesPerFloat = sizeof(Float32);
-    const int bitsPerByte = 8;
-    const int channelCount = m_channelCount;
-    const bool isFloat = true;
-    const bool isBigEndian = false;
-    const bool isNonInterleaved = true;
-    FillOutASBDForLPCM(m_streamFormat, rate, channelCount, bitsPerByte * bytesPerFloat, bitsPerByte * bytesPerFloat, isFloat, isBigEndian, isNonInterleaved);
-
     m_audioBufferList = makeUnique<WebAudioBufferList>(m_streamFormat, m_maximiumFrameCount);
 
     CMFormatDescriptionRef formatDescription;
@@ -215,25 +253,38 @@
 
     addHum(BipBopVolume, BipFrequency, rate, 0, m_bipBopBuffer.data() + bipStart, bipBopSampleCount);
     addHum(BipBopVolume, BopFrequency, rate, 0, m_bipBopBuffer.data() + bopStart, bipBopSampleCount);
-    if (!enableEchoCancellation())
+    if (!m_enableEchoCancellation)
         addHum(NoiseVolume, NoiseFrequency, rate, 0, m_bipBopBuffer.data(), sampleCount);
 }
 
-void MockAudioSharedUnit::emitSampleBuffers(uint32_t frameCount)
+void MockAudioSharedInternalUnit::emitSampleBuffers(uint32_t frameCount)
 {
     ASSERT(!isMainThread());
     ASSERT(m_formatDescription);
 
     CMTime startTime = PAL::CMTimeMake(m_samplesEmitted, sampleRate());
+    auto sampleTime = PAL::CMTimeGetSeconds(startTime);
     m_samplesEmitted += frameCount;
 
-    audioSamplesAvailable(PAL::toMediaTime(startTime), *m_audioBufferList, CAAudioStreamDescription(m_streamFormat), frameCount);
+    auto* bufferList = m_audioBufferList->list();
+    AudioUnitRenderActionFlags ioActionFlags = 0;
+
+    AudioTimeStamp timeStamp;
+    memset(&timeStamp, 0, sizeof(AudioTimeStamp));
+    timeStamp.mSampleTime = sampleTime;
+    timeStamp.mHostTime = static_cast<UInt64>(sampleTime);
+    if (m_microphoneCallback.inputProc)
+        m_microphoneCallback.inputProc(m_microphoneCallback.inputProcRefCon, &ioActionFlags, &timeStamp, 1, frameCount, bufferList);
+
+    ioActionFlags = 0;
+    if (m_speakerCallback.inputProc)
+        m_speakerCallback.inputProc(m_speakerCallback.inputProcRefCon, &ioActionFlags, &timeStamp, 1, frameCount, bufferList);
 }
 
-void MockAudioSharedUnit::render(MonotonicTime renderTime)
+void MockAudioSharedInternalUnit::generateSampleBuffers(MonotonicTime renderTime)
 {
     ASSERT(!isMainThread());
-    if (!isProducingData())
+    if (!m_isProducingData)
         return;
 
     auto delta = renderInterval();
@@ -245,7 +296,7 @@
         nextRenderDelay = 0_s;
     }
     m_workQueue->dispatchAfter(nextRenderDelay, [this, nextRenderTime] {
-        render(nextRenderTime);
+        generateSampleBuffers(nextRenderTime);
     });
 
     if (!m_audioBufferList || !m_bipBopBuffer.size())
@@ -270,6 +321,80 @@
     }
 }
 
+OSStatus MockAudioSharedInternalUnit::render(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32 frameCount, AudioBufferList* buffer)
+{
+    auto* sourceBuffer = m_audioBufferList->list();
+    if (buffer->mNumberBuffers > sourceBuffer->mNumberBuffers)
+        return kAudio_ParamError;
+
+    for (uint32_t i = 0; i < buffer->mNumberBuffers; i++) {
+        auto* source = static_cast<uint8_t*>(sourceBuffer->mBuffers[i].mData);
+        auto* destination = static_cast<uint8_t*>(buffer->mBuffers[i].mData);
+        memcpy(destination, source, frameCount * m_streamFormat.mBytesPerPacket);
+    }
+
+    return 0;
+}
+
+OSStatus MockAudioSharedInternalUnit::set(AudioUnitPropertyID property, AudioUnitScope scope, AudioUnitElement, const void* value, UInt32)
+{
+    if (property == kAudioUnitProperty_StreamFormat) {
+        auto& typedValue = *static_cast<const AudioStreamBasicDescription*>(value);
+        if (scope == kAudioUnitScope_Input)
+            m_streamFormat = typedValue;
+        else
+            m_outputStreamFormat = typedValue;
+        return 0;
+    }
+    if (property == kAUVoiceIOProperty_VoiceProcessingEnableAGC) {
+        m_enableEchoCancellation = !!*static_cast<const uint32_t*>(value);
+        return 0;
+    }
+    if (property == kAudioOutputUnitProperty_SetInputCallback) {
+        m_microphoneCallback = *static_cast<const AURenderCallbackStruct*>(value);
+        return 0;
+    }
+    if (property == kAudioUnitProperty_SetRenderCallback) {
+        m_speakerCallback = *static_cast<const AURenderCallbackStruct*>(value);
+        return 0;
+    }
+    if (property == kAudioOutputUnitProperty_CurrentDevice) {
+        ASSERT(!*static_cast<const uint32_t*>(value));
+        if (auto device = MockRealtimeMediaSourceCenter::mockDeviceWithPersistentID(MockAudioSharedUnit::singleton().persistentIDForTesting()))
+            m_streamFormat.mSampleRate = m_outputStreamFormat.mSampleRate = std::get<MockMicrophoneProperties>(device->properties).defaultSampleRate;
+        return 0;
+    }
+
+    return 0;
+}
+
+OSStatus MockAudioSharedInternalUnit::get(AudioUnitPropertyID property, AudioUnitScope scope, AudioUnitElement, void* value, UInt32* valueSize)
+{
+    if (property == kAudioUnitProperty_StreamFormat) {
+        auto& typedValue = *static_cast<AudioStreamBasicDescription*>(value);
+        if (scope == kAudioUnitScope_Input)
+            typedValue = m_streamFormat;
+        else
+            typedValue = m_outputStreamFormat;
+        *valueSize = sizeof(AudioStreamBasicDescription);
+        return 0;
+    }
+
+    return 0;
+}
+
+OSStatus MockAudioSharedInternalUnit::defaultInputDevice(uint32_t* device)
+{
+    *device = 0;
+    return 0;
+}
+
+OSStatus MockAudioSharedInternalUnit::defaultOutputDevice(uint32_t* device)
+{
+    *device = 0;
+    return 0;
+}
+
 } // namespace WebCore
 
 #endif // ENABLE(MEDIA_STREAM)
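A closing note on the new sample-rate validation: the mock keeps separate input and output stream formats and has initialize() fail when their rates disagree, which is what lets fast/mediastream/audio-unit-reconfigure.html drive a reconfigure. A rough illustration of that failure mode through the InternalUnit interface (illustrative only; createAudioFormat is the static helper above, and the shared unit normally drives set()/initialize() itself rather than a caller doing it by hand):

    UniqueRef<CoreAudioSharedUnit::InternalUnit> unit = makeUniqueRef<MockAudioSharedInternalUnit>();
    AudioStreamBasicDescription input = createAudioFormat(48000, 2);
    AudioStreamBasicDescription output = createAudioFormat(44100, 2);
    unit->set(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &input, sizeof(input));
    unit->set(kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &output, sizeof(output));
    OSStatus status = unit->initialize(); // fails (debug-asserts, returns non-zero) on the 48000 vs. 44100 mismatch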