Modified: trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp (268726 => 268727)
--- trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp 2020-10-20 12:19:27 UTC (rev 268726)
+++ trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp 2020-10-20 13:19:42 UTC (rev 268727)
@@ -37,6 +37,51 @@
namespace WebCore {
+GST_DEBUG_CATEGORY(webkit_audio_destination_debug);
+#define GST_CAT_DEFAULT webkit_audio_destination_debug
+
+static void initializeDebugCategory()
+{
+ static std::once_flag onceFlag;
+ std::call_once(onceFlag, [] {
+ GST_DEBUG_CATEGORY_INIT(webkit_audio_destination_debug, "webkitaudiodestination", 0, "WebKit WebAudio Destination");
+ });
+}
+
+static unsigned long maximumNumberOfOutputChannels()
+{
+ initializeDebugCategory();
+
+ static int count = 0;
+ static std::once_flag onceFlag;
+ std::call_once(onceFlag, [] {
+ auto monitor = adoptGRef(gst_device_monitor_new());
+ auto caps = adoptGRef(gst_caps_new_empty_simple("audio/x-raw"));
+ gst_device_monitor_add_filter(monitor.get(), "Audio/Sink", caps.get());
+ gst_device_monitor_start(monitor.get());
+ auto* devices = gst_device_monitor_get_devices(monitor.get());
+ while (devices) {
+ auto device = adoptGRef(GST_DEVICE_CAST(devices->data));
+ auto caps = adoptGRef(gst_device_get_caps(device.get()));
+ unsigned size = gst_caps_get_size(caps.get());
+ for (unsigned i = 0; i < size; i++) {
+ auto* structure = gst_caps_get_structure(caps.get(), i);
+ if (!g_str_equal(gst_structure_get_name(structure), "audio/x-raw"))
+ continue;
+ int value;
+ if (!gst_structure_get_int(structure, "channels", &value))
+ continue;
+ count = std::max(count, value);
+ }
+ devices = g_list_delete_link(devices, devices);
+ }
+ GST_DEBUG("maximumNumberOfOutputChannels: %d", count);
+ gst_device_monitor_stop(monitor.get());
+ });
+
+ return count;
+}
+
gboolean messageCallback(GstBus*, GstMessage* message, AudioDestinationGStreamer* destination)
{
return destination->handleMessage(message);
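
For context: the new maximumNumberOfOutputChannels() asks GstDeviceMonitor for every "Audio/Sink" device and keeps the largest fixed "channels" value advertised in the device caps. Note that gst_structure_get_int() only succeeds when the field holds a plain integer, so caps advertising a channel range are skipped. A minimal standalone sketch of the same probing pattern, using only the public GStreamer API (the main() scaffolding is illustrative, not part of the patch):

    #include <gst/gst.h>
    #include <algorithm>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        gst_init(&argc, &argv);

        // Watch only devices that expose raw audio sinks, as the patch does.
        GstDeviceMonitor* monitor = gst_device_monitor_new();
        GstCaps* filter = gst_caps_new_empty_simple("audio/x-raw");
        gst_device_monitor_add_filter(monitor, "Audio/Sink", filter);
        gst_caps_unref(filter);
        gst_device_monitor_start(monitor);

        int maxChannels = 0;
        GList* devices = gst_device_monitor_get_devices(monitor);
        for (GList* item = devices; item; item = item->next) {
            GstCaps* caps = gst_device_get_caps(GST_DEVICE_CAST(item->data));
            for (unsigned i = 0; i < gst_caps_get_size(caps); i++) {
                const GstStructure* structure = gst_caps_get_structure(caps, i);
                int channels;
                // Fails (and is skipped) when "channels" is a range, not an int.
                if (g_str_equal(gst_structure_get_name(structure), "audio/x-raw")
                    && gst_structure_get_int(structure, "channels", &channels))
                    maxChannels = std::max(maxChannels, channels);
            }
            gst_caps_unref(caps);
        }
        g_list_free_full(devices, gst_object_unref);
        gst_device_monitor_stop(monitor);
        gst_object_unref(monitor);

        printf("maximum output channels: %d\n", maxChannels);
        return 0;
    }
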
@@ -50,17 +95,14 @@
Ref<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
{
+ initializeDebugCategory();
// FIXME: make use of inputDeviceId as appropriate.
// FIXME: Add support for local/live audio input.
if (numberOfInputChannels)
- LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled input channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
+ WTFLogAlways("AudioDestination::create(%u, %u, %f) - unhandled input channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
- // FIXME: Add support for multi-channel (> stereo) output.
- if (numberOfOutputChannels != 2)
- LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
-
- return adoptRef(*new AudioDestinationGStreamer(callback, sampleRate));
+ return adoptRef(*new AudioDestinationGStreamer(callback, numberOfOutputChannels, sampleRate));
}
float AudioDestination::hardwareSampleRate()
@@ -70,19 +112,16 @@
unsigned long AudioDestination::maxChannelCount()
{
- // FIXME: query the default audio hardware device to return the actual number
- // of channels of the device. Also see corresponding FIXME in create().
- return 0;
+ return maximumNumberOfOutputChannels();
}
-AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback, float sampleRate)
+AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback, unsigned long numberOfOutputChannels, float sampleRate)
: m_callback(callback)
- , m_renderBus(AudioBus::create(2, AudioUtilities::renderQuantumSize, false))
+ , m_renderBus(AudioBus::create(numberOfOutputChannels, AudioUtilities::renderQuantumSize, false))
, m_sampleRate(sampleRate)
- , m_isPlaying(false)
{
- m_pipeline = gst_pipeline_new("play");
- GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+ m_pipeline = gst_pipeline_new("audio-destination");
+ GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
ASSERT(bus);
gst_bus_add_signal_watch_full(bus.get(), RunLoopSourcePriority::RunLoopDispatcher);
g_signal_connect(bus.get(), "message", G_CALLBACK(messageCallback), this);
@@ -93,7 +132,7 @@
GRefPtr<GstElement> audioSink = createPlatformAudioSink();
m_audioSinkAvailable = audioSink;
if (!audioSink) {
- LOG_ERROR("Failed to create GStreamer audio sink element");
+ GST_ERROR("Failed to create GStreamer audio sink element");
return;
}
@@ -107,7 +146,7 @@
// audiosink was loaded correctly.
GstStateChangeReturn stateChangeReturn = gst_element_set_state(audioSink.get(), GST_STATE_READY);
if (stateChangeReturn == GST_STATE_CHANGE_FAILURE) {
- LOG_ERROR("Failed to change autoaudiosink element state");
+ GST_ERROR("Failed to change autoaudiosink element state");
gst_element_set_state(audioSink.get(), GST_STATE_NULL);
m_audioSinkAvailable = false;
return;
@@ -116,7 +155,7 @@
GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
- gst_bin_add_many(GST_BIN_CAST(m_pipeline), m_src.get(), audioConvert, audioResample, audioSink.get(), nullptr);
+ gst_bin_add_many(GST_BIN_CAST(m_pipeline.get()), m_src.get(), audioConvert, audioResample, audioSink.get(), nullptr);
// Link src pads from webkitAudioSrc to audioConvert ! audioResample ! autoaudiosink.
gst_element_link_pads_full(m_src.get(), "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
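
The resulting topology is webkitwebaudiosrc ! audioconvert ! audioresample ! sink, with the sink bumped to READY first so a broken audio backend is caught before the pipeline ever starts. An equivalent throwaway pipeline, assuming audiotestsrc stands in for the WebKit-internal source element:

    #include <gst/gst.h>

    // Same shape as AudioDestinationGStreamer's pipeline; audiotestsrc is a
    // stand-in for webkitwebaudiosrc, which only exists inside WebKit.
    static GstElement* buildDestinationLikePipeline()
    {
        GstElement* sink = gst_element_factory_make("autoaudiosink", nullptr);

        // Probe the sink to READY first: failure means no usable audio
        // backend, mirroring the early return in the constructor above.
        if (gst_element_set_state(sink, GST_STATE_READY) == GST_STATE_CHANGE_FAILURE) {
            gst_element_set_state(sink, GST_STATE_NULL);
            gst_object_unref(sink);
            return nullptr;
        }

        GstElement* pipeline = gst_pipeline_new("audio-destination");
        GstElement* src = gst_element_factory_make("audiotestsrc", nullptr);
        GstElement* convert = gst_element_factory_make("audioconvert", nullptr);
        GstElement* resample = gst_element_factory_make("audioresample", nullptr);

        gst_bin_add_many(GST_BIN(pipeline), src, convert, resample, sink, nullptr);
        gst_element_link_many(src, convert, resample, sink, nullptr);
        return pipeline;
    }
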
@@ -126,13 +165,12 @@
AudioDestinationGStreamer::~AudioDestinationGStreamer()
{
- GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+ GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
ASSERT(bus);
g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
gst_bus_remove_signal_watch(bus.get());
- gst_element_set_state(m_pipeline, GST_STATE_NULL);
- gst_object_unref(m_pipeline);
+ gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
}
unsigned AudioDestinationGStreamer::framesPerBuffer() const
@@ -153,9 +191,26 @@
case GST_MESSAGE_ERROR:
gst_message_parse_error(message, &error.outPtr(), &debug.outPtr());
g_warning("Error: %d, %s. Debug output: %s", error->code, error->message, debug.get());
- gst_element_set_state(m_pipeline, GST_STATE_NULL);
+ gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
m_isPlaying = false;
break;
+ case GST_MESSAGE_STATE_CHANGED:
+ if (GST_MESSAGE_SRC(message) == GST_OBJECT(m_pipeline.get())) {
+ GstState oldState, newState, pending;
+ gst_message_parse_state_changed(message, &oldState, &newState, &pending);
+
+ GST_INFO_OBJECT(m_pipeline.get(), "State changed (old: %s, new: %s, pending: %s)",
+ gst_element_state_get_name(oldState),
+ gst_element_state_get_name(newState),
+ gst_element_state_get_name(pending));
+
+ WTF::String dotFileName = makeString(GST_OBJECT_NAME(m_pipeline.get()), '_',
+ gst_element_state_get_name(oldState), '_',
+ gst_element_state_get_name(newState));
+
+ GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN_CAST(m_pipeline.get()), GST_DEBUG_GRAPH_SHOW_ALL, dotFileName.utf8().data());
+ }
+ break;
default:
break;
}
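
GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS() is a no-op unless the GST_DEBUG_DUMP_DOT_DIR environment variable points at a writable directory, so the new state-changed branch costs little in production while yielding one pipeline graph per state transition during debugging (render with dot -Tsvg). The same pattern in a standalone bus watch, as a sketch:

    #include <gst/gst.h>

    // Dump one pipeline graph per top-level state change. Files land in
    // $GST_DEBUG_DUMP_DOT_DIR; the macro silently does nothing otherwise.
    static gboolean onBusMessage(GstBus*, GstMessage* message, gpointer userData)
    {
        GstElement* pipeline = GST_ELEMENT(userData);
        if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_STATE_CHANGED
            && GST_MESSAGE_SRC(message) == GST_OBJECT(pipeline)) {
            GstState oldState, newState, pending;
            gst_message_parse_state_changed(message, &oldState, &newState, &pending);
            gchar* name = g_strdup_printf("%s_%s_%s", GST_OBJECT_NAME(pipeline),
                gst_element_state_get_name(oldState), gst_element_state_get_name(newState));
            GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, name);
            g_free(name);
        }
        return TRUE; // keep watching
    }

    // Usage: gst_bus_add_watch(bus, onBusMessage, pipeline);
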
@@ -171,12 +226,12 @@
if (dispatchToRenderThread)
webkitWebAudioSourceSetDispatchToRenderThreadCallback(WEBKIT_WEB_AUDIO_SRC(m_src.get()), WTFMove(dispatchToRenderThread));
- if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
+ GST_DEBUG("Starting");
+ if (gst_element_set_state(m_pipeline.get(), GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
g_warning("Error: Failed to set pipeline to playing");
m_isPlaying = false;
return;
}
-
m_isPlaying = true;
}
@@ -186,7 +241,8 @@
if (!m_audioSinkAvailable)
return;
- gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
+ GST_DEBUG("Stopping");
+ gst_element_set_state(m_pipeline.get(), GST_STATE_PAUSED);
m_isPlaying = false;
}
Modified: trunk/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp (268726 => 268727)
--- trunk/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp 2020-10-20 12:19:27 UTC (rev 268726)
+++ trunk/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp 2020-10-20 13:19:42 UTC (rev 268727)
@@ -27,7 +27,7 @@
#include <gst/audio/audio-info.h>
#include <gst/base/gstadapter.h>
-#if ENABLE(MEDIA_STREAM) && USE(LIBWEBRTC)
+#if ENABLE(MEDIA_STREAM)
#include "GStreamerAudioData.h"
#include "GStreamerMediaStreamSource.h"
#endif
@@ -34,14 +34,18 @@
namespace WebCore {
-// For now the provider supports only stereo files at a fixed sample
-// bitrate.
-static const int gNumberOfChannels = 2;
+// For now the provider supports only files at a fixed sample bitrate.
static const float gSampleBitRate = 44100;
-static GstFlowReturn onAppsinkNewBufferCallback(GstAppSink* sink, gpointer userData)
+GST_DEBUG_CATEGORY(webkit_audio_provider_debug);
+#define GST_CAT_DEFAULT webkit_audio_provider_debug
+
+static void initializeDebugCategory()
{
- return static_cast<AudioSourceProviderGStreamer*>(userData)->handleAudioBuffer(sink);
+ static std::once_flag onceFlag;
+ std::call_once(onceFlag, [] {
+ GST_DEBUG_CATEGORY_INIT(webkit_audio_provider_debug, "webkitaudioprovider", 0, "WebKit WebAudio Provider");
+ });
}
static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider)
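
Both files now register a per-subsystem GStreamer debug category behind std::call_once, so output can be enabled selectively, e.g. GST_DEBUG=webkitaudioprovider:6 or GST_DEBUG=webkitaudiodestination:6. The general shape of the pattern (names here are illustrative):

    #include <gst/gst.h>
    #include <mutex>

    GST_DEBUG_CATEGORY_STATIC(my_subsystem_debug);
    #define GST_CAT_DEFAULT my_subsystem_debug

    // One-time, thread-safe registration; assumes gst_init() already ran.
    static void ensureDebugCategory()
    {
        static std::once_flag onceFlag;
        std::call_once(onceFlag, [] {
            GST_DEBUG_CATEGORY_INIT(my_subsystem_debug, "mysubsystem", 0, "Example subsystem");
        });
    }

    static void doWork()
    {
        ensureDebugCategory();
        GST_DEBUG("work started"); // shows up under GST_DEBUG=mysubsystem:<level>
    }
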
@@ -59,27 +63,16 @@
provider->handleRemovedDeinterleavePad(pad);
}
-static GstPadProbeReturn onAppsinkFlushCallback(GstPad*, GstPadProbeInfo* info, gpointer userData)
-{
- if (GST_PAD_PROBE_INFO_TYPE(info) & (GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH)) {
- GstEvent* event = GST_PAD_PROBE_INFO_EVENT(info);
- if (GST_EVENT_TYPE(event) == GST_EVENT_FLUSH_STOP) {
- AudioSourceProviderGStreamer* provider = reinterpret_cast<AudioSourceProviderGStreamer*>(userData);
- provider->clearAdapters();
- }
- }
- return GST_PAD_PROBE_OK;
-}
-
static void copyGStreamerBuffersToAudioChannel(GstAdapter* adapter, AudioBus* bus, int channelNumber, size_t framesToProcess)
{
- if (!gst_adapter_available(adapter)) {
+ auto available = gst_adapter_available(adapter);
+ if (!available) {
bus->zero();
return;
}
size_t bytes = framesToProcess * sizeof(float);
- if (gst_adapter_available(adapter) >= bytes) {
+ if (available >= bytes) {
gst_adapter_copy(adapter, bus->channel(channelNumber)->mutableData(), 0, bytes);
gst_adapter_flush(adapter, bytes);
} else
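
A GstAdapter coalesces arbitrarily sized incoming GstBuffers into one byte stream, which is what lets provideInput() always read exact framesToProcess-sized quanta regardless of how the producer chunked the data. The push/copy/flush cycle in isolation, assuming float samples:

    #include <gst/gst.h>
    #include <gst/base/gstadapter.h>

    static void adapterRoundTrip()
    {
        GstAdapter* adapter = gst_adapter_new();

        // Producer side: queue a buffer of any size; the adapter takes ownership.
        GstBuffer* buffer = gst_buffer_new_allocate(nullptr, 256 * sizeof(float), nullptr);
        gst_adapter_push(adapter, buffer);

        // Consumer side: pull one fixed 128-frame quantum, if enough arrived.
        float quantum[128];
        size_t bytes = 128 * sizeof(float);
        if (gst_adapter_available(adapter) >= bytes) {
            gst_adapter_copy(adapter, quantum, 0, bytes);
            gst_adapter_flush(adapter, bytes); // discard the consumed bytes
        }

        g_object_unref(adapter);
    }
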
@@ -88,35 +81,23 @@
AudioSourceProviderGStreamer::AudioSourceProviderGStreamer()
: m_notifier(MainThreadNotifier<MainThreadNotification>::create())
- , m_client(nullptr)
- , m_deinterleaveSourcePads(0)
- , m_deinterleavePadAddedHandlerId(0)
- , m_deinterleaveNoMorePadsHandlerId(0)
- , m_deinterleavePadRemovedHandlerId(0)
{
- m_frontLeftAdapter = gst_adapter_new();
- m_frontRightAdapter = gst_adapter_new();
+ initializeDebugCategory();
}
-#if ENABLE(MEDIA_STREAM) && USE(LIBWEBRTC)
+#if ENABLE(MEDIA_STREAM)
AudioSourceProviderGStreamer::AudioSourceProviderGStreamer(MediaStreamTrackPrivate& source)
: m_notifier(MainThreadNotifier<MainThreadNotification>::create())
- , m_client(nullptr)
- , m_deinterleaveSourcePads(0)
- , m_deinterleavePadAddedHandlerId(0)
- , m_deinterleaveNoMorePadsHandlerId(0)
- , m_deinterleavePadRemovedHandlerId(0)
{
- m_frontLeftAdapter = gst_adapter_new();
- m_frontRightAdapter = gst_adapter_new();
+ initializeDebugCategory();
auto pipelineName = makeString("WebAudioProvider_MediaStreamTrack_", source.id());
- m_pipeline = adoptGRef(GST_ELEMENT(g_object_ref_sink(gst_element_factory_make("pipeline", pipelineName.utf8().data()))));
+ m_pipeline = gst_element_factory_make("pipeline", pipelineName.utf8().data());
auto src = webkitMediaStreamSrcNew();
webkitMediaStreamSrcAddTrack(WEBKIT_MEDIA_STREAM_SRC(src), &source, true);
- m_audioSinkBin = adoptGRef(GST_ELEMENT(g_object_ref_sink(gst_parse_bin_from_description("tee name=audioTee", true, nullptr))));
+ m_audioSinkBin = gst_parse_bin_from_description("tee name=audioTee", true, nullptr);
- gst_bin_add_many(GST_BIN(m_pipeline.get()), src, m_audioSinkBin.get(), nullptr);
+ gst_bin_add_many(GST_BIN_CAST(m_pipeline.get()), src, m_audioSinkBin.get(), nullptr);
gst_element_link(src, m_audioSinkBin.get());
connectSimpleBusMessageCallback(m_pipeline.get());
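
gst_parse_bin_from_description() with ghost_unlinked_pads=TRUE wraps the parsed elements in a bin and exposes the tee's unlinked sink pad as a ghost pad, which is why the plain gst_element_link(src, m_audioSinkBin.get()) above works. A sketch of the same construction:

    #include <gst/gst.h>

    static void buildTeeBin()
    {
        GstElement* pipeline = gst_pipeline_new("provider");
        GstElement* src = gst_element_factory_make("audiotestsrc", nullptr);

        // TRUE ghosts the unlinked pads, giving the bin a linkable sink pad.
        GError* error = nullptr;
        GstElement* bin = gst_parse_bin_from_description("tee name=audioTee", TRUE, &error);
        if (!bin) {
            g_clear_error(&error);
            gst_object_unref(src);
            gst_object_unref(pipeline);
            return;
        }

        gst_bin_add_many(GST_BIN(pipeline), src, bin, nullptr);
        gst_element_link(src, bin);

        // Elements parsed from the description stay reachable by name.
        GstElement* tee = gst_bin_get_by_name(GST_BIN(bin), "audioTee");
        gst_object_unref(tee);
        gst_object_unref(pipeline);
    }
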
@@ -134,11 +115,10 @@
g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadRemovedHandlerId);
}
+#if ENABLE(MEDIA_STREAM)
if (m_pipeline)
gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
-
- g_object_unref(m_frontLeftAdapter);
- g_object_unref(m_frontRightAdapter);
+#endif
}
void AudioSourceProviderGStreamer::configureAudioBin(GstElement* audioBin, GstElement* audioSink)
@@ -174,48 +154,36 @@
void AudioSourceProviderGStreamer::provideInput(AudioBus* bus, size_t framesToProcess)
{
auto locker = holdLock(m_adapterMutex);
- copyGStreamerBuffersToAudioChannel(m_frontLeftAdapter, bus, 0, framesToProcess);
- copyGStreamerBuffersToAudioChannel(m_frontRightAdapter, bus, 1, framesToProcess);
+ for (auto& it : m_adapters)
+ copyGStreamerBuffersToAudioChannel(it.value.get(), bus, it.key - 1, framesToProcess);
}
-GstFlowReturn AudioSourceProviderGStreamer::handleAudioBuffer(GstAppSink* sink)
+GstFlowReturn AudioSourceProviderGStreamer::handleSample(GstAppSink* sink, bool isPreroll)
{
+ auto sample = adoptGRef(isPreroll ? gst_app_sink_try_pull_preroll(sink, 0) : gst_app_sink_try_pull_sample(sink, 0));
+ if (!sample)
+ return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;
+
if (!m_client)
return GST_FLOW_OK;
- // Pull a buffer from appsink and store it the appropriate buffer
- // list for the audio channel it represents.
- GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
- if (!sample)
- return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;
-
GstBuffer* buffer = gst_sample_get_buffer(sample.get());
if (!buffer)
return GST_FLOW_ERROR;
- GstCaps* caps = gst_sample_get_caps(sample.get());
- if (!caps)
- return GST_FLOW_ERROR;
-
- GstAudioInfo info;
- gst_audio_info_from_caps(&info, caps);
-
- auto locker = holdLock(m_adapterMutex);
-
- // Check the first audio channel. The buffer is supposed to store
- // data of a single channel anyway.
- switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
- case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
- case GST_AUDIO_CHANNEL_POSITION_MONO:
- gst_adapter_push(m_frontLeftAdapter, gst_buffer_ref(buffer));
- break;
- case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
- gst_adapter_push(m_frontRightAdapter, gst_buffer_ref(buffer));
- break;
- default:
- break;
+ {
+ auto locker = holdLock(m_adapterMutex);
+ GQuark quark = g_quark_from_static_string("channel-id");
+ int channelId = GPOINTER_TO_INT(g_object_get_qdata(G_OBJECT(sink), quark));
+ auto result = m_adapters.ensure(channelId, [&] {
+ return gst_adapter_new();
+ });
+ auto* adapter = result.iterator->value.get();
+ gst_adapter_push(adapter, gst_buffer_ref(buffer));
}
+ if (gst_app_sink_is_eos(sink))
+ return GST_FLOW_EOS;
return GST_FLOW_OK;
}
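
handleSample() replaces the hard-coded left/right adapters with a HashMap keyed by a channel id that is stashed on each appsink through GObject qdata, so one callback can serve any number of channels. The tag-and-recover pattern in isolation (tagSinkWithChannel is a hypothetical helper):

    #include <gst/gst.h>
    #include <gst/app/gstappsink.h>

    // At setup time, tag each appsink with its 1-based channel number...
    static void tagSinkWithChannel(GstElement* appsink, int channelId)
    {
        GQuark quark = g_quark_from_static_string("channel-id");
        g_object_set_qdata(G_OBJECT(appsink), quark, GINT_TO_POINTER(channelId));
    }

    // ...and recover it in the new-sample callback, where only the
    // GstAppSink pointer is available.
    static GstFlowReturn onNewSample(GstAppSink* sink, gpointer)
    {
        GstSample* sample = gst_app_sink_try_pull_sample(sink, 0);
        if (!sample)
            return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;

        GQuark quark = g_quark_from_static_string("channel-id");
        int channelId = GPOINTER_TO_INT(g_object_get_qdata(G_OBJECT(sink), quark));
        g_print("sample for channel %d\n", channelId);

        gst_sample_unref(sample);
        return GST_FLOW_OK;
    }
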
@@ -227,8 +195,10 @@
ASSERT(client);
m_client = client;
+#if ENABLE(MEDIA_STREAM)
if (m_pipeline)
gst_element_set_state(m_pipeline.get(), GST_STATE_PLAYING);
+#endif
// The volume element is used to mute audio playback towards the
// autoaudiosink. This is needed to avoid double playback of audio
@@ -253,18 +223,14 @@
m_deinterleaveNoMorePadsHandlerId = g_signal_connect(deInterleave, "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);
m_deinterleavePadRemovedHandlerId = g_signal_connect(deInterleave, "pad-removed", G_CALLBACK(onGStreamerDeinterleavePadRemovedCallback), this);
- GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
- "channels", G_TYPE_INT, gNumberOfChannels,
- "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
- "layout", G_TYPE_STRING, "interleaved", nullptr);
+ auto caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
+ "format", G_TYPE_STRING, GST_AUDIO_NE(F32), "layout", G_TYPE_STRING, "interleaved", nullptr));
+ g_object_set(capsFilter, "caps", caps.get(), nullptr);
- g_object_set(capsFilter, "caps", caps, nullptr);
- gst_caps_unref(caps);
+ gst_bin_add_many(GST_BIN_CAST(m_audioSinkBin.get()), audioQueue, audioConvert, audioResample, capsFilter, deInterleave, nullptr);
- gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioQueue, audioConvert, audioResample, capsFilter, deInterleave, nullptr);
+ auto audioTee = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "audioTee"));
- GRefPtr<GstElement> audioTee = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "audioTee"));
-
// Link a new src pad from tee to queue ! audioconvert !
// audioresample ! capsfilter ! deinterleave. Later
// on each deinterleaved planar audio channel will be routed to an
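
Dropping the "channels" field from the capsfilter is the change that actually lifts the stereo restriction: the caps now pin only rate, sample format, and interleaved layout, and deinterleave fans out however many channels negotiate. Building such caps, as a sketch:

    #include <gst/gst.h>
    #include <gst/audio/audio.h>

    static GstElement* makeLooseAudioCapsFilter()
    {
        GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr);

        // No "channels" field: any channel count can negotiate through.
        GstCaps* caps = gst_caps_new_simple("audio/x-raw",
            "rate", G_TYPE_INT, 44100,
            "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
            "layout", G_TYPE_STRING, "interleaved", nullptr);
        g_object_set(capsFilter, "caps", caps, nullptr);
        gst_caps_unref(caps);
        return capsFilter;
    }
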
@@ -284,91 +250,78 @@
void AudioSourceProviderGStreamer::handleNewDeinterleavePad(GstPad* pad)
{
- m_deinterleaveSourcePads++;
+ GST_DEBUG("New pad %" GST_PTR_FORMAT, pad);
- if (m_deinterleaveSourcePads > 2) {
- g_warning("The AudioSourceProvider supports only mono and stereo audio. Silencing out this new channel.");
- GstElement* queue = gst_element_factory_make("queue", nullptr);
- GstElement* sink = gst_element_factory_make("fakesink", nullptr);
- g_object_set(sink, "async", FALSE, nullptr);
- gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);
-
- GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
- gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
-
- GQuark quark = g_quark_from_static_string("peer");
- g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
- gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_sync_state_with_parent(queue);
- gst_element_sync_state_with_parent(sink);
- return;
- }
-
// A new pad for a planar channel was added in deinterleave. Plug
// in an appsink so we can pull the data from each
// channel. Pipeline looks like:
- // ... deinterleave ! queue ! appsink.
- GstElement* queue = gst_element_factory_make("queue", nullptr);
+ // ... deinterleave ! appsink.
GstElement* sink = gst_element_factory_make("appsink", nullptr);
- GstAppSinkCallbacks callbacks;
- callbacks.eos = nullptr;
- callbacks.new_preroll = nullptr;
- callbacks.new_sample = onAppsinkNewBufferCallback;
+ static GstAppSinkCallbacks callbacks = {
+ nullptr,
+ [](GstAppSink* sink, gpointer userData) -> GstFlowReturn {
+ return static_cast<AudioSourceProviderGStreamer*>(userData)->handleSample(sink, true);
+ },
+ [](GstAppSink* sink, gpointer userData) -> GstFlowReturn {
+ return static_cast<AudioSourceProviderGStreamer*>(userData)->handleSample(sink, false);
+ },
+ { nullptr }
+ };
gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr);
-
g_object_set(sink, "async", FALSE, nullptr);
- GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
- "channels", G_TYPE_INT, 1,
- "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
- "layout", G_TYPE_STRING, "interleaved", nullptr));
-
+ auto caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
+ "channels", G_TYPE_INT, 1, "format", G_TYPE_STRING, GST_AUDIO_NE(F32), "layout", G_TYPE_STRING, "interleaved", nullptr));
gst_app_sink_set_caps(GST_APP_SINK(sink), caps.get());
- gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);
+ gst_bin_add(GST_BIN_CAST(m_audioSinkBin.get()), sink);
- GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
+ auto sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink"));
gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
GQuark quark = g_quark_from_static_string("peer");
g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
- gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ m_deinterleaveSourcePads++;
+ GQuark channelIdQuark = g_quark_from_static_string("channel-id");
+ g_object_set_qdata(G_OBJECT(sink), channelIdQuark, GINT_TO_POINTER(m_deinterleaveSourcePads));
sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink"));
- gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, onAppsinkFlushCallback, this, nullptr);
+ gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, [](GstPad*, GstPadProbeInfo* info, gpointer userData) {
+ if (GST_PAD_PROBE_INFO_TYPE(info) & (GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH)) {
+ GstEvent* event = GST_PAD_PROBE_INFO_EVENT(info);
+ if (GST_EVENT_TYPE(event) == GST_EVENT_FLUSH_STOP) {
+ auto* provider = reinterpret_cast<AudioSourceProviderGStreamer*>(userData);
+ provider->clearAdapters();
+ }
+ }
+ return GST_PAD_PROBE_OK;
+ }, this, nullptr);
- gst_element_sync_state_with_parent(queue);
gst_element_sync_state_with_parent(sink);
}
void AudioSourceProviderGStreamer::handleRemovedDeinterleavePad(GstPad* pad)
{
+ GST_DEBUG("Pad %" GST_PTR_FORMAT " gone", pad);
m_deinterleaveSourcePads--;
- // Remove the queue ! appsink chain downstream of deinterleave.
GQuark quark = g_quark_from_static_string("peer");
GstPad* sinkPad = GST_PAD_CAST(g_object_get_qdata(G_OBJECT(pad), quark));
if (!sinkPad)
return;
- GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(sinkPad));
- GRefPtr<GstPad> queueSrcPad = adoptGRef(gst_element_get_static_pad(queue.get(), "src"));
- GRefPtr<GstPad> appsinkSinkPad = adoptGRef(gst_pad_get_peer(queueSrcPad.get()));
- GRefPtr<GstElement> sink = adoptGRef(gst_pad_get_parent_element(appsinkSinkPad.get()));
+ auto sink = adoptGRef(gst_pad_get_parent_element(sinkPad));
gst_element_set_state(sink.get(), GST_STATE_NULL);
- gst_element_set_state(queue.get(), GST_STATE_NULL);
- gst_element_unlink(queue.get(), sink.get());
- gst_bin_remove_many(GST_BIN(m_audioSinkBin.get()), queue.get(), sink.get(), nullptr);
+ gst_bin_remove(GST_BIN_CAST(m_audioSinkBin.get()), sink.get());
}
void AudioSourceProviderGStreamer::deinterleavePadsConfigured()
{
+ GST_DEBUG("Deinterleave configured, notifying client");
m_notifier->notify(MainThreadNotification::DeinterleavePadsConfigured, [this] {
ASSERT(m_client);
- ASSERT(m_deinterleaveSourcePads == gNumberOfChannels);
-
m_client->setFormat(m_deinterleaveSourcePads, gSampleBitRate);
});
}
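
With the intermediate queue gone, each planar channel now flows deinterleave ! appsink, and one appsink is created on demand in the pad-added handler. A condensed standalone sketch of that dynamic-pad pattern:

    #include <gst/gst.h>

    // deinterleave emits one src pad per channel once input caps are known;
    // attach a fresh appsink to each pad as it appears.
    static void onPadAdded(GstElement* /* deinterleave */, GstPad* pad, gpointer userData)
    {
        GstBin* bin = GST_BIN(userData);

        GstElement* sink = gst_element_factory_make("appsink", nullptr);
        g_object_set(sink, "async", FALSE, nullptr);
        gst_bin_add(bin, sink);

        GstPad* sinkPad = gst_element_get_static_pad(sink, "sink");
        gst_pad_link(pad, sinkPad);
        gst_object_unref(sinkPad);

        // Bring the new element up to the running pipeline's state.
        gst_element_sync_state_with_parent(sink);
    }

    // Usage: g_signal_connect(deinterleave, "pad-added", G_CALLBACK(onPadAdded), bin);
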
@@ -376,8 +329,8 @@
void AudioSourceProviderGStreamer::clearAdapters()
{
auto locker = holdLock(m_adapterMutex);
- gst_adapter_clear(m_frontLeftAdapter);
- gst_adapter_clear(m_frontRightAdapter);
+ for (auto& adapter : m_adapters.values())
+ gst_adapter_clear(adapter.get());
}
} // WebCore