Diff
Modified: trunk/LayoutTests/ChangeLog (273460 => 273461)
--- trunk/LayoutTests/ChangeLog 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/LayoutTests/ChangeLog 2021-02-25 00:57:53 UTC (rev 273461)
@@ -1,3 +1,12 @@
+2021-02-24 Jean-Yves Avenard <j...@apple.com>
+
+ [MSE] Media segment is incorrectly dropped when using negative timestampOffset or when source buffer appendWindow is set.
+ https://bugs.webkit.org/show_bug.cgi?id=222260
+
+ Reviewed by Eric Carlson.
+
+ * media/media-source/media-source-timestampoffset-trim.html:
+
2021-02-24 Chris Dumez <cdu...@apple.com>
Device motion / orientation events not working in third-party iframes despite Feature-Policy allowing it
Added: trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim-expected.txt (0 => 273461)
--- trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim-expected.txt (rev 0)
+++ trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim-expected.txt 2021-02-25 00:57:53 UTC (rev 273461)
@@ -0,0 +1,40 @@
+This tests that audio samples outside the appendWindow's interval are properly ignored.
+
+RUN(video.src = URL.createObjectURL(source))
+EVENT(sourceopen)
+RUN(sourceBuffer = source.addSourceBuffer(loader.type()))
+RUN(sourceBuffer.appendBuffer(loader.initSegment()))
+EVENT(update)
+RUN(sourceBuffer.timestampOffset = -(framesToBeDroppedBeginning * framesInAACPacket) / sampleRate)
+Append a media segment with a negative offset.
+RUN(sourceBuffer.appendBuffer(loader.mediaSegment(0)))
+EVENT(update)
+EXPECTED (sourceBuffer.buffered.length == '1') OK
+EXPECTED (sourceBuffer.buffered.start(0) == '0') OK
+EXPECTED (sourceBuffer.buffered.end(0) == '0.896') OK
+RUN(sourceBuffer.remove(0, Infinity))
+EVENT(update)
+RUN(sourceBuffer.timestampOffset = 0)
+RUN(sourceBuffer.appendWindowStart = 0)
+RUN(sourceBuffer.appendWindowEnd = Infinity)
+RUN(sourceBuffer.appendWindowStart = (framesToBeDroppedBeginning * framesInAACPacket) / sampleRate)
+Append a media segment with appendWindowStart set.
+RUN(sourceBuffer.appendBuffer(loader.mediaSegment(0)))
+EVENT(update)
+EXPECTED (sourceBuffer.buffered.length == '1') OK
+EXPECTED (sourceBuffer.buffered.start(0) == '0.10666666666666667') OK
+EXPECTED (sourceBuffer.buffered.end(0) == '1.0026666666666666') OK
+RUN(sourceBuffer.remove(0, Infinity))
+EVENT(update)
+RUN(sourceBuffer.timestampOffset = 0)
+RUN(sourceBuffer.appendWindowStart = 0)
+RUN(sourceBuffer.appendWindowEnd = Infinity)
+RUN(sourceBuffer.appendWindowEnd = ((totalFramesInSegment - framesToBeDroppedEnd) * framesInAACPacket) / sampleRate)
+Append a media segment with appendWindowEnd set.
+RUN(sourceBuffer.appendBuffer(loader.mediaSegment(0)))
+EVENT(update)
+EXPECTED (sourceBuffer.buffered.length == '1') OK
+EXPECTED (sourceBuffer.buffered.start(0) == '0') OK
+EXPECTED (sourceBuffer.buffered.end(0) == '0.896') OK
+END OF TEST
+
Added: trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim.html (0 => 273461)
--- trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim.html (rev 0)
+++ trunk/LayoutTests/media/media-source/media-source-timestampoffset-trim.html 2021-02-25 00:57:53 UTC (rev 273461)
@@ -0,0 +1,89 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>media-source-timestampoffset-trim</title>
+ <script src="media-source-loader.js"></script>
+ <script src="../video-test.js"></script>
+ <script>
+ var loader;
+ var source;
+ var sourceBuffer;
+
+ const framesInAACPacket = 1024;
+ const sampleRate = 48000;
+ const totalFramesInSegment = 47;
+ const framesToBeDroppedBeginning = 5;
+ const framesToBeDroppedEnd = 5;
+
+ function resetSourceBuffer() {
+ run('sourceBuffer.timestampOffset = 0');
+ run('sourceBuffer.appendWindowStart = 0');
+ run('sourceBuffer.appendWindowEnd = Infinity');
+ }
+
+ function runTest() {
+ findMediaElement();
+
+ loader = new MediaSourceLoader('content/test-48khz-manifest.json');
+ loader.onerror = () => {
+ failTest('Media data loading failed');
+ };
+ loader.onload = async () => {
+ waitForEventAndFail('error');
+
+ source = new MediaSource();
+ run('video.src = URL.createObjectURL(source)');
+ await waitFor(source, 'sourceopen');
+
+ run('sourceBuffer = source.addSourceBuffer(loader.type())');
+ run('sourceBuffer.appendBuffer(loader.initSegment())');
+ await waitFor(sourceBuffer, 'update');
+
+ // We want the first framesToBeDroppedBeginning frames to be dropped, and only those.
+ run('sourceBuffer.timestampOffset = -(framesToBeDroppedBeginning * framesInAACPacket) / sampleRate');
+ consoleWrite('Append a media segment with a negative offset.')
+ run('sourceBuffer.appendBuffer(loader.mediaSegment(0))');
+ await waitFor(sourceBuffer, 'update');
+
+ testExpected('sourceBuffer.buffered.length', 1);
+ testExpected('sourceBuffer.buffered.start(0)', 0);
+ testExpected('sourceBuffer.buffered.end(0)' , ((totalFramesInSegment - framesToBeDroppedBeginning) * framesInAACPacket) / sampleRate);
+ run('sourceBuffer.remove(0, Infinity)');
+ await waitFor(sourceBuffer, 'update');
+
+ resetSourceBuffer();
+ // We want the first framesToBeDroppedBeginning frames to be dropped, and only those.
+ run('sourceBuffer.appendWindowStart = (framesToBeDroppedBeginning * framesInAACPacket) / sampleRate');
+ consoleWrite('Append a media segment with appendWindowStart set.')
+ run('sourceBuffer.appendBuffer(loader.mediaSegment(0))');
+ await waitFor(sourceBuffer, 'update');
+
+ testExpected('sourceBuffer.buffered.length', 1);
+ testExpected('sourceBuffer.buffered.start(0)', (framesToBeDroppedBeginning * framesInAACPacket) / sampleRate);
+ testExpected('sourceBuffer.buffered.end(0)' , (totalFramesInSegment * framesInAACPacket) / sampleRate);
+ run('sourceBuffer.remove(0, Infinity)');
+ await waitFor(sourceBuffer, 'update');
+
+ resetSourceBuffer();
+ // We want the last framesToBeDroppedEnd frames to be dropped, and only those.
+ run('sourceBuffer.appendWindowEnd = ((totalFramesInSegment - framesToBeDroppedEnd) * framesInAACPacket) / sampleRate');
+ consoleWrite('Append a media segment with appendWindowEnd set.')
+ run('sourceBuffer.appendBuffer(loader.mediaSegment(0))');
+ await waitFor(sourceBuffer, 'update');
+
+ testExpected('sourceBuffer.buffered.length', 1);
+ testExpected('sourceBuffer.buffered.start(0)', 0);
+ testExpected('sourceBuffer.buffered.end(0)' , ((totalFramesInSegment - framesToBeDroppedEnd) * framesInAACPacket) / sampleRate);
+
+ endTest();
+ };
+ }
+ </script>
+</head>
+<body onload="runTest()">
+ <div>
+ This tests that audio samples outside the appendWindow's interval are properly ignored.
+ </div>
+ <video controls></video>
+</body>
+</html>
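
For reference, the expected buffered ranges in the -expected.txt file above follow directly from the constants the test declares. A minimal standalone sketch of the arithmetic (plain C++, mirroring the test's variable names; not part of the patch):

    #include <cstdio>

    int main()
    {
        const double framesInAACPacket = 1024;        // audio frames per AAC packet
        const double sampleRate = 48000;              // Hz
        const double totalFramesInSegment = 47;       // AAC packets in the media segment
        const double framesToBeDroppedBeginning = 5;
        const double framesToBeDroppedEnd = 5;

        // Case 1: a negative timestampOffset shifts the first 5 packets before time 0, so they are trimmed.
        double offset = -(framesToBeDroppedBeginning * framesInAACPacket) / sampleRate;                 // ~ -0.10667 s
        double end1 = (totalFramesInSegment - framesToBeDroppedBeginning) * framesInAACPacket / sampleRate; // 0.896 s

        // Case 2: appendWindowStart trims the same 5 packets, without shifting the rest.
        double start2 = framesToBeDroppedBeginning * framesInAACPacket / sampleRate;                    // ~ 0.10667 s
        double end2 = totalFramesInSegment * framesInAACPacket / sampleRate;                            // ~ 1.00267 s

        // Case 3: appendWindowEnd trims the last 5 packets.
        double end3 = (totalFramesInSegment - framesToBeDroppedEnd) * framesInAACPacket / sampleRate;   // 0.896 s

        std::printf("offset=%.17g end1=%.17g start2=%.17g end2=%.17g end3=%.17g\n",
            offset, end1, start2, end2, end3);
        return 0;
    }
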
Modified: trunk/Source/WebCore/ChangeLog (273460 => 273461)
--- trunk/Source/WebCore/ChangeLog 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/ChangeLog 2021-02-25 00:57:53 UTC (rev 273461)
@@ -1,3 +1,33 @@
+2021-02-24 Jean-Yves Avenard <j...@apple.com>
+
+ [MSE] Media segment is incorrectly dropped when using negative timestampOffset or when source buffer appendWindow is set.
+ https://bugs.webkit.org/show_bug.cgi?id=222260
+
+ Reviewed by Eric Carlson.
+
+ Test: media/media-source/media-source-timestampoffset-trim.html
+
+ CoreMedia packs multiple audio frames together into a single CMSampleBuffer,
+ which allows for faster processing and easier insertion into the track buffer tree.
+ However, per the Media Source spec [1], a frame is to be dropped according to
+ its start time and duration. So if only the beginning of a MediaSample was to
+ be dropped, we would have incorrectly dropped the entire sample. We now split
+ the MediaSample if it is about to be dropped, so that all usable content is
+ inserted into the track buffer (a simplified sketch of this trimming step
+ follows this ChangeLog). Audio splicing isn't done yet, but this gets us
+ closer to it.
+
+ [1] https://w3c.github.io/media-source/#sourcebuffer-coded-frame-processing
+
+ * platform/graphics/SourceBufferPrivate.cpp:
+ (WebCore::SourceBufferPrivate::didReceiveSample):
+ * platform/graphics/SourceBufferPrivate.h:
+ * platform/graphics/avfoundation/objc/SourceBufferPrivateAVFObjC.mm:
+ (WebCore::SourceBufferPrivateAVFObjC::didParseInitializationData):
+ (WebCore::SourceBufferPrivateAVFObjC::didProvideMediaDataForTrackId):
+ * platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.cpp:
+ (WebCore::SourceBufferPrivateGStreamer::didReceiveSample):
+ * platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.h:
+
2021-02-24 Chris Dumez <cdu...@apple.com>
Unreviewed, fix build with the latest iOS SDK.
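
The trimming described in the ChangeLog entry above is implemented in SourceBufferPrivate::didReceiveSample (see the hunk below). As a rough illustration of the idea, here is a minimal standalone sketch; PackedSample and its divide() are hypothetical stand-ins for WebCore's MediaSample, assuming divide(t) splits a sample into the portions before and at-or-after time t:

    #include <memory>
    #include <utility>

    // Hypothetical stand-in for a packed audio MediaSample: a run of fixed-duration frames.
    struct PackedSample {
        double start;         // presentation time of the first frame, in seconds
        int frameCount;       // number of packed audio frames
        double frameDuration; // duration of one frame, in seconds

        double end() const { return start + frameCount * frameDuration; }

        // Split on a frame boundary at `time`: { frames entirely before time, the rest }.
        // Either half may be null when `time` falls outside the sample.
        std::pair<std::unique_ptr<PackedSample>, std::unique_ptr<PackedSample>> divide(double time) const
        {
            int framesBefore = 0;
            if (time >= end())
                framesBefore = frameCount;
            else if (time > start)
                framesBefore = static_cast<int>((time - start) / frameDuration);
            std::unique_ptr<PackedSample> before, after;
            if (framesBefore > 0)
                before = std::make_unique<PackedSample>(PackedSample { start, framesBefore, frameDuration });
            if (framesBefore < frameCount)
                after = std::make_unique<PackedSample>(PackedSample { start + framesBefore * frameDuration, frameCount - framesBefore, frameDuration });
            return { std::move(before), std::move(after) };
        }
    };

    // Trim to the append window [windowStart, windowEnd): keep the part at or after windowStart,
    // then the part of that which falls before windowEnd. Null means nothing survives the trim.
    std::unique_ptr<PackedSample> trimToAppendWindow(const PackedSample& sample, double windowStart, double windowEnd)
    {
        auto divided = sample.divide(windowStart);
        if (!divided.second)
            return nullptr; // the whole sample lies before the window
        divided = divided.second->divide(windowEnd);
        return std::move(divided.first); // may be null if nothing falls inside the window
    }

Note that the real patch trims in place on the loop's current sample and then continues the coded-frame-processing loop, which is why it first reverts the rounded timestamp offset that the loop had applied.
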
Modified: trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.cpp (273460 => 273461)
--- trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.cpp 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.cpp 2021-02-25 00:57:53 UTC (rev 273461)
@@ -816,7 +816,7 @@
return true;
}
-void SourceBufferPrivate::didReceiveSample(MediaSample& sample)
+void SourceBufferPrivate::didReceiveSample(Ref<MediaSample>&& originalSample)
{
if (!m_isAttached)
return;
@@ -842,6 +842,9 @@
// are run:
// 1. For each coded frame in the media segment run the following steps:
// 1.1. Loop Top
+
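+ // Hold the current sample in a local Ref so the append-window trimming below can replace it with a divided copy.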
+ Ref<MediaSample> sample = WTFMove(originalSample);
+
do {
MediaTime presentationTimestamp;
MediaTime decodeTimestamp;
@@ -850,7 +853,7 @@
// sample's duration for timestamp generation.
// 1.2 Let frame duration be a double precision floating point representation of the coded frame's
// duration in seconds.
- MediaTime frameDuration = sample.duration();
+ MediaTime frameDuration = sample->duration();
if (m_shouldGenerateTimestamps) {
// ↳ If generate timestamps flag equals true:
@@ -865,11 +868,11 @@
// ↳ Otherwise:
// 1. Let presentation timestamp be a double precision floating point representation of
// the coded frame's presentation timestamp in seconds.
- presentationTimestamp = sample.presentationTime();
+ presentationTimestamp = sample->presentationTime();
// 2. Let decode timestamp be a double precision floating point representation of the coded frame's
// decode timestamp in seconds.
- decodeTimestamp = sample.decodeTime();
+ decodeTimestamp = sample->decodeTime();
}
// 1.3 If mode equals "sequence" and group start timestamp is set, then run the following steps:
@@ -895,7 +898,7 @@
// NOTE: this is out-of-order, but we need TrackBuffer to be able to cache the results of timestamp offset rounding
// 1.5 Let track buffer equal the track buffer that the coded frame will be added to.
- AtomString trackID = sample.trackID();
+ AtomString trackID = sample->trackID();
auto it = m_trackBufferMap.find(trackID);
if (it == m_trackBufferMap.end()) {
// The client managed to append a sample with a trackID not present in the initialization
@@ -975,13 +978,13 @@
if (m_appendMode == SourceBufferAppendMode::Sequence) {
// Use the generated timestamps instead of the sample's timestamps.
- sample.setTimestamps(presentationTimestamp, decodeTimestamp);
+ sample->setTimestamps(presentationTimestamp, decodeTimestamp);
} else if (trackBuffer.roundedTimestampOffset) {
// Reflect the timestamp offset into the sample.
- sample.offsetTimestampsBy(trackBuffer.roundedTimestampOffset);
+ sample->offsetTimestampsBy(trackBuffer.roundedTimestampOffset);
}
- DEBUG_LOG(LOGIDENTIFIER, sample);
+ DEBUG_LOG(LOGIDENTIFIER, sample.get());
// 1.7 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;
@@ -993,6 +996,35 @@
// point flag to true, drop the coded frame, and jump to the top of the loop to start processing
// the next coded frame.
if (presentationTimestamp < m_appendWindowStart || frameEndTimestamp > m_appendWindowEnd) {
+ // 1.8 Note.
+ // Some implementations MAY choose to collect some of these coded frames with presentation
+ // timestamp less than appendWindowStart and use them to generate a splice at the first coded
+ // frame that has a presentation timestamp greater than or equal to appendWindowStart even if
+ // that frame is not a random access point. Supporting this requires multiple decoders or
+ // faster than real-time decoding so for now this behavior will not be a normative
+ // requirement.
+ // 1.9 Note.
+ // Some implementations MAY choose to collect coded frames with presentation timestamp less
+ // than appendWindowEnd and frame end timestamp greater than appendWindowEnd and use them to
+ // generate a splice across the portion of the collected coded frames within the append
+ // window at time of collection, and the beginning portion of later processed frames which
+ // only partially overlap the end of the collected coded frames. Supporting this requires
+ // multiple decoders or faster than real-time decoding so for now this behavior will not be a
+ // normative requirement. In conjunction with collecting coded frames that span
+ // appendWindowStart, implementations MAY thus support gapless audio splicing.
+ // Audio MediaSamples are typically made of multiple packed audio frames. Trim the sample so that it fits within the appendWindow.
+ if (sample->isDivisable()) {
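+ // divide(t) yields the portions of the sample before and at-or-after time t: keep the part at or
+ // after appendWindowStart, then the part of that which ends before appendWindowEnd.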
+ std::pair<RefPtr<MediaSample>, RefPtr<MediaSample>> replacementSamples = sample->divide(m_appendWindowStart);
+ if (replacementSamples.second) {
+ replacementSamples = replacementSamples.second->divide(m_appendWindowEnd);
+ if (replacementSamples.first) {
+ sample = replacementSamples.first.releaseNonNull();
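+ // The loop top will re-apply the timestamp offset when reprocessing the trimmed sample, so revert it here to avoid applying it twice.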
+ if (m_appendMode != SourceBufferAppendMode::Sequence && trackBuffer.roundedTimestampOffset)
+ sample->offsetTimestampsBy(-trackBuffer.roundedTimestampOffset);
+ continue;
+ }
+ }
+ }
trackBuffer.needRandomAccessFlag = true;
m_client->sourceBufferPrivateDidDropSample();
return;
@@ -1013,7 +1045,7 @@
if (trackBuffer.needRandomAccessFlag) {
// 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
// to the top of the loop to start processing the next coded frame.
- if (!sample.isSync()) {
+ if (!sample->isSync()) {
m_client->sourceBufferPrivateDidDropSample();
return;
}
@@ -1081,10 +1113,10 @@
// next I-frame. See <https://github.com/w3c/media-source/issues/187> for a discussion of
// how the MSE specification should handle this scenario.
do {
- if (!sample.isSync())
+ if (!sample->isSync())
break;
- DecodeOrderSampleMap::KeyType decodeKey(sample.decodeTime(), sample.presentationTime());
+ DecodeOrderSampleMap::KeyType decodeKey(sample->decodeTime(), sample->presentationTime());
auto nextSampleInDecodeOrder = trackBuffer.samples.decodeOrder().findSampleAfterDecodeKey(decodeKey);
if (nextSampleInDecodeOrder == trackBuffer.samples.decodeOrder().end())
break;
@@ -1151,7 +1183,7 @@
// NOTE: in the case of b-frames, the previous step may leave in place samples whose presentation
// timestamp < presentationTime, but whose decode timestamp >= decodeTime. These will eventually cause
// a decode error if left in place, so remove these samples as well.
- DecodeOrderSampleMap::KeyType decodeKey(sample.decodeTime(), sample.presentationTime());
+ DecodeOrderSampleMap::KeyType decodeKey(sample->decodeTime(), sample->presentationTime());
auto samplesWithHigherDecodeTimes = trackBuffer.samples.decodeOrder().findSamplesBetweenDecodeKeys(decodeKey, erasedSamples.decodeOrder().begin()->first);
if (samplesWithHigherDecodeTimes.first != samplesWithHigherDecodeTimes.second)
dependentSamples.insert(samplesWithHigherDecodeTimes.first, samplesWithHigherDecodeTimes.second);
@@ -1197,11 +1229,11 @@
// Note that adding a frame to the decode queue is no guarantee that it will be actually enqueued at that point.
// If the frame is after the discontinuity boundary, the enqueueing algorithm will hold it there until samples
// with earlier timestamps are enqueued. The decode queue is not FIFO, but rather an ordered map.
- DecodeOrderSampleMap::KeyType decodeKey(sample.decodeTime(), sample.presentationTime());
+ DecodeOrderSampleMap::KeyType decodeKey(sample->decodeTime(), sample->presentationTime());
if (trackBuffer.lastEnqueuedDecodeKey.first.isInvalid() || decodeKey > trackBuffer.lastEnqueuedDecodeKey) {
- trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample));
+ trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample.get()));
- if (trackBuffer.minimumEnqueuedPresentationTime.isValid() && sample.presentationTime() < trackBuffer.minimumEnqueuedPresentationTime)
+ if (trackBuffer.minimumEnqueuedPresentationTime.isValid() && sample->presentationTime() < trackBuffer.minimumEnqueuedPresentationTime)
trackBuffer.needsMinimumUpcomingPresentationTimeUpdating = true;
}
Modified: trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.h (273460 => 273461)
--- trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.h 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/platform/graphics/SourceBufferPrivate.h 2021-02-25 00:57:53 UTC (rev 273461)
@@ -161,7 +161,7 @@
void appendCompleted(bool parsingSucceeded, bool isEnded);
void reenqueSamples(const AtomString& trackID);
WEBCORE_EXPORT void didReceiveInitializationSegment(SourceBufferPrivateClient::InitializationSegment&&, CompletionHandler<void()>&&);
- WEBCORE_EXPORT void didReceiveSample(MediaSample&);
+ WEBCORE_EXPORT void didReceiveSample(Ref<MediaSample>&&);
void provideMediaData(const AtomString& trackID);
uint64_t totalTrackBufferSizeInBytes() const;
Modified: trunk/Source/WebCore/platform/graphics/avfoundation/objc/SourceBufferPrivateAVFObjC.mm (273460 => 273461)
--- trunk/Source/WebCore/platform/graphics/avfoundation/objc/SourceBufferPrivateAVFObjC.mm 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/platform/graphics/avfoundation/objc/SourceBufferPrivateAVFObjC.mm 2021-02-25 00:57:53 UTC (rev 273461)
@@ -409,7 +409,7 @@
auto& mediaSample = trackIdMediaSamplePair.second;
if (trackId == m_enabledVideoTrackID || m_audioRenderers.contains(trackId)) {
DEBUG_LOG(LOGIDENTIFIER, mediaSample.get());
- didReceiveSample(mediaSample);
+ didReceiveSample(WTFMove(mediaSample));
}
}
@@ -450,7 +450,7 @@
}
DEBUG_LOG(LOGIDENTIFIER, mediaSample.get());
- didReceiveSample(mediaSample);
+ didReceiveSample(WTFMove(mediaSample));
}
void SourceBufferPrivateAVFObjC::willProvideContentKeyRequestInitializationDataForTrackID(uint64_t trackID)
Modified: trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.cpp (273460 => 273461)
--- trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.cpp 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.cpp 2021-02-25 00:57:53 UTC (rev 273461)
@@ -186,9 +186,9 @@
SourceBufferPrivate::didReceiveInitializationSegment(WTFMove(initializationSegment), WTFMove(completionHandler));
}
-void SourceBufferPrivateGStreamer::didReceiveSample(MediaSample& sample)
+void SourceBufferPrivateGStreamer::didReceiveSample(Ref<MediaSample>&& sample)
{
- SourceBufferPrivate::didReceiveSample(sample);
+ SourceBufferPrivate::didReceiveSample(WTFMove(sample));
}
void SourceBufferPrivateGStreamer::didReceiveAllPendingSamples()
Modified: trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.h (273460 => 273461)
--- trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.h 2021-02-25 00:40:00 UTC (rev 273460)
+++ trunk/Source/WebCore/platform/graphics/gstreamer/mse/SourceBufferPrivateGStreamer.h 2021-02-25 00:57:53 UTC (rev 273461)
@@ -73,7 +73,7 @@
void notifyReadyForMoreSamples();
void didReceiveInitializationSegment(SourceBufferPrivateClient::InitializationSegment&&, CompletionHandler<void()>&&);
- void didReceiveSample(MediaSample&);
+ void didReceiveSample(Ref<MediaSample>&&);
void didReceiveAllPendingSamples();
void appendParsingFailed();