I've been playing with actual code today and have to say my
frustration is reducing. What I did is work toward complete
refactoring of the NetStream model, following design already
posted here.
To make the rewrite simpler, I got rid of some features which
we'll want to have, but which for now are implemented in such
a non-general way that they only disturb the refactoring.
In particular here's what is gone:
1) Drop any use of threads
2) Drop support for non-FLV formats
3) Drop audio support
Don't be scared, they will likely all get back, just in a safer form.
Audio support is really a consequence of threads, in that SDL mixer
runs in its own thread, so needs proper thread-safety before reintroducing.
As for non-FLV, the issue there is that we need the kind of interface
FLVParser exposes, which we don't have for non-FLV. I believe MediaParser
from libmedia should be it, just not ready yet.
So, now what I added:
1) A PlayHead class which takes care of automatically
advancing when all consumers have consumed the current position,
and of properly switching between PAUSE and PLAY mode (so that
the time cursor is always correct).
2) As-needed video decoding
And what I fixed:
1) NetStream.{getBytesLoaded/getBytesTotal} - the darker red fill on
old youtube player
2) NetStream.seek - 100% compatibility as far as I can see
Now I need your help about what to do with the current work.
For myself I'd commit it, as I don't like having uncommitted code around
(scared to lose it somehow), but how would you take a commit making
the FFMPEG (remember: non-default choice) NetStream handling:
- audio-less
- FLV-only supporting
- keeping GUI unresponsive on net burst or on seek-forward
Please let me know, before everything blows up :)
Patch attached.
--strk;
--
() ASCII Ribbon Campaign
/\ Keep it simple!
Index: libbase/FLVParser.cpp
===================================================================
RCS file: /sources/gnash/gnash/libbase/FLVParser.cpp,v
retrieving revision 1.38
diff -u -r1.38 FLVParser.cpp
--- libbase/FLVParser.cpp 21 May 2008 08:51:21 -0000 1.38
+++ libbase/FLVParser.cpp 23 May 2008 13:46:10 -0000
@@ -115,28 +115,26 @@
}
-boost::uint32_t FLVParser::getBufferLength()
+boost::uint32_t
+FLVParser::getBufferLength()
{
- boost::mutex::scoped_lock lock(_mutex);
-
if (_video) {
size_t size = _videoFrames.size();
if (size > 1 && size > _nextVideoFrame) {
- return _videoFrames.back()->timestamp -
_videoFrames[_nextVideoFrame]->timestamp;
+ return _videoFrames.back()->timestamp; // -
_videoFrames[_nextVideoFrame]->timestamp;
}
}
if (_audio) {
size_t size = _audioFrames.size();
if (size > 1 && size > _nextAudioFrame) {
- return _audioFrames.back()->timestamp -
_audioFrames[_nextAudioFrame]->timestamp;
+ return _audioFrames.back()->timestamp; // -
_audioFrames[_nextAudioFrame]->timestamp;
}
}
return 0;
}
-boost::uint16_t FLVParser::videoFrameRate()
+boost::uint16_t
+FLVParser::videoFrameRate()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// Make sure that there are parsed some frames
while(_videoFrames.size() < 2 && !_parsingComplete) {
parseNextTag();
@@ -150,10 +148,9 @@
}
-boost::uint32_t FLVParser::videoFrameDelay()
+boost::uint32_t
+FLVParser::videoFrameDelay()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no video in this FLV return 0
if (!_video && _lastParsedPosition > 0) return 0;
@@ -168,10 +165,9 @@
return _videoFrames[_nextVideoFrame-1]->timestamp -
_videoFrames[_nextVideoFrame-2]->timestamp;
}
-boost::uint32_t FLVParser::audioFrameDelay()
+boost::uint32_t
+FLVParser::audioFrameDelay()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no audio in this FLV return 0
if (!_audio && _lastParsedPosition > 0) return 0;
@@ -186,77 +182,11 @@
return _audioFrames[_nextAudioFrame-1]->timestamp -
_audioFrames[_nextAudioFrame-2]->timestamp;
}
-FLVFrame* FLVParser::nextMediaFrame()
-{
- boost::mutex::scoped_lock lock(_mutex);
-
- boost::uint32_t video_size = _videoFrames.size();
- boost::uint32_t audio_size = _audioFrames.size();
-
- if (audio_size <= _nextAudioFrame && video_size <= _nextVideoFrame)
- {
-
- // Parse a media frame if any left or if needed
- while(_videoFrames.size() <= _nextVideoFrame &&
_audioFrames.size() <= _nextAudioFrame && !_parsingComplete) {
- if (!parseNextTag()) break;
- }
- }
-
- // Find the next frame in the file
- bool audioReady = _audioFrames.size() > _nextAudioFrame;
- bool videoReady = _videoFrames.size() > _nextVideoFrame;
- bool useAudio = false;
-
- if (audioReady && videoReady) {
- useAudio = _audioFrames[_nextAudioFrame]->dataPosition <
_videoFrames[_nextVideoFrame]->dataPosition;
- } else if (!audioReady && videoReady) {
- useAudio = false;
- } else if (audioReady && !videoReady) {
- useAudio = true;
- } else {
- // If no frames are next we have reached EOF
- return NULL;
- }
-
- // Find the next frame in the file a return it
-
- if (useAudio) {
-
- FLVAudioFrameInfo* frameInfo = _audioFrames[_nextAudioFrame];
-
- std::auto_ptr<FLVFrame> frame = makeAudioFrame(_lt, *frameInfo);
- if ( ! frame.get() )
- {
- log_error("Could not make audio frame %d",
_nextAudioFrame);
- return 0;
- }
-
- _nextAudioFrame++;
- return frame.release(); // TODO: return by auto_ptr
-
- } else {
-
- FLVVideoFrameInfo* frameInfo = _videoFrames[_nextVideoFrame];
- std::auto_ptr<FLVFrame> frame = makeVideoFrame(_lt, *frameInfo);
- if ( ! frame.get() )
- {
- log_error("Could not make video frame %d",
_nextVideoFrame);
- return 0;
- }
-
- _nextVideoFrame++;
- return frame.release(); // TODO: return by auto_ptr
- }
-
-
-}
-
-FLVFrame* FLVParser::nextAudioFrame()
+FLVAudioFrameInfo*
+FLVParser::peekNextAudioFrameInfo()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no audio in this FLV return NULL
- if (!_audio && _lastParsedPosition > 0) return NULL;
+ if (!_audio && _lastParsedPosition > 0) return 0;
// Make sure that there are parsed enough frames to return the need
frame
while(_audioFrames.size() <= _nextAudioFrame && !_parsingComplete) {
@@ -264,9 +194,20 @@
}
// If the needed frame can't be parsed (EOF reached) return NULL
- if (_audioFrames.size() <= _nextAudioFrame || _audioFrames.size() == 0)
return NULL;
+ if (_audioFrames.empty() || _audioFrames.size() <= _nextAudioFrame)
+ {
+ return 0;
+ }
+
+ return _audioFrames[_nextAudioFrame];
+}
+
+FLVFrame*
+FLVParser::nextAudioFrame()
+{
+ FLVAudioFrameInfo* frameInfo = peekNextAudioFrameInfo();
+ if ( ! frameInfo ) return 0;
- FLVAudioFrameInfo* frameInfo = _audioFrames[_nextAudioFrame];
std::auto_ptr<FLVFrame> frame = makeAudioFrame(_lt, *frameInfo);
if ( ! frame.get() )
{
@@ -279,15 +220,14 @@
}
-FLVFrame* FLVParser::nextVideoFrame()
+FLVVideoFrameInfo*
+ FLVParser::peekNextVideoFrameInfo()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no video in this FLV return NULL
if (!_video && _lastParsedPosition > 0)
{
//gnash::log_debug("no video, or lastParserPosition > 0");
- return NULL;
+ return 0;
}
// Make sure that there are parsed enough frames to return the need
frame
@@ -297,14 +237,18 @@
}
// If the needed frame can't be parsed (EOF reached) return NULL
- if (_videoFrames.size() <= _nextVideoFrame || _videoFrames.size() == 0)
+ if (_videoFrames.empty() || _videoFrames.size() <= _nextVideoFrame)
{
//gnash::log_debug("The needed frame (%d) can't be parsed (EOF
reached)", _lastVideoFrame);
- return NULL;
+ return 0;
}
- // TODO: let a function do this
- FLVVideoFrameInfo* frameInfo = _videoFrames[_nextVideoFrame];
+ return _videoFrames[_nextVideoFrame];
+}
+
+FLVFrame* FLVParser::nextVideoFrame()
+{
+ FLVVideoFrameInfo* frameInfo = peekNextVideoFrameInfo();
std::auto_ptr<FLVFrame> frame = makeVideoFrame(_lt, *frameInfo);
if ( ! frame.get() )
{
@@ -483,8 +427,6 @@
FLVVideoInfo* FLVParser::getVideoInfo()
{
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no video in this FLV return NULL
if (!_video && _lastParsedPosition > 0) return NULL;
@@ -496,9 +438,6 @@
FLVAudioInfo* FLVParser::getAudioInfo()
{
-
- boost::mutex::scoped_lock lock(_mutex);
-
// If there are no audio in this FLV return NULL
if (!_audio && _lastParsedPosition > 0) return NULL;
@@ -511,10 +450,9 @@
return _audioInfo.get(); // may be null
}
-bool FLVParser::isTimeLoaded(boost::uint32_t time)
+bool
+FLVParser::isTimeLoaded(boost::uint32_t time)
{
- boost::mutex::scoped_lock lock(_mutex);
-
// Parse frames until the need time is found, or EOF
while (!_parsingComplete) {
if (!parseNextTag()) break;
@@ -536,10 +474,9 @@
}
-boost::uint32_t FLVParser::seek(boost::uint32_t time)
+boost::uint32_t
+FLVParser::seek(boost::uint32_t time)
{
- boost::mutex::scoped_lock lock(_mutex);
-
if (time == 0) {
if (_video) _nextVideoFrame = 0;
if (_audio) _nextAudioFrame = 0;
@@ -747,6 +684,18 @@
return (in[0] << 16) | (in[1] << 8) | in[2];
}
+boost::uint64_t
+FLVParser::getBytesLoaded() const
+{
+ return _lastParsedPosition;
+}
+
+boost::uint64_t
+FLVParser::getBytesTotal() const
+{
+ return _lt.get_size();
+}
+
} // end of gnash namespace
#undef PADDING_BYTES
Index: libbase/FLVParser.h
===================================================================
RCS file: /sources/gnash/gnash/libbase/FLVParser.h,v
retrieving revision 1.29
diff -u -r1.29 FLVParser.h
--- libbase/FLVParser.h 12 May 2008 08:33:15 -0000 1.29
+++ libbase/FLVParser.h 23 May 2008 13:46:10 -0000
@@ -26,7 +26,6 @@
#include "LoadThread.h"
#include "dsodefs.h"
#include <vector>
-#include <boost/thread/mutex.hpp>
#include <memory>
namespace gnash {
@@ -156,7 +155,7 @@
/// and fetching frames from there on, sequentially.
/// See seek(), nextVideoFrame(), nextAudioFrame() and nextMediaFrame().
///
-/// Input is received from a LoadThread object.
+/// Input is received from a tu_file object.
///
class DSOEXPORT FLVParser
{
@@ -210,33 +209,41 @@
/// Kills the parser...
~FLVParser();
- /// Return next media frame
+ /// \brief
+ /// Return the next audio frame info in the parsed buffer.
//
- /// Locks the _mutex
+ /// If no frame has been played before the first frame is returned.
+ /// If there is no more frames in the parsed buffer NULL is returned,
+ /// you can check with parsingCompleted() to know wheter this is due to
+ /// EOF reached.
///
- FLVFrame* nextMediaFrame();
+ FLVAudioFrameInfo* peekNextAudioFrameInfo();
/// \brief
- /// Returns the next audio frame in the parsed buffer.
+ /// Returns the next audio frame in the parsed buffer, advancing audio
cursor.
//
/// If no frame has been played before the first frame is returned.
/// If there is no more frames in the parsed buffer NULL is returned,
/// you can check with parsingCompleted() to know wheter this is due to
/// EOF reached.
///
- /// Locks the _mutex
- ///
FLVFrame* nextAudioFrame();
- /// \brief
- /// Returns the next video frame in the parsed buffer.
+ /// Returns the next video frame info in the parsed buffer.
//
/// If no frame has been played before the first frame is returned.
/// If there is no more frames in the parsed buffer NULL is returned.
/// you can check with parsingCompleted() to know wheter this is due to
/// EOF reached.
///
- /// Locks the _mutex
+ FLVVideoFrameInfo* peekNextVideoFrameInfo();
+
+ /// Returns the next video frame in the parsed buffer, advancing video
cursor.
+ //
+ /// If no frame has been played before the first frame is returned.
+ /// If there is no more frames in the parsed buffer NULL is returned.
+ /// you can check with parsingCompleted() to know wheter this is due to
+ /// EOF reached.
///
FLVFrame* nextVideoFrame();
@@ -249,8 +256,6 @@
/// Returns information about video in the stream.
//
- /// Locks the _mutex
- ///
/// The returned object is owned by the FLVParser object.
/// Can return NULL if video contains NO video frames.
/// Will block till either parsing finished or a video frame is found.
@@ -259,8 +264,6 @@
/// Returns a FLVAudioInfo class about the audiostream
//
- /// Locks the _mutex
- ///
FLVAudioInfo* getAudioInfo();
/// \brief
@@ -271,8 +274,6 @@
/// available in list of already the parsed frames, we
/// parse some more. This is used to check how much is buffered.
///
- /// Locks the _mutex
- ///
/// @param time
/// Timestamp, in milliseconds.
///
@@ -282,39 +283,42 @@
/// Seeks to the closest possible position the given position,
/// and returns the new position.
//
- /// Locks the _mutex
- ///
boost::uint32_t seek(boost::uint32_t);
/// Returns the framedelay from the last to the current
/// audioframe in milliseconds. This is used for framerate.
//
- /// Locks the _mutex
- ///
boost::uint32_t audioFrameDelay();
/// \brief
/// Returns the framedelay from the last to the current
/// videoframe in milliseconds.
//
- /// Locks the _mutex
- ///
boost::uint32_t videoFrameDelay();
/// Returns the framerate of the video
//
- /// Locks the _mutex
- ///
boost::uint16_t videoFrameRate();
/// Returns the "bufferlength", meaning the differens between the
/// current frames timestamp and the timestamp of the last parseable
/// frame. Returns the difference in milliseconds.
//
- /// Locks the _mutex
- ///
boost::uint32_t getBufferLength();
+ /// Parses next tag from the file
+ //
+ /// Returns true if something was parsed, false otherwise.
+ /// Sets _parsingComplete=true on end of file.
+ ///
+ bool parseNextTag();
+
+ /// Return number of bytes parsed so far
+ boost::uint64_t getBytesLoaded() const;
+
+ /// Return total number of bytes in input
+ boost::uint64_t getBytesTotal() const;
+
private:
/// seeks to the closest possible position the given position,
@@ -325,16 +329,6 @@
/// and returns the new position.
boost::uint32_t seekVideo(boost::uint32_t time);
-
- /// Parses next tag from the file
- //
- /// Returns true if something was parsed, false otherwise.
- /// Sets _parsingComplete=true on end of file.
- ///
- /// TODO: make public (seems useful for an external parsing driver)
- ///
- bool parseNextTag();
-
/// Parses the header of the file
bool parseHeader();
@@ -383,9 +377,6 @@
/// Audio stream is present
bool _video;
-
- /// Mutex to avoid problems with threads using the parser
- boost::mutex _mutex;
};
} // end of gnash namespace
Index: server/asobj/NetStream.cpp
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStream.cpp,v
retrieving revision 1.91
diff -u -r1.91 NetStream.cpp
--- server/asobj/NetStream.cpp 21 May 2008 16:48:15 -0000 1.91
+++ server/asobj/NetStream.cpp 23 May 2008 13:46:11 -0000
@@ -42,6 +42,8 @@
#include "namedStrings.h"
#include "movie_root.h"
+#include "VirtualClock.h" // for PlayHead
+
// Define the following macro to have status notification handling debugged
//#define GNASH_DEBUG_STATUS
@@ -641,4 +643,100 @@
}
#endif // GNASH_USE_GC
+// ------- PlayHead class --------
+PlayHead::PlayHead(VirtualClock* clockSource)
+ :
+ _position(0),
+ _state(PLAY_PLAYING),
+ _availableConsumers(0),
+ _positionConsumers(0),
+ _clockSource(clockSource)
+{
+ _clockOffset = _clockSource->elapsed();
+}
+
+void
+PlayHead::init(bool hasVideo, bool hasAudio)
+{
+ boost::uint64_t now = _clockSource->elapsed();
+ if ( hasVideo ) _availableConsumers |= CONSUMER_VIDEO;
+ if ( hasAudio ) _availableConsumers |= CONSUMER_AUDIO;
+ _positionConsumers = 0;
+
+ _position = 0;
+ _clockOffset = now;
+ assert(now-_clockOffset == _position);
+}
+
+PlayHead::PlaybackStatus
+PlayHead::setState(PlaybackStatus newState)
+{
+ if ( _state == newState ) return _state; // nothing to do
+
+ if ( _state == PLAY_PAUSED )
+ {
+ _state = PLAY_PLAYING;
+
+ // if we go from PAUSED to PLAYING, reset
+ // _clockOffset to yank current position
+ // when querying clock source *now*
+ boost::uint64_t now = _clockSource->elapsed();
+ _clockOffset = ( now - _position );
+ assert( now-_clockOffset == _position ); // check if we did the
right thing
+
+ return PLAY_PAUSED;
+ }
+ else
+ {
+ assert(_state == PLAY_PLAYING);
+ _state = PLAY_PAUSED;
+ // When going from PLAYING to PAUSED
+ // we do nothing with _clockOffset
+ // as we'll update it when getting back to PLAYING
+ return PLAY_PLAYING;
+ }
+}
+
+PlayHead::PlaybackStatus
+PlayHead::toggleState()
+{
+ if ( _state == PLAY_PAUSED ) return setState(PLAY_PLAYING);
+ else return setState(PLAY_PAUSED);
+}
+
+void
+PlayHead::advanceIfConsumed()
+{
+ if ( (_positionConsumers & _availableConsumers) != _availableConsumers)
+ {
+ // not all available consumers consumed current position,
+ // won't advance
+ log_debug("PlayHead::advance(): "
+ "not all consumers consumed current position, "
+ "won't advance");
+ return;
+ }
+
+ // Advance position
+ boost::uint64_t now = _clockSource->elapsed();
+ _position = now-_clockOffset;
+
+ // Reset consumers state
+ _positionConsumers = 0;
+}
+
+void
+PlayHead::seekTo(boost::uint64_t position)
+{
+ boost::uint64_t now = _clockSource->elapsed();
+ _position = position;
+
+ _clockOffset = ( now - _position );
+ assert( now-_clockOffset == _position ); // check if we did the right
thing
+
+ // Reset consumers state
+ _positionConsumers = 0;
+}
+
+
} // end of gnash namespace
Index: server/asobj/NetStream.h
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStream.h,v
retrieving revision 1.63
diff -u -r1.63 NetStream.h
--- server/asobj/NetStream.h 21 May 2008 16:48:15 -0000 1.63
+++ server/asobj/NetStream.h 23 May 2008 13:46:11 -0000
@@ -39,9 +39,145 @@
// Forward declarations
namespace gnash {
//class NetConnection;
+ class VirtualClock;
}
namespace gnash {
+
+/// The playback controller
+class PlayHead {
+
+public:
+
+ /// Flags for playback state
+ enum PlaybackStatus {
+ PLAY_PLAYING = 1,
+ PLAY_PAUSED = 2
+ };
+
+
+ /// Initialize playhead given a VirtualCock to use
+ /// as clock source
+ //
+ /// The PlayHead will have initial state set to PLAYING
+ ///
+ PlayHead(VirtualClock* clockSource);
+
+ /// Initialize playhead
+ //
+ /// @param hasVideo
+ /// Whether video consumer is available
+ ///
+ /// @param hasAudio
+ /// Whether video consumer is available
+ ///
+ void init(bool hasVideo, bool hasAudio);
+
+ /// mutex to protect playhead state
+ boost::mutex mutex;
+
+ /// Get current playhead position (milliseconds)
+ boost::uint64_t getPosition() { return _position; }
+
+ /// Get current playback state
+ PlaybackStatus getState() { return _state; }
+
+ /// Set playback state, returning old state
+ PlaybackStatus setState(PlaybackStatus newState);
+
+ /// Toggle playback state, returning old state
+ PlaybackStatus toggleState();
+
+ /// Return true if video of current position have been consumed
+ bool isVideoConsumed() const
+ {
+ return (_positionConsumers & CONSUMER_VIDEO);
+ }
+
+ /// \brief
+ /// Mark current position as being consumed by video consumer,
+ /// advancing if needed
+ void setVideoConsumed()
+ {
+ _positionConsumers |= CONSUMER_VIDEO;
+ advanceIfConsumed();
+ }
+
+ /// Return true if audio of current position have been consumed
+ bool isAudioConsumed() const
+ {
+ return (_positionConsumers & CONSUMER_AUDIO);
+ }
+
+ /// \brief
+ /// Mark current position as being consumed by audio consumer,
+ /// advancing if needed.
+ void setAudioConsumed()
+ {
+ _positionConsumers |= CONSUMER_AUDIO;
+ advanceIfConsumed();
+ }
+
+ /// Change current position to the given time.
+ //
+ /// Consume flag will be reset.
+ ///
+ /// @param position
+ /// Position timestamp (milliseconds)
+ ///
+ /// POSTCONDITIONS:
+ /// - isVideoConsumed() == false
+ /// - isAudioConsumed() == false
+ /// - getPosition() == position
+ ///
+ void seekTo(boost::uint64_t position);
+
+private:
+
+ /// Advance position if all consumers consumed the current one
+ //
+ /// Clock source will be used to determine the amount
+ /// of milliseconds to advance position to.
+ ///
+ /// Consumer flags will be reset.
+ ///
+ /// POSTCONDITIONS:
+ /// - isVideoConsumed() == false
+ /// - isAudioConsumed() == false
+ ///
+ void advanceIfConsumed();
+
+ /// Flags for consumers state
+ enum ConsumerFlag {
+ CONSUMER_VIDEO = 1,
+ CONSUMER_AUDIO = 2
+ };
+
+ /// Current playhead position
+ boost::uint64_t _position;
+
+ /// Current playback state
+ PlaybackStatus _state;
+
+ /// Binary OR of consumers representing
+ /// which consumers are active
+ int _availableConsumers;
+
+ /// Binary OR of consumers representing
+ /// which consumers consumed current position
+ int _positionConsumers;
+
+ /// The clock source, externally owned
+ VirtualClock* _clockSource;
+
+ /// Offset to subtract from current clock source
+ /// to get current position
+ //
+ /// The offset will be
+ boost::uint64_t _clockOffset;
+
+};
+
/// NetStream ActionScript class
@@ -203,6 +339,7 @@
//
/// @param position
/// Defines in seconds where to seek to
+ /// TODO: take milliseconds !!
///
virtual void seek(boost::uint32_t /*pos*/){}
Index: server/asobj/NetStreamFfmpeg.cpp
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStreamFfmpeg.cpp,v
retrieving revision 1.133
diff -u -r1.133 NetStreamFfmpeg.cpp
--- server/asobj/NetStreamFfmpeg.cpp 22 May 2008 11:30:17 -0000 1.133
+++ server/asobj/NetStreamFfmpeg.cpp 23 May 2008 13:46:13 -0000
@@ -32,7 +32,7 @@
#include "movie_root.h"
#include "sound_handler.h"
#include "VideoDecoderFfmpeg.h"
-#include "ClockTime.h" // TODO: use the VirtualClock instead ?
+#include "SystemClock.h"
#include "FLVParser.h"
@@ -48,7 +48,7 @@
#endif
/// Define this to add debugging prints for locking
-#define GNASH_DEBUG_THREADS
+//#define GNASH_DEBUG_THREADS
// Define the following macro to have status notification handling debugged
//#define GNASH_DEBUG_STATUS
@@ -63,9 +63,9 @@
namespace gnash {
-NetStreamFfmpeg::NetStreamFfmpeg():
+NetStreamFfmpeg::NetStreamFfmpeg()
+ :
- _playback_state(PLAY_NONE),
_decoding_state(DEC_NONE),
m_video_index(-1),
@@ -82,9 +82,10 @@
m_last_video_timestamp(0),
m_last_audio_timestamp(0),
- m_current_timestamp(0),
+
+ _playHead(new SystemClock()), // will leak a SystemClock !
+
m_unqueued_data(NULL),
- m_time_of_pause(0),
_decoderBuffer(0),
_soundHandler(get_sound_handler())
@@ -106,23 +107,21 @@
void NetStreamFfmpeg::pause( PauseMode mode )
{
log_debug("::pause(%d) called ", mode);
- switch ( mode ) {
- case pauseModeToggle:
- if ( playbackStatus() == PLAY_PAUSED ) {
- unpausePlayback();
- } else {
- pausePlayback();
- }
+ switch ( mode )
+ {
+ case pauseModeToggle:
+ if ( _playHead.getState() == PlayHead::PLAY_PAUSED)
unpausePlayback();
+ else pausePlayback();
break;
- case pauseModePause:
+ case pauseModePause:
pausePlayback();
break;
- case pauseModeUnPause:
+ case pauseModeUnPause:
unpausePlayback();
break;
- default:
+ default:
break;
- }
+ }
}
@@ -164,11 +163,6 @@
delete m_unqueued_data;
m_unqueued_data = NULL;
- boost::mutex::scoped_lock lock(_qMutex);
-
- m_qvideo.clear();
- m_qaudio.clear();
-
delete [] ByteIOCxt.buffer;
}
@@ -226,12 +220,10 @@
void
NetStreamFfmpeg::play(const std::string& c_url)
{
-
// Is it already playing ?
- if (playbackStatus() != PLAY_NONE && playbackStatus() != PLAY_STOPPED)
+ if ( m_parser.get() )
{
- log_error("NetStream.play() called already playing ?"); //
TODO: fix this case
- //unpausePlayback(); // will check for playbackStatus itself..
+ log_error("NetStream.play() called while already streaming ?");
// TODO: fix this case
return;
}
@@ -265,8 +257,8 @@
_soundHandler->attach_aux_streamer(audio_streamer, this);
// This starts the decoding thread
- _decodeThread = new
boost::thread(boost::bind(NetStreamFfmpeg::av_streamer, this));
- _decodeThreadBarrier.wait();
+ //_decodeThread = new
boost::thread(boost::bind(NetStreamFfmpeg::av_streamer, this));
+ //_decodeThreadBarrier.wait();
return;
}
@@ -602,8 +594,14 @@
}
}
- playbackStatus(PLAY_PLAYING);
- m_start_clock = clocktime::getTicks();
+ _playHead.init(m_VCodecCtx!=0, false); // second arg should be
m_ACodecCtx!=0, but we're testing video only for now
+ _playHead.setState(PlayHead::PLAY_PLAYING);
+
+//#ifdef GNASH_DEBUG_STATUS
+ log_debug("Setting playStart status");
+//#endif
+ setStatus(playStart);
+
return true;
}
@@ -638,106 +636,14 @@
ns->_decodeThreadBarrier.wait();
- //assert (ns->m_ACodecCtx); // is only set if audio decoder could be
initialized
- //assert (ns->m_VCodecCtx); // is only set if video decder could be
initialized
- //assert (ns->m_FormatCtx); // is only set for non-flv
-
- ns->setStatus(playStart);
-
- ns->m_last_video_timestamp = 0;
- ns->m_last_audio_timestamp = 0;
- ns->m_current_timestamp = 0;
-
- ns->m_start_clock = clocktime::getTicks();
-
- ns->m_unqueued_data = NULL;
-
- // Loop until killed
- while ( ! ns->decodeThreadKillRequested() ) // locks _qMutex
+ // Parse in a thread...
+ abort(); // has to be fixed to use mutex against parser
+ // FIXME:
+ while ( ! ns->m_parser->parsingCompleted()
+ && ! ns->decodeThreadKillRequested() )
{
- unsigned long int sleepTime = 1000;
-
- {
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in av_streamer");
-#endif
- boost::mutex::scoped_lock lock(ns->_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in av_streamer");
-#endif
-
- if ( ns->decodingStatus() == DEC_STOPPED )
- {
- log_debug("Dec stopped (eof), waiting on qNeedRefill
condition");
- ns->_qFillerResume.wait(lock);
- continue; // will release the lock for a moment
- }
-
-#ifdef GNASH_DEBUG_THREADS
- log_debug("Decoding iteration. bufferTime=%lu, bufferLen=%lu,
videoFrames=%lu, audioFrames=%lu",
- ns->bufferTime(), ns->bufferLength(),
ns->m_qvideo.size(), ns->m_qaudio.size());
-#endif
-
- if (ns->m_isFLV)
- {
- // If any of the two queues are full don't bother
fetching more
- // (next consumer will wake us up)
- //
- if ( ns->m_qvideo.full() || ns->m_qaudio.full() )
- {
- ns->decodingStatus(DEC_DECODING); // that's to
say: not buffering anymore
-
- // Instead wait till waked up by short-queues
event
- log_debug("Queues full, waiting on qNeedRefill
condition");
- ns->_qFillerResume.wait(lock);
- }
- else
- {
- log_debug("Calling decodeFLVFrame");
- bool successDecoding = ns->decodeFLVFrame();
- //log_debug("decodeFLVFrame returned %d",
successDecoding);
- if ( ! successDecoding )
- {
- // Possible failures:
- // 1. could not decode frame... lot's
of possible
- // reasons...
- // 2. EOF reached
- if ( ns->m_videoFrameFormat !=
render::NONE )
- {
- log_error("Could not decode FLV
frame");
- }
- // else it's expected, we'll keep going
anyway
- }
-
- }
-
- }
- else
- {
-
- // If we have problems with decoding - break
- if (ns->decodeMediaFrame() == false &&
ns->m_start_onbuffer == false && ns->m_qvideo.size() == 0 &&
ns->m_qaudio.size() == 0)
- {
- break;
- }
-
- }
-
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in av_streamer");
-#endif
- }
-
- //log_debug("Sleeping %d microseconds", sleepTime);
- usleep(sleepTime); // Sleep 1ms to avoid busying the processor.
-
+ ns->m_parser->parseNextTag();
}
-
-//#ifdef GNASH_DEBUG_THREADS
- log_debug("Out of decoding loop. playbackStatus:%d, decodingStatus:%d",
ns->playbackStatus(), ns->decodingStatus());
-//#endif
- ns->decodingStatus(DEC_STOPPED);
-
}
// audio callback is running in sound handler thread
@@ -745,6 +651,10 @@
{
//GNASH_REPORT_FUNCTION;
+ return false;
+
+#if 0 // no audio for now, needs proper mutex design first (SDL sound handler
runs in a thread)
+
NetStreamFfmpeg* ns = static_cast<NetStreamFfmpeg*>(owner);
PlaybackState pbStatus = ns->playbackStatus();
@@ -797,10 +707,100 @@
#endif
}
return true;
+#endif
+}
+
+media::raw_mediadata_t*
+NetStreamFfmpeg::getDecodedVideoFrame(boost::uint32_t ts)
+{
+ if ( ! m_parser.get() )
+ {
+ log_error("getDecodedVideoFrame: no parser available");
+ return 0; // no parser, no party
+ }
+
+ FLVVideoFrameInfo* info = m_parser->peekNextVideoFrameInfo();
+ if ( ! info )
+ {
+ log_error("getDecodedVideoFrame(%d): no more video frames in
input (peekNextVideoFrameInfo returned false)");
+ decodingStatus(DEC_STOPPED);
+ return 0;
+ }
+
+ if ( info->timestamp > ts )
+ {
+ log_error("getDecodedVideoFrame(%d): next video frame is in the
future (%d)", ts, info->timestamp);
+ return 0; // next frame is in the future
+ }
+
+ // Loop until a good frame is found
+ media::raw_mediadata_t* video = 0;
+ while ( 1 )
+ {
+ video = decodeNextVideoFrame();
+ if ( ! video )
+ {
+ log_error("peekNextVideoFrameInfo returned some info, "
+ "but decodeNextVideoFrame returned null, "
+ "I don't think this should ever happen");
+ break;
+ }
+
+ FLVVideoFrameInfo* info = m_parser->peekNextVideoFrameInfo();
+ if ( ! info )
+ {
+ // the one we decoded was the last one
+ log_debug("last video frame decoded (should set
playback status to STOP?)");
+ break;
+ }
+ if ( info->timestamp > ts )
+ {
+ // the next one is in the future, we'll return this one.
+ log_debug("next video frame is in the future, we'll
return this one");
+ break; // the one we decoded
+ }
+ }
+
+ return video;
+}
+
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeNextVideoFrame()
+{
+ if ( ! m_parser.get() )
+ {
+ log_error("decodeNextVideoFrame: no parser available");
+ return 0; // no parser, no party
+ }
+
+ FLVFrame* frame = m_parser->nextVideoFrame();
+ if (frame == NULL)
+ {
+ log_debug("decodeNextVideoFrame: no more video frames in
input");
+ return 0;
+ }
+ assert (frame->type == videoFrame);
+
+ AVPacket packet;
+
+ packet.destruct = avpacket_destruct; // needed ?
+ packet.size = frame->dataSize;
+ packet.data = frame->data;
+ // FIXME: is this the right value for packet.dts?
+ packet.pts = packet.dts = static_cast<boost::int64_t>(frame->timestamp);
+ assert (frame->type == videoFrame);
+ packet.stream_index = 0;
+
+ return decodeVideo(&packet);
}
-bool NetStreamFfmpeg::decodeFLVFrame()
+bool
+NetStreamFfmpeg::decodeFLVFrame()
{
+#if 1
+ abort();
+ return false;
+#else
FLVFrame* frame = m_parser->nextMediaFrame(); // we don't care which
one, do we ?
if (frame == NULL)
@@ -822,21 +822,40 @@
if (frame->type == videoFrame)
{
packet.stream_index = 0;
- return decodeVideo(&packet);
+ media::raw_mediadata_t* video = decodeVideo(&packet);
+ assert (m_isFLV);
+ if (video)
+ {
+ // NOTE: Caller is assumed to have locked _qMutex
already
+ if ( ! m_qvideo.push(video) )
+ {
+ log_error("Video queue full !");
+ }
+ }
}
else
{
assert(frame->type == audioFrame);
packet.stream_index = 1;
- return decodeAudio(&packet);
+ media::raw_mediadata_t* audio = decodeAudio(&packet);
+ if ( audio )
+ {
+ if ( ! m_qaudio.push(audio) )
+ {
+ log_error("Audio queue full!");
+ }
+ }
}
+ return true;
+#endif
}
-bool NetStreamFfmpeg::decodeAudio( AVPacket* packet )
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeAudio( AVPacket* packet )
{
- if (!m_ACodecCtx) return false;
+ if (!m_ACodecCtx) return 0;
int frame_size;
//static const unsigned int bufsize = (AVCODEC_MAX_AUDIO_FRAME_SIZE *
3) / 2;
@@ -944,26 +963,20 @@
m_last_audio_timestamp += frame_delay;
- if (m_isFLV)
- {
- if ( ! m_qaudio.push(raw) )
- {
- log_error("Audio queue full!");
- }
- }
- else m_unqueued_data = m_qaudio.push(raw) ? NULL : raw;
+ return raw;
}
- return true;
+ return 0;
}
-bool NetStreamFfmpeg::decodeVideo(AVPacket* packet)
+media::raw_mediadata_t*
+NetStreamFfmpeg::decodeVideo(AVPacket* packet)
{
- if (!m_VCodecCtx) return false;
+ if (!m_VCodecCtx) return NULL;
int got = 0;
avcodec_decode_video(m_VCodecCtx, m_Frame, &got, packet->data,
packet->size);
- if (!got) return false;
+ if (!got) return NULL;
// This tmpImage is really only used to compute proper size of the
video data...
// stupid isn't it ?
@@ -982,7 +995,7 @@
if (m_videoFrameFormat == render::NONE)
{
// NullGui?
- return false;
+ return NULL;
}
else if (m_videoFrameFormat == render::YUV && m_VCodecCtx->pix_fmt !=
PIX_FMT_YUV420P)
@@ -997,7 +1010,7 @@
rgbpicture =
media::VideoDecoderFfmpeg::convertRGB24(m_VCodecCtx, *m_Frame);
if (!rgbpicture.data[0])
{
- return false;
+ return NULL;
}
}
@@ -1088,21 +1101,14 @@
}
- // NOTE: Caller is assumed to have locked _qMutex already
- if (m_isFLV)
- {
- if ( ! m_qvideo.push(video) )
- {
- log_error("Video queue full !");
- }
- }
- else m_unqueued_data = m_qvideo.push(video) ? NULL : video;
-
- return true;
+ return video;
}
bool NetStreamFfmpeg::decodeMediaFrame()
{
+ return false;
+
+#if 0 // Only FLV for now (non-FLV should be threated the same as FLV, using a
MediaParser in place of the FLVParser)
if (m_unqueued_data)
{
@@ -1132,20 +1138,24 @@
{
if (packet.stream_index == m_audio_index && _soundHandler)
{
- if (!decodeAudio(&packet))
+ media::raw_mediadata_t* audio = decodeAudio(&packet);
+ if (!audio)
{
log_error(_("Problems decoding audio frame"));
return false;
}
+ m_unqueued_data = m_qaudio.push(audio) ? NULL : audio;
}
else
if (packet.stream_index == m_video_index)
{
- if (!decodeVideo(&packet))
+ media::raw_mediadata_t* video = decodeVideo(&packet);
+ if (!video)
{
log_error(_("Problems decoding video frame"));
return false;
}
+ m_unqueued_data = m_qvideo.push(video) ? NULL : video;
}
av_free_packet(&packet);
}
@@ -1156,15 +1166,24 @@
}
return true;
+#endif
}
void
-NetStreamFfmpeg::seek(boost::uint32_t pos)
+NetStreamFfmpeg::seek(boost::uint32_t posSeconds)
{
GNASH_REPORT_FUNCTION;
- // We'll mess with the queues here
- boost::mutex::scoped_lock lock(_qMutex);
+ // We'll mess with the input here
+ if ( ! m_parser.get() )
+ {
+ log_debug("NetStreamFfmpeg::seek(%d): no parser, no party",
posSeconds);
+ return;
+ }
+
+ // Don't ask me why, but NetStream::seek() takes seconds...
+ boost::uint32_t pos = posSeconds*1000;
+
long newpos = 0;
double timebase = 0;
@@ -1172,18 +1191,11 @@
// Seek to new position
if (m_isFLV)
{
- if (m_parser.get())
- {
- newpos = m_parser->seek(pos);
- }
- else
- {
- newpos = 0;
- }
+ newpos = m_parser->seek(pos);
+ log_debug("m_parser->seek(%d) returned %d", pos, newpos);
}
else if (m_FormatCtx)
{
-
AVStream* videostream = m_FormatCtx->streams[m_video_index];
timebase = static_cast<double>(videostream->time_base.num /
videostream->time_base.den);
newpos = static_cast<long>(pos / timebase);
@@ -1205,20 +1217,11 @@
{
m_last_video_timestamp = 0;
m_last_audio_timestamp = 0;
- m_current_timestamp = 0;
-
- m_start_clock = clocktime::getTicks();
-
}
else if (m_isFLV)
{
-
- if (m_VCodecCtx) m_start_clock += m_last_video_timestamp -
newpos;
- else m_start_clock += m_last_audio_timestamp - newpos;
-
if (m_ACodecCtx) m_last_audio_timestamp = newpos;
if (m_VCodecCtx) m_last_video_timestamp = newpos;
- m_current_timestamp = newpos;
}
else
{
@@ -1240,139 +1243,100 @@
av_free_packet( &Packet );
av_seek_frame(m_FormatCtx, m_video_index, newpos, 0);
- boost::uint32_t newtime_ms =
static_cast<boost::int32_t>(newtime / 1000.0);
- m_start_clock += m_last_audio_timestamp - newtime_ms;
+ newpos = static_cast<boost::int32_t>(newtime / 1000.0);
- m_last_audio_timestamp = newtime_ms;
- m_last_video_timestamp = newtime_ms;
- m_current_timestamp = newtime_ms;
+ m_last_audio_timestamp = newpos;
+ m_last_video_timestamp = newpos;
}
- // Flush the queues
- m_qvideo.clear();
- m_qaudio.clear();
+ // 'newpos' will always be on a keyframe (supposedly)
+ _playHead.seekTo(newpos);
decodingStatus(DEC_DECODING); // or ::refreshVideoFrame will send a
STOPPED again
- if ( playbackStatus() == PLAY_STOPPED )
- {
- // restart playback (if not paused)
- playbackStatus(PLAY_PLAYING);
- }
_qFillerResume.notify_all(); // wake it decoder is sleeping
+ refreshVideoFrame(true);
}
void
-NetStreamFfmpeg::refreshVideoFrame()
+NetStreamFfmpeg::refreshVideoFrame(bool alsoIfPaused)
{
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in refreshVideoFrame");
-#endif
- boost::mutex::scoped_lock lock(_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in refreshVideoFrame");
-#endif
- // If we're paused (and we got the first imageframe), there is no need
to do this
- if (playbackStatus() == PLAY_PAUSED && m_imageframe)
+ if ( ! m_parser.get() )
{
- log_debug("refreshVideoFrame doing nothing as playback is
paused and we have an image frame already");
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in refreshVideoFrame");
-#endif
+ log_debug("%p.refreshVideoFrame: no parser, no party", this);
return;
}
- // Loop until a good frame is found
- do
+ if ( ! alsoIfPaused && _playHead.getState() == PlayHead::PLAY_PAUSED )
{
- // Get video frame from queue, will have the lowest timestamp
- // will return NULL if empty(). See multithread_queue::front
- media::raw_mediadata_t* video = m_qvideo.front();
+ log_debug("%p.refreshVideoFrame: doing nothing as playhead is
paused", this);
+ return;
+ }
- // If the queue is empty either we're waiting for more data
- // to be decoded or we're out of data
- if (!video)
- {
- log_debug("refreshVideoFrame:: No more video frames in
queue");
+ if ( _playHead.isVideoConsumed() )
+ {
+ log_debug("%p.refreshVideoFrame: doing nothing as current
position was already decoded", this);
+ return;
+ }
- if ( decodingStatus() == DEC_STOPPED )
- {
- if ( playbackStatus() != PLAY_STOPPED )
- {
- playbackStatus(PLAY_STOPPED);
-//#ifdef GNASH_DEBUG_STATUS
- log_debug("Setting playStop status");
-//#endif
- setStatus(playStop);
- }
- }
- else
- {
- // There no video but decoder is still running
- // not much to do here except wait for next call
- //assert(decodingStatus() == DEC_BUFFERING);
- }
- break;
- }
+ // Calculate the current time
+ boost::uint64_t curPos = _playHead.getPosition();
+
+ log_debug("%p.refreshVideoFrame: currentPosition=%d, playHeadState=%d",
this, curPos, _playHead.getState());
- // Caclulate the current time
- boost::uint32_t current_clock;
- if (m_ACodecCtx && _soundHandler)
+
+ // Get next decoded video frame from parser, will have the lowest
timestamp
+ media::raw_mediadata_t* video = getDecodedVideoFrame(curPos);
+
+ // NULL if we're waiting for more data to be decoded, or we're out of data
+ if (!video)
+ {
+ if ( decodingStatus() == DEC_STOPPED )
{
- current_clock = m_current_timestamp;
+ log_debug("%p.refreshVideoFrame(): no more video frames
to decode, sending STOP event", this);
+//#ifdef GNASH_DEBUG_STATUS
+ log_debug("Setting playStop status");
+//#endif
+ setStatus(playStop);
}
else
{
- current_clock = clocktime::getTicks() - m_start_clock;
- m_current_timestamp = current_clock;
+ log_debug("%p.refreshVideoFrame(): last video frame was
good enough for current position", this);
+ // There no video but decoder is still running
+ // not much to do here except wait for next call
+ //assert(decodingStatus() == DEC_BUFFERING);
}
- boost::uint32_t video_clock = video->m_pts;
+ }
+ else
+ {
- // If the timestamp on the videoframe is smaller than the
- // current time, we put it in the output image.
- if (current_clock >= video_clock)
+ if (m_videoFrameFormat == render::YUV)
{
-
- if (m_videoFrameFormat == render::YUV)
- {
- if ( ! m_imageframe ) m_imageframe = new
image::yuv(m_VCodecCtx->width, m_VCodecCtx->height);
- // XXX m_imageframe might be a byte aligned
buffer, while video is not!
-
static_cast<image::yuv*>(m_imageframe)->update(video->m_data);
- }
- else if (m_videoFrameFormat == render::RGB)
- {
- if ( ! m_imageframe ) m_imageframe = new
image::rgb(m_VCodecCtx->width, m_VCodecCtx->height);
- image::rgb* imgframe =
static_cast<image::rgb*>(m_imageframe);
- rgbcopy(imgframe, video, m_VCodecCtx->width *
3);
- }
-
- // Delete the frame from the queue
- m_qvideo.pop();
- delete video;
-
- // wake up filler (TODO: do only if decoder is running)
- // TODO2: resume only at end of loop ?
- _qFillerResume.notify_all();
-
- // A frame is ready for pickup
- m_newFrameReady = true;
-
+ if ( ! m_imageframe ) m_imageframe = new
image::yuv(m_VCodecCtx->width, m_VCodecCtx->height);
+ // XXX m_imageframe might be a byte aligned buffer,
while video is not!
+
static_cast<image::yuv*>(m_imageframe)->update(video->m_data);
}
- else
+ else if (m_videoFrameFormat == render::RGB)
{
- // The timestamp on the first frame in the queue is
greater
- // than the current time, so no need to do anything.
- break;
+ if ( ! m_imageframe ) m_imageframe = new
image::rgb(m_VCodecCtx->width, m_VCodecCtx->height);
+ image::rgb* imgframe =
static_cast<image::rgb*>(m_imageframe);
+ rgbcopy(imgframe, video, m_VCodecCtx->width * 3);
}
- } while(!m_qvideo.empty());
+ // Delete the frame from the queue
+ delete video;
+
+ // A frame is ready for pickup
+ m_newFrameReady = true;
+ }
+
+ // We consumed video at the current position, feel free to advance if needed
+ _playHead.setVideoConsumed();
+
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: releasing lock in refreshVideoFrame");
-#endif
}
@@ -1393,104 +1357,59 @@
boost::int32_t
NetStreamFfmpeg::time()
{
-
- if (m_FormatCtx && m_FormatCtx->nb_streams > 0)
- {
- double time = (double)m_FormatCtx->streams[0]->time_base.num /
(double)m_FormatCtx->streams[0]->time_base.den *
(double)m_FormatCtx->streams[0]->cur_dts;
- return static_cast<boost::int32_t>(time);
- }
- else if
- (m_isFLV)
- {
- return m_current_timestamp;
- }
- else
- {
- return 0;
- }
+ return _playHead.getPosition();
}
void NetStreamFfmpeg::pausePlayback()
{
GNASH_REPORT_FUNCTION;
- if (playbackStatus() == PLAY_PAUSED) return;
-
- playbackStatus(PLAY_PAUSED);
-
- // Save the current time so we later can tell how long the pause lasted
- m_time_of_pause = clocktime::getTicks();
+ PlayHead::PlaybackStatus oldStatus =
_playHead.setState(PlayHead::PLAY_PAUSED);
- // Disconnect the soundhandler so we don't play while paused
- if ( _soundHandler ) _soundHandler->detach_aux_streamer((void*)this);
+ // Disconnect the soundhandler if we were playing before
+ if ( oldStatus == PlayHead::PLAY_PLAYING && _soundHandler )
+ {
+ _soundHandler->detach_aux_streamer((void*)this);
+ }
}
void NetStreamFfmpeg::unpausePlayback()
{
GNASH_REPORT_FUNCTION;
- if (playbackStatus() == PLAY_PLAYING) // already playing
- {
- log_debug("unpausePlayback: already playing");
- return;
- }
-
- playbackStatus(PLAY_PLAYING);
+ PlayHead::PlaybackStatus oldStatus =
_playHead.setState(PlayHead::PLAY_PLAYING);
- if (m_current_timestamp == 0)
+ // Re-connect to the soundhandler if we were paused before
+ if ( oldStatus == PlayHead::PLAY_PAUSED && _soundHandler )
{
- m_start_clock = clocktime::getTicks();
+ _soundHandler->attach_aux_streamer(audio_streamer, (void*)
this);
}
- else
- {
- // Add the paused time to the start time so that the playhead
doesn't
- // noticed that we have been paused
- m_start_clock += clocktime::getTicks() - m_time_of_pause;
- }
-
- // (re)-connect to the soundhandler.
- // It was disconnected in ::pausePlayback to avoid to keep playing
sound while paused
- if ( _soundHandler ) _soundHandler->attach_aux_streamer(audio_streamer,
(void*) this);
}
long
NetStreamFfmpeg::bytesLoaded ()
{
- long ret_val = 0;
-
- if ( _netCon )
+ if ( ! m_parser.get() )
{
- ret_val = _netCon->getBytesLoaded();
+ log_debug("bytesLoaded: no parser, no party");
+ return 0;
}
- return ret_val;
+ return m_parser->getBytesLoaded();
}
long
NetStreamFfmpeg::bytesTotal ()
{
- long ret_val = 0;
-
- if ( _netCon )
+ if ( ! m_parser.get() )
{
- ret_val = _netCon->getBytesTotal();
+ log_debug("bytesTotal: no parser, no party");
+ return 0;
}
- return ret_val;
-}
-
-NetStreamFfmpeg::PlaybackState
-NetStreamFfmpeg::playbackStatus(PlaybackState newstate)
-{
- boost::mutex::scoped_lock lock(_state_mutex);
-
- if (newstate != PLAY_NONE) {
- _playback_state = newstate;
- }
-
- return _playback_state;
+ return m_parser->getBytesTotal();
}
NetStreamFfmpeg::DecodingState
@@ -1511,14 +1430,6 @@
GNASH_REPORT_FUNCTION;
{
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: waiting for lock in killDecodeThread");
-#endif
- boost::mutex::scoped_lock lock(_qMutex);
-#ifdef GNASH_DEBUG_THREADS
- log_debug("qMutex: lock obtained in killDecodeThread");
-#endif
-
_qFillerKillRequest = true;
_qFillerResume.notify_all(); // wake it up if waiting..
}
@@ -1536,7 +1447,6 @@
bool
NetStreamFfmpeg::decodeThreadKillRequested()
{
- boost::mutex::scoped_lock lock(_qMutex);
return _qFillerKillRequest;
}
Index: server/asobj/NetStreamFfmpeg.h
===================================================================
RCS file: /sources/gnash/gnash/server/asobj/NetStreamFfmpeg.h,v
retrieving revision 1.67
diff -u -r1.67 NetStreamFfmpeg.h
--- server/asobj/NetStreamFfmpeg.h 22 May 2008 11:30:17 -0000 1.67
+++ server/asobj/NetStreamFfmpeg.h 23 May 2008 13:46:13 -0000
@@ -117,7 +117,6 @@
DEC_BUFFERING,
};
- PlaybackState _playback_state;
DecodingState _decoding_state;
// Mutex protecting _playback_state and _decoding_state
@@ -155,7 +154,11 @@
/// is that refreshVideoFrame() is called right before get_video().
This is important
/// to ensure timing is correct..
///
- void refreshVideoFrame();
+ /// @param alsoIfPaused
+ /// If true, video is consumed/refreshed even if playhead is paused.
+ /// By default this is false, but will be used on ::seek
(user-reguested)
+ ///
+ void refreshVideoFrame(bool alsoIfPaused=false);
// Used to decode and push the next available (non-FLV) frame to the
audio or video queue
bool decodeMediaFrame();
@@ -189,35 +192,45 @@
///
bool decodeFLVFrame();
- /// Used to decode a video frame and push it on the videoqueue
+ /// Decode next video frame fetching it from the MediaParser cursor
+ //
+ /// @return 0 on EOF or error, a decoded video otherwise
+ ///
+ media::raw_mediadata_t* decodeNextVideoFrame();
+
+ /// Decode input frames up to the one with timestamp <= ts.
//
- /// Also updates m_imageframe (why !??)
+ /// Decoding starts from "next" element in the parser cursor.
///
+ /// Return 0 if:
+ /// 1. there's no parser active.
+ /// 2. parser cursor is already on last frame.
+ /// 3. next element in cursor has timestamp > ts
+ /// 4. there was an error decoding
+ ///
+ media::raw_mediadata_t* getDecodedVideoFrame(boost::uint32_t ts);
+
+ /// Used to decode a video frame
+ //
/// This is a blocking call.
- /// If no Video decoding context exists (m_VCodecCtx), false is
returned.
- /// On decoding (or converting) error, false is returned.
- /// If renderer requested video format is render::NONE, false is
returned.
- /// In any other case, true is returned.
+ /// If no Video decoding context exists (m_VCodecCtx), 0 is returned.
+ /// On decoding (or converting) error, 0 is returned.
+ /// If renderer requested video format is render::NONE, 0 is returned.
+ /// In any other case, a decoded video frame is returned.
///
- /// NOTE: (FIXME) if video queue is full,
- /// we'd still return true w/out pushing anything new there
- ///
/// TODO: return a more informative value to tell what happened.
///
- bool decodeVideo( AVPacket* packet );
+ media::raw_mediadata_t* decodeVideo( AVPacket* packet );
- /// Used to decode a audio frame and push it on the audioqueue
+ /// Used to decode an audio frame
//
/// This is a blocking call.
- /// If no Video decoding context exists (m_ACodecCtx), false is
returned.
- /// In any other case, true is returned.
+ /// If no audio decoding context exists (m_ACodecCtx), 0 is returned.
+ /// In any other case, a decoded audio frame is returned.
///
- /// NOTE: (FIXME) if audio queue is full,
- /// we'd still return true w/out pushing anything new there
- ///
/// TODO: return a more informative value to tell what happened.
///
- bool decodeAudio( AVPacket* packet );
+ media::raw_mediadata_t* decodeAudio( AVPacket* packet );
// Used to calculate a decimal value from a ffmpeg fraction
inline double as_double(AVRational time)
@@ -225,7 +238,6 @@
return time.num / (double) time.den;
}
- PlaybackState playbackStatus(PlaybackState newstate = PLAY_NONE);
DecodingState decodingStatus(DecodingState newstate = DEC_NONE);
int m_video_index;
@@ -278,24 +290,11 @@
// The timestamp of the last decoded audio frame, in seconds.
volatile boost::uint32_t m_last_audio_timestamp;
- // The timestamp of the last played audio (default) or video (if no
audio) frame.
- // Misured in seconds.
- boost::uint32_t m_current_timestamp;
-
- /// The queues of audio and video data.
- typedef media::ElementsOwningQueue<media::raw_mediadata_t*> MediaQueue;
-
- MediaQueue m_qaudio;
- MediaQueue m_qvideo;
-
- /// Mutex protecting access to queues
- boost::mutex _qMutex;
-
/// Queues filler will wait on this condition when queues are full
boost::condition _qFillerResume;
- // The time we started playing in seconds (since VM start ?)
- volatile boost::uint64_t m_start_clock;
+ /// Playback control device
+ PlayHead _playHead;
// When the queues are full, this is where we keep the audio/video frame
// there wasn't room for on its queue
@@ -303,9 +302,6 @@
ByteIOContext ByteIOCxt;
- // Time of when pause started, in seconds since VM started
- volatile boost::uint64_t m_time_of_pause;
-
// Decoder buffer
boost::uint8_t* _decoderBuffer;
_______________________________________________
Gnash-dev mailing list
[email protected]
http://lists.gnu.org/mailman/listinfo/gnash-dev