lavfi audio fw using the generalized AVFilterBufferRef struct. Anonymous
unions weren't allowed so I had to give the unions some names. Please
review and comment.
---
ffmpeg.c | 6 +-
ffplay.c | 12 ++--
libavfilter/avfilter.c | 59 +++++++++++++++++++++-
libavfilter/avfilter.h | 115 +++++++++++++++++++++++++++++++++++-------
libavfilter/defaults.c | 105 ++++++++++++++++++++++++++++++++++++--
libavfilter/formats.c | 3 +-
libavfilter/vf_aspect.c | 2 +-
libavfilter/vf_crop.c | 4 +-
libavfilter/vf_drawbox.c | 6 +-
libavfilter/vf_fifo.c | 2 +-
libavfilter/vf_fps.c | 2 +-
libavfilter/vf_hflip.c | 2 +-
libavfilter/vf_overlay.c | 8 ++--
libavfilter/vf_pad.c | 2 +-
libavfilter/vf_pixdesctest.c | 2 +-
libavfilter/vf_rotate.c | 12 ++--
libavfilter/vf_scale.c | 6 +-
libavfilter/vf_transpose.c | 18 +++---
libavfilter/vsrc_buffer.c | 8 ++--
19 files changed, 302 insertions(+), 72 deletions(-)
diff --git a/ffmpeg.c b/ffmpeg.c
index aec1f79..faf05c9 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -377,8 +377,8 @@ static int get_filtered_video_pic(AVFilterContext *ctx,
memcpy(pic2->data, pic->data, sizeof(pic->data));
memcpy(pic2->linesize, pic->linesize, sizeof(pic->linesize));
- pic2->interlaced_frame = pic->interlaced;
- pic2->top_field_first = pic->top_field_first;
+ pic2->interlaced_frame = pic->prop.interlaced;
+ pic2->top_field_first = pic->packing.top_field_first;
return 1;
}
@@ -1701,7 +1701,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
break;
case AVMEDIA_TYPE_VIDEO:
#if CONFIG_AVFILTER
- ost->st->codec->sample_aspect_ratio = ist->picref->pixel_aspect;
+ ost->st->codec->sample_aspect_ratio = ist->picref->desc.pixel_aspect;
#endif
do_video_out(os, ost, ist, &picture, &frame_size);
if (vstats_filename && frame_size)
diff --git a/ffplay.c b/ffplay.c
index c89a8b3..47bac10 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -694,10 +694,10 @@ static void video_image_display(VideoState *is)
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
#if CONFIG_AVFILTER
- if (vp->picref->pixel_aspect.num == 0)
+ if (vp->picref->desc.pixel_aspect.num == 0)
aspect_ratio = 0;
else
- aspect_ratio = av_q2d(vp->picref->pixel_aspect);
+ aspect_ratio = av_q2d(vp->picref->desc.pixel_aspect);
#else
/* XXX: use variable in the frame */
@@ -1582,8 +1582,8 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
return -1;
- ref->w = codec->width;
- ref->h = codec->height;
+ ref->dim1.w = codec->width;
+ ref->dim2.h = codec->height;
for(i = 0; i < 4; i ++) {
unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
@@ -1616,7 +1616,7 @@ static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
return codec->get_buffer(codec, pic);
}
- if ((codec->width != ref->w) || (codec->height != ref->h) ||
+ if ((codec->width != ref->dim1.w) || (codec->height != ref->dim2.h) ||
(codec->pix_fmt != ref->format)) {
av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
return -1;
@@ -1677,7 +1677,7 @@ static int input_request_frame(AVFilterLink *link)
picref->pts = pts;
picref->pos = pkt.pos;
- picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
+ picref->desc.pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 98a7204..bd0c5e2 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -22,6 +22,7 @@
/* #define DEBUG */
#include "libavcodec/imgconvert.h"
+#include "libavcodec/audioconvert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
@@ -179,7 +180,7 @@ void ff_dprintf_picref(void *ctx, AVFilterBufferRef *picref, int end)
picref->data [0], picref->data [1], picref->data [2], picref->data [3],
picref->linesize[0], picref->linesize[1], picref->linesize[2], picref->linesize[3],
picref->pts, picref->pos,
- picref->pixel_aspect.num, picref->pixel_aspect.den, picref->w, picref->h,
+ picref->desc.pixel_aspect.num, picref->desc.pixel_aspect.den, picref->dim1.w, picref->dim2.h,
end ? "\n" : "");
}
@@ -211,6 +212,20 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
return ret;
}
+/* Request an audio buffer from the destination pad of 'link': use the
+ * filter's own get_samples_ref() callback when it provides one, and fall
+ * back to avfilter_default_get_samples_ref() when the callback is absent
+ * or returns NULL.  Mirrors avfilter_get_video_buffer() for video. */
+AVFilterBufferRef *avfilter_get_samples_ref(AVFilterLink *link, int perms, int size,
+ int64_t channel_layout, enum SampleFormat sample_fmt, int planar)
+{
+ AVFilterBufferRef *ret = NULL;
+
+ if(link_dpad(link).get_samples_ref)
+ ret = link_dpad(link).get_samples_ref(link, perms, size, channel_layout, sample_fmt, planar);
+
+ if(!ret)
+ ret = avfilter_default_get_samples_ref(link, perms, size, channel_layout, sample_fmt, planar);
+
+ return ret;
+}
+
int avfilter_request_frame(AVFilterLink *link)
{
FF_DPRINTF_START(NULL, request_frame); ff_dprintf_link(NULL, link, 1);
@@ -314,7 +329,7 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
for(i = 0; i < 4; i ++) {
int planew =
- ff_get_plane_bytewidth(link->format, link->cur_buf->w, i);
+ ff_get_plane_bytewidth(link->format, link->cur_buf->dim1.w, i);
if(!src[i]) continue;
@@ -331,6 +346,46 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
draw_slice(link, y, h, slice_dir);
}
+/* Send a buffer of audio samples down 'link'.  If the destination pad's
+ * permission requirements are not met by samplesref, the samples are
+ * copied into a freshly allocated buffer with the needed permissions
+ * before the pad's filter_samples() callback (or the default one) runs. */
+void avfilter_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+{
+    void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
+    AVFilterPad *dst = &link_dpad(link);
+
+    if (!(filter_samples = dst->filter_samples))
+        filter_samples = avfilter_default_filter_samples;
+
+    /* prepare to copy the samples if the buffer has insufficient permissions */
+    if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
+        dst->rej_perms & samplesref->perms) {
+        int i, num_channels;
+
+        /* per-buffer diagnostic: DEBUG, matching the video path in
+         * avfilter_start_frame(), instead of spamming at INFO */
+        av_log(link->dst, AV_LOG_DEBUG,
+               "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
+               samplesref->perms, dst->min_perms, dst->rej_perms);
+
+        link->cur_buf = avfilter_default_get_samples_ref(link, dst->min_perms,
+                            samplesref->dim2.size, samplesref->desc.channel_layout,
+                            samplesref->format, samplesref->packing.planar);
+        link->cur_buf->pts              = samplesref->pts;
+        link->cur_buf->prop.sample_rate = samplesref->prop.sample_rate;
+
+        /* FIXME: Need to use hamming weight count function instead once libavutil has the required function */
+        num_channels = avcodec_channel_layout_num_channels(samplesref->desc.channel_layout);
+
+        if (samplesref->packing.planar > 0) {
+            /* planar: one separate plane per channel */
+            int copy_size = samplesref->dim2.size / num_channels;
+            for (i = 0; i < num_channels; i++)
+                memcpy(link->cur_buf->data[i], samplesref->data[i], copy_size);
+        } else {
+            /* packed: all channels interleaved in data[0]; copying
+             * size/num_channels bytes from each data[i] would duplicate
+             * the head of the buffer instead of copying all of it */
+            memcpy(link->cur_buf->data[0], samplesref->data[0], samplesref->dim2.size);
+        }
+
+        avfilter_unref_buffer(samplesref);
+    } else
+        link->cur_buf = samplesref;
+
+    filter_samples(link, link->cur_buf);
+}
+
#define MAX_REGISTERED_AVFILTERS_NB 64
static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1];
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 96fca21..c433c6c 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -99,21 +99,38 @@ typedef struct AVFilterBuffer
typedef struct AVFilterBufferRef
{
AVFilterBuffer *buf; ///< the buffer that this is a reference to
- uint8_t *data[4]; ///< picture data for each plane
- int linesize[4]; ///< number of bytes per line
- int w; ///< image width
- int h; ///< image height
+ uint8_t *data[8]; ///< picture/audio data for each plane/channel
+ int linesize[8]; ///< number of bytes per line
int format; ///< media format
+ int perms; ///< permissions, see the AV_PERM_* flags
int64_t pts; ///< presentation timestamp in units of 1/AV_TIME_BASE
int64_t pos; ///< byte position in stream, -1 if unknown
- AVRational pixel_aspect; ///< pixel aspect ratio
-
- int perms; ///< permissions, see the AV_PERM_* flags
+ union {
+ AVRational pixel_aspect; ///< pixel aspect ratio of video buffer
+ int64_t channel_layout; ///< channel layout of audio buffer
+ } desc;
+
+ union {
+ int w; ///< image width
+ int samples_nb; ///< number of audio samples
+ } dim1;
+ union {
+ int h; ///< image height
+ int size; ///< audio buffer size
+ } dim2;
+
+ union {
+ int interlaced; ///< is video frame interlaced
+ uint32_t sample_rate; ///< audio buffer sample rate
+ } prop;
+
+ union {
+ int top_field_first; ///< video field order
+ int planar; ///< audio buffer - planar or packed
+ } packing;
- int interlaced; ///< is frame interlaced
- int top_field_first;
} AVFilterBufferRef;
/**
@@ -122,11 +139,11 @@ typedef struct AVFilterBufferRef
*/
static inline void avfilter_copy_bufref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
{
- dst->pts = src->pts;
- dst->pos = src->pos;
- dst->pixel_aspect = src->pixel_aspect;
- dst->interlaced = src->interlaced;
- dst->top_field_first = src->top_field_first;
+ dst->pts = src->pts;
+ dst->pos = src->pos;
+ dst->desc.pixel_aspect = src->desc.pixel_aspect;
+ dst->prop.interlaced = src->prop.interlaced;
+ dst->packing.top_field_first = src->packing.top_field_first;
}
/**
@@ -332,6 +349,16 @@ struct AVFilterPad
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
/**
+ * Callback function to get an audio buffer. If NULL, the filter system will
+ * use avfilter_default_get_samples_ref().
+ *
+ * Input audio pads only.
+ */
+ AVFilterBufferRef *(*get_samples_ref)(AVFilterLink *link, int perms,
+ int size, int64_t channel_layout,
+ enum SampleFormat sample_fmt, int planar);
+
+ /**
* Callback called after the slices of a frame are completely sent. If
* NULL, the filter layer will default to releasing the reference stored
* in the link structure during start_frame().
@@ -349,13 +376,21 @@ struct AVFilterPad
void (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);
/**
+ * Samples filtering callback. This is where a filter receives audio data
+ * and should do its processing.
+ *
+ * Input audio pads only.
+ */
+ void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
+
+ /**
* Frame poll callback. This returns the number of immediately available
* frames. It should return a positive value if the next request_frame()
* is guaranteed to return one frame (with no delay).
*
* Defaults to just calling the source poll_frame() method.
*
- * Output video pads only.
+ * Output pads only.
*/
int (*poll_frame)(AVFilterLink *link);
@@ -364,7 +399,7 @@ struct AVFilterPad
* frame being output over the given link. This should return zero on
* success, and another value on error.
*
- * Output video pads only.
+ * Output pads only.
*/
int (*request_frame)(AVFilterLink *link);
@@ -391,13 +426,19 @@ void avfilter_default_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
void avfilter_default_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
/** default handler for end_frame() for video inputs */
void avfilter_default_end_frame(AVFilterLink *link);
-/** default handler for config_props() for video outputs */
+/** default handler for filter_samples() for audio inputs */
+void avfilter_default_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+/** default handler for config_props() for audio/video outputs */
int avfilter_default_config_output_link(AVFilterLink *link);
-/** default handler for config_props() for video inputs */
+/** default handler for config_props() for audio/video inputs */
int avfilter_default_config_input_link (AVFilterLink *link);
/** default handler for get_video_buffer() for video inputs */
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
+/** default handler for get_samples_ref() for audio inputs */
+AVFilterBufferRef *avfilter_default_get_samples_ref(AVFilterLink *link, int perms,
+ int size, int64_t channel_layout,
+ enum SampleFormat sample_fmt, int planar);
/**
* A helper for query_formats() which sets all links to the same list of
* formats. If there are no links hooked to this filter, the list of formats is
@@ -413,6 +454,9 @@ void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/** draw_slice() handler for filters which simply pass video along */
void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
+/** filter_samples() handler for filters which simply pass audio along */
+void avfilter_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+
/** end_frame() handler for filters which simply pass video along */
void avfilter_null_end_frame(AVFilterLink *link);
@@ -420,6 +464,11 @@ void avfilter_null_end_frame(AVFilterLink *link);
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
+/** get_samples_ref() handler for filters which simply pass audio along */
+AVFilterBufferRef *avfilter_null_get_samples_ref(AVFilterLink *link, int perms,
+ int size, int64_t channel_layout,
+ enum SampleFormat sample_fmt, int planar);
+
/**
* Filter definition. This defines the pads a filter contains, and all the
* callback functions used to interact with the filter.
@@ -509,8 +558,14 @@ struct AVFilterLink
enum AVMediaType type; ///< filter media type
+ /* These two parameters apply only to video */
int w; ///< agreed upon image width
int h; ///< agreed upon image height
+ /* These three parameters apply only to audio */
+ int samples_nb; ///< number of samples in this buffer
+ int64_t channel_layout; ///< channel layout of current buffer (see avcodec.h)
+ int64_t sample_rate; ///< samples per second
+
int format; ///< agreed upon media format
/**
@@ -566,6 +621,22 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms,
int w, int h);
/**
+ * Request an audio samples buffer with a specific set of permissions.
+ *
+ * @param link the output link to the filter from which the buffer will
+ * be requested
+ * @param perms the required access permissions
+ * @param size the total size in bytes of the buffer to allocate
+ * @param channel_layout the number and type of channels per sample in the buffer to allocate
+ * @param sample_fmt the format of each sample in the buffer to allocate
+ * @param planar samples are packed if 0 or planar if 1
+ * @return A reference to the samples. This must be unreferenced with
+ * avfilter_unref_buffer when you are finished with it.
+ */
+AVFilterBufferRef *avfilter_get_samples_ref(AVFilterLink *link, int perms,
+ int size, int64_t channel_layout,
+ enum SampleFormat sample_fmt, int planar);
+
+/**
* Request an input frame from the filter at the other end of the link.
* @param link the input link
* @return zero on success
@@ -613,6 +684,14 @@ void avfilter_end_frame(AVFilterLink *link);
*/
void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
+/**
+ * Send a buffer of audio samples to the next filter.
+ *
+ * @param link the output link over which the audio samples are being sent
+ * @param planar samples are packed if 0 or planar if 1
+ */
+void avfilter_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
diff --git a/libavfilter/defaults.c b/libavfilter/defaults.c
index d607c31..b39937e 100644
--- a/libavfilter/defaults.c
+++ b/libavfilter/defaults.c
@@ -20,6 +20,7 @@
*/
#include "libavcore/imgutils.h"
+#include "libavcodec/audioconvert.h"
#include "avfilter.h"
/* TODO: buffer pool. see comment for avfilter_default_get_video_buffer() */
@@ -40,8 +41,8 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
char *buf;
ref->buf = pic;
- ref->w = w;
- ref->h = h;
+ ref->dim1.w = w;
+ ref->dim2.h = h;
/* make sure the buffer gets read permission or it's useless for output */
ref->perms = perms | AV_PERM_READ;
@@ -49,15 +50,15 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
pic->refcount = 1;
ref->format = link->format;
pic->free = avfilter_default_free_buffer;
- av_fill_image_linesizes(pic->linesize, ref->format, ref->w);
+ av_fill_image_linesizes(pic->linesize, ref->format, ref->dim1.w);
for (i=0; i<4;i++)
pic->linesize[i] = FFALIGN(pic->linesize[i], 16);
- tempsize = av_fill_image_pointers(pic->data, ref->format, ref->h, NULL, pic->linesize);
+ tempsize = av_fill_image_pointers(pic->data, ref->format, ref->dim2.h, NULL, pic->linesize);
buf = av_malloc(tempsize + 16); // +2 is needed for swscaler, +16 to be
// SIMD-friendly
- av_fill_image_pointers(pic->data, ref->format, ref->h, buf, pic->linesize);
+ av_fill_image_pointers(pic->data, ref->format, ref->dim2.h, buf, pic->linesize);
memcpy(ref->data, pic->data, sizeof(pic->data));
memcpy(ref->linesize, pic->linesize, sizeof(pic->linesize));
@@ -65,6 +66,65 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
return ref;
}
+/* Default audio buffer allocator.
+ *
+ * @param link           link whose format context the buffer belongs to
+ * @param perms          requested AV_PERM_* flags (READ is always added)
+ * @param size           total buffer size in bytes, all channels included
+ * @param channel_layout channel layout of the buffer
+ * @param sample_fmt     format of each sample
+ * @param planar         non-zero for one plane per channel, 0 for packed
+ * @return a new reference, or NULL on allocation failure / bad arguments
+ */
+AVFilterBufferRef *avfilter_default_get_samples_ref(AVFilterLink *link, int perms,
+                                                    int size, int64_t channel_layout,
+                                                    enum SampleFormat sample_fmt, int planar)
+{
+    AVFilterBuffer *buffer = av_mallocz(sizeof(AVFilterBuffer));
+    AVFilterBufferRef *ref = av_mallocz(sizeof(AVFilterBufferRef));
+    int i, sample_size, num_chans, bufsize, per_channel_size;
+    char *buf;
+
+    if (!buffer || !ref)
+        goto fail;
+
+    ref->buf                 = buffer;
+    ref->desc.channel_layout = channel_layout;
+    ref->format              = sample_fmt;
+    ref->dim2.size           = size;
+    ref->packing.planar      = planar;
+
+    /* make sure the buffer gets read permission or it's useless for output */
+    ref->perms = perms | AV_PERM_READ;
+
+    buffer->refcount = 1;
+    buffer->free     = avfilter_default_free_buffer;
+
+    sample_size = av_get_bits_per_sample_format(sample_fmt) >> 3;
+    num_chans   = avcodec_channel_layout_num_channels(channel_layout);
+    if (sample_size <= 0 || num_chans <= 0)
+        goto fail;   /* avoid division by zero on bogus fmt/layout */
+
+    per_channel_size     = size / num_chans;
+    ref->dim1.samples_nb = per_channel_size / sample_size;
+
+    /* Set the number of bytes to traverse to reach next sample of a particular channel.
+     * NOTE(review): the code gives planar buffers a linesize of one whole
+     * channel plane and packed buffers the bare sample size, which
+     * contradicts the comment in earlier drafts ("packed = number of
+     * samples * sample_size") -- confirm the intended semantics. */
+    for (i = 0; i < num_chans; i++)
+        buffer->linesize[i] = planar > 0 ? per_channel_size : sample_size;
+    memset(&buffer->linesize[num_chans], 0,
+           (8 - num_chans) * sizeof(buffer->linesize[0]));
+
+    /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */
+    bufsize = (size + 15) & ~15;
+    if (!(buf = av_malloc(bufsize)))
+        goto fail;
+
+    /* For planar, each channel points at its own slice of the buffer.
+     * For packed, every entry points at the start of the buffer.
+     * (The previous memset(&data[1], (long)buf, ...) filled the pointer
+     * array with the low byte of 'buf' -- memset cannot store pointers.) */
+    for (i = 0; i < num_chans; i++)
+        buffer->data[i] = planar > 0 ? buf + i * per_channel_size : buf;
+    memset(&buffer->data[num_chans], 0,
+           (8 - num_chans) * sizeof(buffer->data[0]));
+
+    memcpy(ref->data,     buffer->data,     sizeof(buffer->data));
+    memcpy(ref->linesize, buffer->linesize, sizeof(buffer->linesize));
+
+    return ref;
+
+fail:
+    av_free(buffer);
+    av_free(ref);
+    return NULL;
+}
+
void avfilter_default_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
AVFilterLink *out = NULL;
@@ -109,6 +169,28 @@ void avfilter_default_end_frame(AVFilterLink *link)
}
}
+/* FIXME: samplesref is same as link->cur_buf. Need to consider removing the redundant parameter. */
+/* Default filter_samples(): allocate a fresh writable buffer on the first
+ * output of the destination filter, copy the buffer properties (pts,
+ * sample rate) and forward a reference to it, mirroring
+ * avfilter_default_start_frame() for video.
+ * NOTE(review): the sample data itself is never copied into out_buf
+ * here -- confirm whether downstream filters are expected to fill it or
+ * whether a data copy is missing. */
+void avfilter_default_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+{
+ AVFilterLink *out = NULL;
+
+ if (link->dst->output_count)
+ out = link->dst->outputs[0];
+
+ if (out) {
+ out->out_buf = avfilter_default_get_samples_ref(link, AV_PERM_WRITE, samplesref->dim2.size,
+ samplesref->desc.channel_layout,
+ samplesref->format, samplesref->packing.planar);
+ out->out_buf->pts = samplesref->pts;
+ out->out_buf->prop.sample_rate = samplesref->prop.sample_rate;
+ avfilter_filter_samples(out, avfilter_ref_buffer(out->out_buf, ~0));
+ avfilter_unref_buffer(out->out_buf);
+ out->out_buf = NULL;
+ }
+ avfilter_unref_buffer(samplesref);
+ link->cur_buf = NULL;
+}
+
/**
* default config_link() implementation for output video links to simplify
* the implementation of one input one output video filters */
@@ -183,8 +265,21 @@ void avfilter_null_end_frame(AVFilterLink *link)
avfilter_end_frame(link->dst->outputs[0]);
}
+/* filter_samples() handler for filters that pass audio through unchanged:
+ * hand the reference straight to the first output link. */
+void avfilter_null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+{
+ avfilter_filter_samples(link->dst->outputs[0], samplesref);
+}
+
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
}
+/* get_samples_ref() handler for pass-through filters: forward the request
+ * to the filter on the other side of the first output link.
+ * (Parameter renamed from 'packed' to 'planar': the public prototype and
+ * every other audio function use 'planar' with 1 = planar, so the old
+ * name inverted the flag's meaning for readers.) */
+AVFilterBufferRef *avfilter_null_get_samples_ref(AVFilterLink *link, int perms,
+                                                 int size, int64_t channel_layout,
+                                                 enum SampleFormat sample_fmt, int planar)
+{
+    return avfilter_get_samples_ref(link->dst->outputs[0], perms, size,
+                                    channel_layout, sample_fmt, planar);
+}
+
+
diff --git a/libavfilter/formats.c b/libavfilter/formats.c
index f60a4e6..d5dff0e 100644
--- a/libavfilter/formats.c
+++ b/libavfilter/formats.c
@@ -107,7 +107,8 @@ AVFilterFormats *avfilter_all_formats(enum AVMediaType type)
{
AVFilterFormats *ret = NULL;
int fmt;
- int num_formats = type == AVMEDIA_TYPE_VIDEO ? PIX_FMT_NB : 0;
+ int num_formats = type == AVMEDIA_TYPE_VIDEO ? PIX_FMT_NB :
+ type == AVMEDIA_TYPE_AUDIO ? SAMPLE_FMT_NB : 0;
for (fmt = 0; fmt < num_formats; fmt++)
if ((type != AVMEDIA_TYPE_VIDEO) ||
diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c
index bd18649..26883db 100644
--- a/libavfilter/vf_aspect.c
+++ b/libavfilter/vf_aspect.c
@@ -60,7 +60,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
AspectContext *aspect = link->dst->priv;
- picref->pixel_aspect = aspect->aspect;
+ picref->desc.pixel_aspect = aspect->aspect;
avfilter_start_frame(link->dst->outputs[0], picref);
}
diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index ddaf733..5f83e30 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -163,8 +163,8 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
AVFilterBufferRef *ref2 = avfilter_ref_buffer(picref, ~0);
int i;
- ref2->w = crop->w;
- ref2->h = crop->h;
+ ref2->dim1.w = crop->w;
+ ref2->dim2.h = crop->h;
ref2->data[0] += crop->y * ref2->linesize[0];
ref2->data[0] += (crop->x * crop->bpp) >> 3;
diff --git a/libavfilter/vf_drawbox.c b/libavfilter/vf_drawbox.c
index 717da63..769c927 100644
--- a/libavfilter/vf_drawbox.c
+++ b/libavfilter/vf_drawbox.c
@@ -100,14 +100,14 @@ static void draw_box(AVFilterBufferRef *pic, BoxContext* context, box_color colo
int xb = context->x;
int yb = context->y;
- for (y = yb; (y < yb + context->h) && y < pic->h; y++) {
+ for (y = yb; (y < yb + context->h) && y < pic->dim2.h; y++) {
row[0] = pic->data[0] + y * pic->linesize[0];
for (channel = 1; channel < 3; channel++)
row[channel] = pic->data[channel] +
pic->linesize[channel] * (y>> context->vsub);
- for (x = xb; (x < xb + context->w) && x < pic->w; x++)
+ for (x = xb; (x < xb + context->w) && x < pic->dim1.w; x++)
if((y - yb < 3) || (yb + context->h - y < 4) ||
(x - xb < 3) || (xb + context->w - x < 4)) {
row[0][x] = color.y;
@@ -125,7 +125,7 @@ static void end_frame(AVFilterLink *link)
draw_box(pic,context,context->color);
- avfilter_draw_slice(output, 0, pic->h, 1);
+ avfilter_draw_slice(output, 0, pic->dim2.h, 1);
avfilter_end_frame(output);
}
diff --git a/libavfilter/vf_fifo.c b/libavfilter/vf_fifo.c
index d5830b0..bf4561b 100644
--- a/libavfilter/vf_fifo.c
+++ b/libavfilter/vf_fifo.c
@@ -84,7 +84,7 @@ static int request_frame(AVFilterLink *link)
/* by doing this, we give ownership of the reference to the next filter,
* so we don't have to worry about dereferencing it ourselves. */
avfilter_start_frame(link, buf->root.next->pic);
- avfilter_draw_slice(link, 0, buf->root.next->pic->h, 1);
+ avfilter_draw_slice(link, 0, buf->root.next->pic->dim2.h, 1);
avfilter_end_frame(link);
if(buf->last == buf->root.next)
diff --git a/libavfilter/vf_fps.c b/libavfilter/vf_fps.c
index bafd13c..7161834 100644
--- a/libavfilter/vf_fps.c
+++ b/libavfilter/vf_fps.c
@@ -96,7 +96,7 @@ static int request_frame(AVFilterLink *link)
fps->has_frame=0;
avfilter_start_frame(link, avfilter_ref_buffer(fps->pic, ~AV_PERM_WRITE));
- avfilter_draw_slice (link, 0, fps->pic->h, 1);
+ avfilter_draw_slice (link, 0, fps->pic->dim2.h, 1);
avfilter_end_frame (link);
avfilter_unref_buffer(fps->pic);
diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c
index 8810199..3c004f9 100644
--- a/libavfilter/vf_hflip.c
+++ b/libavfilter/vf_hflip.c
@@ -60,7 +60,7 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
/* luma plane */
outrow = out->data[0] + y * out->linesize[0];
- inrow = in-> data[0] + y * in-> linesize[0] + in->w -1;
+ inrow = in-> data[0] + y * in-> linesize[0] + in->dim1.w -1;
for(i = 0; i < h; i++) {
for(j = 0; j < link->w; j++)
outrow[j] = inrow[-j];
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 0a6160c..27ea94d 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -340,14 +340,14 @@ static int request_frame(AVFilterLink *link)
/* we draw the output frame */
pic = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
if(over->pics[0][0]) {
- pic->pixel_aspect = over->pics[0][0]->pixel_aspect;
+ pic->desc.pixel_aspect = over->pics[0][0]->desc.pixel_aspect;
copy_image(pic, 0, 0, over->pics[0][0], link->w, link->h,
over->bpp, over->hsub, over->vsub);
}
x = FFMIN(over->x, link->w-1);
y = FFMIN(over->y, link->h-1);
- w = FFMIN(link->w-x, over->pics[1][0]->w);
- h = FFMIN(link->h-y, over->pics[1][0]->h);
+ w = FFMIN(link->w-x, over->pics[1][0]->dim1.w);
+ h = FFMIN(link->h-y, over->pics[1][0]->dim2.h);
if(over->pics[1][0])
copy_image(pic, x, y, over->pics[1][0], w, h,
over->bpp, over->hsub, over->vsub);
@@ -357,7 +357,7 @@ static int request_frame(AVFilterLink *link)
/* and send it to the next filter */
avfilter_start_frame(link, avfilter_ref_buffer(pic, ~0));
- avfilter_draw_slice (link, 0, pic->h, 1);
+ avfilter_draw_slice (link, 0, pic->dim2.h, 1);
avfilter_end_frame (link);
avfilter_unref_buffer(pic);
diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c
index 24ff034..2e00ca5 100644
--- a/libavfilter/vf_pad.c
+++ b/libavfilter/vf_pad.c
@@ -427,7 +427,7 @@ static int color_request_frame(AVFilterLink *link)
{
ColorContext *color = link->src->priv;
AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
- picref->pixel_aspect = (AVRational) {1, 1};
+ picref->desc.pixel_aspect = (AVRational) {1, 1};
picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
picref->pos = 0;
diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c
index 77139c5..ea00b2f 100644
--- a/libavfilter/vf_pixdesctest.c
+++ b/libavfilter/vf_pixdesctest.c
@@ -56,7 +56,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
int i;
outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
- outlink->w, outlink->h);
+ outlink->w, outlink->h);
outpicref = outlink->out_buf;
avfilter_copy_bufref_props(outpicref, picref);
diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c
index 24990f1..f04253b 100644
--- a/libavfilter/vf_rotate.c
+++ b/libavfilter/vf_rotate.c
@@ -125,7 +125,7 @@ static void end_frame(AVFilterLink *link)
int column = (i - rot->transy)*rot->cosx -
(j - rot->transx)*rot->sinx + 0.5;
- if (line < 0 || line >= in->w || column < 0 || column >= in->h)
+ if (line < 0 || line >= in->dim1.w || column < 0 || column >= in->dim2.h)
*(out->data[0] + i*out->linesize[0] + j) = rot->backcolor[0];
else
*(out->data[0] + i*out->linesize[0] + j) =
@@ -145,7 +145,7 @@ static void end_frame(AVFilterLink *link)
int column = (i2 - rot->transy)*rot->cosx -
(j2 - rot->transx)*rot->sinx + 0.5;
- if (line < 0 || line >= in->w || column < 0 || column >= in->h) {
+ if (line < 0 || line >= in->dim1.w || column < 0 || column >= in->dim2.h) {
*(out->data[plane] + i*out->linesize[plane] + j) =
rot->backcolor[plane];
} else {
@@ -171,11 +171,11 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
out->out_buf->pts = picref->pts;
out->out_buf->pos = picref->pos;
- if(picref->pixel_aspect.num == 0) {
- out->out_buf->pixel_aspect = picref->pixel_aspect;
+ if(picref->desc.pixel_aspect.num == 0) {
+ out->out_buf->desc.pixel_aspect = picref->desc.pixel_aspect;
} else {
- out->out_buf->pixel_aspect.num = picref->pixel_aspect.den;
- out->out_buf->pixel_aspect.den = picref->pixel_aspect.num;
+ out->out_buf->desc.pixel_aspect.num = picref->desc.pixel_aspect.den;
+ out->out_buf->desc.pixel_aspect.den = picref->desc.pixel_aspect.num;
}
avfilter_start_frame(out, avfilter_ref_buffer(out->out_buf, ~0));
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 0486841..37dd7fb 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -156,9 +156,9 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
outlink->out_buf = outpicref;
- av_reduce(&outpicref->pixel_aspect.num, &outpicref->pixel_aspect.den,
- (int64_t)picref->pixel_aspect.num * outlink->h * link->w,
- (int64_t)picref->pixel_aspect.den * outlink->w * link->h,
+ av_reduce(&outpicref->desc.pixel_aspect.num, &outpicref->desc.pixel_aspect.den,
+ (int64_t)picref->desc.pixel_aspect.num * outlink->h * link->w,
+ (int64_t)picref->desc.pixel_aspect.den * outlink->w * link->h,
INT_MAX);
scale->slice_y = 0;
diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c
index 15046f8..f1679e1 100644
--- a/libavfilter/vf_transpose.c
+++ b/libavfilter/vf_transpose.c
@@ -74,22 +74,22 @@ static void end_frame(AVFilterLink *link)
int i, j, plane;
/* luma plane */
- for(i = 0; i < pic->h; i ++)
- for(j = 0; j < pic->w; j ++)
+ for(i = 0; i < pic->dim2.h; i ++)
+ for(j = 0; j < pic->dim1.w; j ++)
*(out->data[0] + j *out->linesize[0] + i) =
*(in->data[0]+ i * in->linesize[0] + j);
/* chroma planes */
for(plane = 1; plane < 3; plane ++) {
- for(i = 0; i < pic->h >> trans->vsub; i++) {
- for(j = 0; j < pic->w >> trans->hsub; j++)
+ for(i = 0; i < pic->dim2.h >> trans->vsub; i++) {
+ for(j = 0; j < pic->dim1.w >> trans->hsub; j++)
*(out->data[plane] + j *out->linesize[plane] + i) =
*(in->data[plane]+ i * in->linesize[plane] + j);
}
}
avfilter_unref_buffer(in);
- avfilter_draw_slice(output, 0, out->h, 1);
+ avfilter_draw_slice(output, 0, out->dim2.h, 1);
avfilter_end_frame(output);
avfilter_unref_buffer(out);
}
@@ -101,11 +101,11 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
out->out_buf = avfilter_get_video_buffer(out, AV_PERM_WRITE, out->w, out->h);
out->out_buf->pts = picref->pts;
- if(picref->pixel_aspect.num == 0) {
- out->out_buf->pixel_aspect = picref->pixel_aspect;
+ if(picref->desc.pixel_aspect.num == 0) {
+ out->out_buf->desc.pixel_aspect = picref->desc.pixel_aspect;
} else {
- out->out_buf->pixel_aspect.num = picref->pixel_aspect.den;
- out->out_buf->pixel_aspect.den = picref->pixel_aspect.num;
+ out->out_buf->desc.pixel_aspect.num = picref->desc.pixel_aspect.den;
+ out->out_buf->desc.pixel_aspect.den = picref->desc.pixel_aspect.num;
}
avfilter_start_frame(out, avfilter_ref_buffer(out->out_buf, ~0));
diff --git a/libavfilter/vsrc_buffer.c b/libavfilter/vsrc_buffer.c
index 2bec911..83f817a 100644
--- a/libavfilter/vsrc_buffer.c
+++ b/libavfilter/vsrc_buffer.c
@@ -122,10 +122,10 @@ static int request_frame(AVFilterLink *link)
av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
picref->format, link->w, link->h);
- picref->pts = c->pts;
- picref->pixel_aspect = c->pixel_aspect;
- picref->interlaced = c->frame.interlaced_frame;
- picref->top_field_first = c->frame.top_field_first;
+ picref->pts = c->pts;
+ picref->desc.pixel_aspect = c->pixel_aspect;
+ picref->prop.interlaced = c->frame.interlaced_frame;
+ picref->packing.top_field_first = c->frame.top_field_first;
avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
_______________________________________________
FFmpeg-soc mailing list
[email protected]
https://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-soc