> > > > --- a/libavcodec/vaapi_decode.c > > > +++ b/libavcodec/vaapi_decode.c > > > @@ -455,6 +455,9 @@ static const struct { > > > MAP(AV1, AV1_MAIN, AV1Profile0), > > > MAP(AV1, AV1_HIGH, AV1Profile1), > > > #endif > > > +#if VA_CHECK_VERSION(1, 22, 0) > > > + MAP(H266, VVC_MAIN_10, VVCMain10), > > > +#endif > > > > > > #undef MAP > > > }; > > > @@ -627,6 +630,10 @@ static int > > > vaapi_decode_make_config(AVCodecContext *avctx, > > > case AV_CODEC_ID_VP8: > > > frames->initial_pool_size += 3; > > > break; > > > + case AV_CODEC_ID_H266: > > > + // Add additional 16 for maximum 16 frames delay in > > > vvc native decode. > > > + frames->initial_pool_size += 32; > > One frame of 8k YUV444, 10 bits, is about 200MB. Thirty-two frames > > amount to approximately 6GB. Can we dynamically allocate the buffer > > pool? > It's being handled in another thread: > https://patchwork.ffmpeg.org/project/ffmpeg/list/?series=11316 > > > > The software decoder requires a delay of 16 frames to ensure full > > utilization of CPUs. In the future, we may consider increasing this > > to 32 or even 64 frames. > > However, for hardware decoding, given that all processing occurs on > > the GPU, we do not require any delay. > The delay avoids syncing a hardware task immediately after it is > submitted, which prevents the hardware from switching tasks frequently and > degrading performance. If the number increases in the future, I'd prefer to make it an > option and use different default values for hardware and software.
Why does VVC require such a large frame pool while other hardware codecs do not? What makes VVC so special? _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".