On Wed Nov 19 15:25:49 2025 +0900, Jackson Lee wrote:
> The current decoding method was to wait until each frame was
> decoded after feeding a bitstream. As a result, performance was low
> and Wave5 could not achieve its maximum pixel processing rate.
> 
> Update the driver to use an asynchronous approach for decoding and
> feeding a bitstream in order to achieve the full capabilities of the
> device.
> 
> WAVE5 supports command queueing to maximize performance by pipelining
> internal commands and by hiding the wait cycles taken to receive a
> command from the host processor.
> 
> Instead of waiting for each command to be executed before sending the
> next one, the host processor simply places all the commands in the
> command queue and goes on doing other things while the commands in the
> queue are processed by the VPU.
> 
> While the host processor handles its own tasks, it can receive a VPU
> interrupt request (IRQ).
> In this case, the host processor can simply exit the interrupt service
> routine (ISR) without accessing the host interface to read the result
> of the command reported by the VPU.
> After the host processor has completed its tasks, it can read the
> command result whenever it needs the report and perform response
> processing.
> 
> To achieve this, device_run() calls v4l2_m2m_job_finish() so that the
> next command can be sent to the VPU continuously. Whenever a result is
> available, an IRQ is triggered and the handler fetches the decoded
> frames and returns them to the upper layer (see the sketch after the
> sign-offs below).
> These processes work independently of each other without waiting for
> a decoded frame.
> 
> Signed-off-by: Jackson Lee <[email protected]>
> Signed-off-by: Nas Chung <[email protected]>
> Tested-by: Brandon Brnich <[email protected]>
> Signed-off-by: Nicolas Dufresne <[email protected]>
> Signed-off-by: Hans Verkuil <[email protected]>

Patch committed.

Thanks,
Hans Verkuil
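
For readers less familiar with the v4l2-mem2mem framework, the following is a
minimal sketch of the asynchronous device_run()/IRQ split described in the
commit message above. It is an illustration only, not code from the patch
below: the my_*() helpers and struct my_ctx are hypothetical stand-ins, and
only v4l2_m2m_job_finish() and the interrupt return codes are real kernel API.

/* Illustration only: hypothetical m2m decoder context and helpers. */
#include <linux/interrupt.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx {
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_ctx *m2m_ctx;
        bool sent_eos;          /* set once the EOS event has been sent */
};

/* Placeholder helpers standing in for the real driver code. */
static void my_feed_bitstream(struct my_ctx *ctx) { /* copy OUTPUT data into the ring buffer */ }
static void my_queue_dec_pic(struct my_ctx *ctx) { /* place a DEC_PIC command in the VPU queue */ }
static void my_read_results(struct my_ctx *ctx) { /* read the command report, return CAPTURE buffers */ }

/* device_run(): queue work for the VPU and return without waiting. */
static void my_device_run(void *priv)
{
        struct my_ctx *ctx = priv;

        my_feed_bitstream(ctx);
        my_queue_dec_pic(ctx);

        /*
         * Finish the job right away so the m2m core can schedule the next
         * device_run() while the VPU is still working on the queued command.
         * After EOS, finishing the job is left to the completion path.
         */
        if (!ctx->sent_eos)
                v4l2_m2m_job_finish(ctx->m2m_dev, ctx->m2m_ctx);
}

/* Threaded IRQ handler: only here is the command result read back. */
static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
        struct my_ctx *ctx = dev_id;

        my_read_results(ctx);
        return IRQ_HANDLED;
}

In the actual patch the same idea shows up as the inst->sent_eos flag and the
queue-status check in wave5_vpu_dec_finish_decode(), visible in the diff below.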

 .../media/platform/chips-media/wave5/wave5-hw.c    |   2 +-
 .../platform/chips-media/wave5/wave5-vpu-dec.c     | 180 +++++++++++++++------
 .../media/platform/chips-media/wave5/wave5-vpu.h   |   2 +-
 .../platform/chips-media/wave5/wave5-vpuapi.c      |  53 ++++--
 .../platform/chips-media/wave5/wave5-vpuapi.h      |   6 +
 .../platform/chips-media/wave5/wave5-vpuconfig.h   |   1 +
 6 files changed, 178 insertions(+), 66 deletions(-)

---

diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
index d94cf84c3ee5..687ce6ccf3ae 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-hw.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -102,7 +102,7 @@ static void _wave5_print_reg_err(struct vpu_device *vpu_dev, u32 reg_fail_reason
                dev_dbg(dev, "%s: queueing failure: 0x%x\n", func, reg_val);
                break;
        case WAVE5_SYSERR_RESULT_NOT_READY:
-               dev_err(dev, "%s: result not ready: 0x%x\n", func, reg_fail_reason);
+               dev_dbg(dev, "%s: result not ready: 0x%x\n", func, reg_fail_reason);
                break;
        case WAVE5_SYSERR_ACCESS_VIOLATION_HW:
                dev_err(dev, "%s: access violation: 0x%x\n", func, reg_fail_reason);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index b10396fa2379..e75770912e21 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -136,6 +136,18 @@ valid_state_switch:
        return 0;
 }
 
+static int set_instance_state(struct vpu_instance *inst, enum vpu_instance_state state)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&inst->state_spinlock, flags);
+       ret = switch_state(inst, state);
+       spin_unlock_irqrestore(&inst->state_spinlock, flags);
+
+       return ret;
+}
+
 static int wave5_vpu_dec_set_eos_on_firmware(struct vpu_instance *inst)
 {
        int ret;
@@ -227,7 +239,7 @@ static int start_decode(struct vpu_instance *inst, u32 *fail_res)
                src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
                if (src_buf)
                        v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
-               switch_state(inst, VPU_INST_STATE_STOP);
+               set_instance_state(inst, VPU_INST_STATE_STOP);
 
                dev_dbg(inst->dev->dev, "%s: pic run failed / finish job", __func__);
                v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
@@ -268,6 +280,7 @@ static void send_eos_event(struct vpu_instance *inst)
 
        v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_eos);
        inst->eos = false;
+       inst->sent_eos = true;
 }
 
 static int handle_dynamic_resolution_change(struct vpu_instance *inst)
@@ -347,13 +360,12 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
        struct vb2_v4l2_buffer *dec_buf = NULL;
        struct vb2_v4l2_buffer *disp_buf = NULL;
        struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
-       struct queue_status_info q_status;
 
        dev_dbg(inst->dev->dev, "%s: Fetch output info from firmware.", __func__);
 
        ret = wave5_vpu_dec_get_output_info(inst, &dec_info);
        if (ret) {
-               dev_warn(inst->dev->dev, "%s: could not get output info.", __func__);
+               dev_dbg(inst->dev->dev, "%s: could not get output info.", __func__);
                v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
                return;
        }
@@ -442,18 +454,17 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
                spin_unlock_irqrestore(&inst->state_spinlock, flags);
        }
 
-       /*
-        * During a resolution change and while draining, the firmware may flush
-        * the reorder queue regardless of having a matching decoding operation
-        * pending. Only terminate the job if there are no more IRQ coming.
-        */
-       wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
-       if (q_status.report_queue_count == 0 &&
-           (q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
-               dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
-               pm_runtime_put_autosuspend(inst->dev->dev);
-               v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+       if (inst->sent_eos &&
+           v4l2_m2m_get_curr_priv(inst->v4l2_m2m_dev)) {
+               struct queue_status_info q_status;
+
+               wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
+               if (q_status.report_queue_count == 0 &&
+                   q_status.instance_queue_count == 0)
+                       v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
        }
+
+       inst->queuing_fail = false;
 }
 
 static int wave5_vpu_dec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
@@ -1142,11 +1153,30 @@ static int write_to_ringbuffer(struct vpu_instance *inst, void *buffer, size_t b
        return 0;
 }
 
+static struct vpu_src_buffer *inst_src_buf_remove(struct vpu_instance *inst)
+{
+       struct vpu_src_buffer *b;
+       int ret;
+
+       ret = mutex_lock_interruptible(&inst->feed_lock);
+       if (ret)
+               return NULL;
+
+       if (list_empty(&inst->avail_src_bufs)) {
+               mutex_unlock(&inst->feed_lock);
+               return NULL;
+       }
+       b = list_first_entry(&inst->avail_src_bufs, struct vpu_src_buffer, list);
+       list_del_init(&b->list);
+       mutex_unlock(&inst->feed_lock);
+       return b;
+}
+
 static int fill_ringbuffer(struct vpu_instance *inst)
 {
        struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
-       struct v4l2_m2m_buffer *buf, *n;
-       int ret;
+       struct vpu_src_buffer *vpu_buf;
+       int ret = 0;
 
        if (m2m_ctx->last_src_buf)  {
                struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(m2m_ctx->last_src_buf);
@@ -1157,9 +1187,8 @@ static int fill_ringbuffer(struct vpu_instance *inst)
                }
        }
 
-       v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
-               struct vb2_v4l2_buffer *vbuf = &buf->vb;
-               struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(vbuf);
+       while ((vpu_buf = inst_src_buf_remove(inst)) != NULL) {
+               struct vb2_v4l2_buffer *vbuf = &vpu_buf->v4l2_m2m_buf.vb;
                struct vpu_buf *ring_buffer = &inst->bitstream_vbuf;
                size_t src_size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
                void *src_buf = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
@@ -1219,9 +1248,12 @@ static int fill_ringbuffer(struct vpu_instance *inst)
                        dev_dbg(inst->dev->dev, "last src buffer written to the ring buffer\n");
                        break;
                }
+
+               inst->queuing_num++;
+               break;
        }
 
-       return 0;
+       return ret;
 }
 
 static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
@@ -1230,10 +1262,16 @@ static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
        struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(vbuf);
+       int ret;
 
        vpu_buf->consumed = false;
        vbuf->sequence = inst->queued_src_buf_num++;
-
+       ret = mutex_lock_interruptible(&inst->feed_lock);
+       if (ret)
+               return;
+       INIT_LIST_HEAD(&vpu_buf->list);
+       list_add_tail(&vpu_buf->list, &inst->avail_src_bufs);
+       mutex_unlock(&inst->feed_lock);
        v4l2_m2m_buf_queue(m2m_ctx, vbuf);
 }
 
@@ -1288,10 +1326,13 @@ static void wave5_vpu_dec_buf_queue(struct vb2_buffer *vb)
                __func__, vb->type, vb->index, vb2_plane_size(&vbuf->vb2_buf, 0),
                vb2_plane_size(&vbuf->vb2_buf, 1), vb2_plane_size(&vbuf->vb2_buf, 2));
 
-       if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+       if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                wave5_vpu_dec_buf_queue_src(vb);
-       else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+               if (inst->empty_queue)
+                       inst->empty_queue = false;
+       } else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
                wave5_vpu_dec_buf_queue_dst(vb);
+       }
 }
 
 static int wave5_vpu_dec_allocate_ring_buffer(struct vpu_instance *inst)
@@ -1385,6 +1426,12 @@ static int streamoff_output(struct vb2_queue *q)
        dma_addr_t new_rd_ptr;
        struct dec_output_info dec_info;
        unsigned int i;
+       struct vpu_src_buffer *vpu_buf;
+
+       inst->retry = false;
+       inst->queuing_num = 0;
+       while ((vpu_buf = inst_src_buf_remove(inst)) != NULL)
+               ;
 
        for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
                ret = wave5_vpu_dec_set_disp_flag(inst, i);
@@ -1470,21 +1517,21 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
 {
        struct vpu_instance *inst = vb2_get_drv_priv(q);
        struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
        bool check_cmd = TRUE;
 
        dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
        pm_runtime_resume_and_get(inst->dev->dev);
-
+       inst->empty_queue = true;
        while (check_cmd) {
                struct queue_status_info q_status;
                struct dec_output_info dec_output_info;
 
                wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
-
-               if (q_status.report_queue_count == 0)
-                       break;
-
-               if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+               if ((inst->state == VPU_INST_STATE_STOP ||
+                    inst->state == VPU_INST_STATE_INIT_SEQ ||
+                    q_status.instance_queue_count == 0) &&
+                       q_status.report_queue_count == 0)
                        break;
 
                if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
@@ -1498,6 +1545,8 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
        else
                streamoff_capture(q);
 
+       inst->empty_queue = false;
+       inst->sent_eos = false;
        pm_runtime_put_autosuspend(inst->dev->dev);
 }
 
@@ -1579,10 +1628,18 @@ static void wave5_vpu_dec_device_run(void *priv)
 
        dev_dbg(inst->dev->dev, "%s: Fill the ring buffer with new bitstream data", __func__);
        pm_runtime_resume_and_get(inst->dev->dev);
-       ret = fill_ringbuffer(inst);
-       if (ret) {
-               dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
-               goto finish_job_and_return;
+       if (!inst->retry) {
+               ret = fill_ringbuffer(inst);
+               if (ret < 0) {
+                       dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
+                       goto finish_job_and_return;
+               } else if (!inst->eos &&
+                               inst->queuing_num == 0 &&
+                               inst->state == VPU_INST_STATE_PIC_RUN) {
+                       dev_dbg(inst->dev->dev, "%s: no bitstream for feeding, so skip ", __func__);
+                       inst->empty_queue = true;
+                       goto finish_job_and_return;
+               }
        }
 
        switch (inst->state) {
@@ -1607,7 +1664,7 @@ static void wave5_vpu_dec_device_run(void *priv)
                        }
                        spin_unlock_irqrestore(&inst->state_spinlock, flags);
                } else {
-                       switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+                       set_instance_state(inst, VPU_INST_STATE_INIT_SEQ);
                }
 
                break;
@@ -1618,8 +1675,7 @@ static void wave5_vpu_dec_device_run(void *priv)
                 * we had a chance to switch, which leads to an invalid state
                 * change.
                 */
-               switch_state(inst, VPU_INST_STATE_PIC_RUN);
-
+               set_instance_state(inst, VPU_INST_STATE_PIC_RUN);
                /*
                 * During DRC, the picture decoding remains pending, so just leave the job
                 * active until this decode operation completes.
@@ -1633,14 +1689,12 @@ static void wave5_vpu_dec_device_run(void *priv)
                ret = wave5_prepare_fb(inst);
                if (ret) {
                        dev_warn(inst->dev->dev, "Framebuffer preparation, fail: %d\n", ret);
-                       switch_state(inst, VPU_INST_STATE_STOP);
+                       set_instance_state(inst, VPU_INST_STATE_STOP);
                        break;
                }
 
-               if (q_status.instance_queue_count) {
-                       dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
-                       return;
-               }
+               if (q_status.instance_queue_count)
+                       goto finish_job_and_return;
 
                fallthrough;
        case VPU_INST_STATE_PIC_RUN:
@@ -1649,28 +1703,45 @@ static void wave5_vpu_dec_device_run(void *priv)
                        dev_err(inst->dev->dev,
                                "Frame decoding on m2m context (%p), fail: %d (result: %d)\n",
                                m2m_ctx, ret, fail_res);
-                       break;
+                       goto finish_job_and_return;
+               }
+
+               if (fail_res == WAVE5_SYSERR_QUEUEING_FAIL) {
+                       inst->retry = true;
+                       inst->queuing_fail = true;
+               } else {
+                       inst->retry = false;
+                       if (!inst->eos)
+                               inst->queuing_num--;
                }
-               /* Return so that we leave this job active */
-               dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
-               return;
-       default:
-               WARN(1, "Execution of a job in state %s illegal.\n", state_to_str(inst->state));
                break;
+       default:
+               dev_dbg(inst->dev->dev, "Execution of a job in state %s illegal.\n",
+                       state_to_str(inst->state));
        }
 
 finish_job_and_return:
        dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
        pm_runtime_put_autosuspend(inst->dev->dev);
-       v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+       /*
+        * After receiving CMD_STOP, there is no more input, but we still have to
+        * run device_run to send DEC_PIC commands until the display index is -1,
+        * so job_finish was always called in device_run to achieve this; that
+        * logic was very wasteful in power and CPU time.
+        * Once EOS has been sent, device_run no longer calls job_finish; it is
+        * called only when the HW is idle, in order to reduce overhead.
+        */
+       if (!inst->sent_eos)
+               v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
 }
 
 static void wave5_vpu_dec_job_abort(void *priv)
 {
        struct vpu_instance *inst = priv;
+       struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
        int ret;
 
-       ret = switch_state(inst, VPU_INST_STATE_STOP);
+       ret = set_instance_state(inst, VPU_INST_STATE_STOP);
        if (ret)
                return;
 
@@ -1678,6 +1749,8 @@ static void wave5_vpu_dec_job_abort(void *priv)
        if (ret)
                dev_warn(inst->dev->dev,
                         "Setting EOS for the bitstream, fail: %d\n", ret);
+
+       v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
 }
 
 static int wave5_vpu_dec_job_ready(void *priv)
@@ -1713,10 +1786,15 @@ static int wave5_vpu_dec_job_ready(void *priv)
                                "No capture buffer ready to decode!\n");
                        break;
                } else if (!wave5_is_draining_or_eos(inst) &&
-                          !v4l2_m2m_num_src_bufs_ready(m2m_ctx)) {
+                          (!v4l2_m2m_num_src_bufs_ready(m2m_ctx) ||
+                           inst->empty_queue)) {
                        dev_dbg(inst->dev->dev,
                                "No bitstream data to decode!\n");
                        break;
+               } else if (inst->state == VPU_INST_STATE_PIC_RUN &&
+                          !wave5_is_draining_or_eos(inst) &&
+                          inst->queuing_fail) {
+                       break;
                }
                ret = 1;
                break;
@@ -1753,6 +1831,8 @@ static int wave5_vpu_open_dec(struct file *filp)
        inst->ops = &wave5_vpu_dec_inst_ops;
 
        spin_lock_init(&inst->state_spinlock);
+       mutex_init(&inst->feed_lock);
+       INIT_LIST_HEAD(&inst->avail_src_bufs);
 
        inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
        if (!inst->codec_info)
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.h b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
index 5943bdaa9c4c..99c3be882192 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
@@ -22,8 +22,8 @@
 
 struct vpu_src_buffer {
        struct v4l2_m2m_buffer  v4l2_m2m_buf;
-       struct list_head        list;
        bool                    consumed;
+       struct list_head        list;
 };
 
 struct vpu_dst_buffer {
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
index 5b10f9f49b9f..d26ffc942219 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
@@ -52,11 +52,12 @@ int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size)
 int wave5_vpu_flush_instance(struct vpu_instance *inst)
 {
        int ret = 0;
+       int mutex_ret = 0;
        int retry = 0;
 
-       ret = mutex_lock_interruptible(&inst->dev->hw_lock);
-       if (ret)
-               return ret;
+       mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+       if (mutex_ret)
+               return mutex_ret;
        do {
                /*
                 * Repeat the FLUSH command until the firmware reports that the
@@ -80,11 +81,16 @@ int wave5_vpu_flush_instance(struct vpu_instance *inst)
 
                        mutex_unlock(&inst->dev->hw_lock);
                        wave5_vpu_dec_get_output_info(inst, &dec_info);
-                       ret = mutex_lock_interruptible(&inst->dev->hw_lock);
-                       if (ret)
-                               return ret;
-                       if (dec_info.index_frame_display > 0)
+                       mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+                       if (mutex_ret)
+                               return mutex_ret;
+                       if (dec_info.index_frame_display >= 0) {
+                               mutex_unlock(&inst->dev->hw_lock);
                                wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
+                               mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+                               if (mutex_ret)
+                                       return mutex_ret;
+                       }
                }
        } while (ret != 0);
        mutex_unlock(&inst->dev->hw_lock);
@@ -207,6 +213,8 @@ int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
        int retry = 0;
        struct vpu_device *vpu_dev = inst->dev;
        int i;
+       struct dec_output_info dec_info;
+       int ret_mutex;
 
        *fail_res = 0;
        if (!inst->codec_info)
@@ -214,10 +222,10 @@ int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
 
        pm_runtime_resume_and_get(inst->dev->dev);
 
-       ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
-       if (ret) {
+       ret_mutex = mutex_lock_interruptible(&vpu_dev->hw_lock);
+       if (ret_mutex) {
                pm_runtime_put_sync(inst->dev->dev);
-               return ret;
+               return ret_mutex;
        }
 
        do {
@@ -227,11 +235,26 @@ int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
                        goto unlock_and_return;
                }
 
-               if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
-                   retry++ >= MAX_FIRMWARE_CALL_RETRY) {
+               if (ret == 0)
+                       break;
+
+               if (*fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
+                       dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
+                       goto unlock_and_return;
+               }
+
+               if (retry++ >= MAX_FIRMWARE_CALL_RETRY) {
                        ret = -ETIMEDOUT;
                        goto unlock_and_return;
                }
+
+               mutex_unlock(&vpu_dev->hw_lock);
+               wave5_vpu_dec_get_output_info(inst, &dec_info);
+               ret_mutex = mutex_lock_interruptible(&vpu_dev->hw_lock);
+               if (ret_mutex) {
+                       pm_runtime_put_sync(inst->dev->dev);
+                       return ret_mutex;
+               }
        } while (ret != 0);
 
        dev_dbg(inst->dev->dev, "%s: dec_finish_seq complete\n", __func__);
@@ -248,6 +271,8 @@ int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
 
        wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_task);
 
+       mutex_destroy(&inst->feed_lock);
+
 unlock_and_return:
        mutex_unlock(&vpu_dev->hw_lock);
        pm_runtime_put_sync(inst->dev->dev);
@@ -460,11 +485,11 @@ int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int upd
 dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst)
 {
        int ret;
-       dma_addr_t rd_ptr;
+       dma_addr_t rd_ptr = 0;
 
        ret = mutex_lock_interruptible(&inst->dev->hw_lock);
        if (ret)
-               return ret;
+               return rd_ptr;
 
        rd_ptr = wave5_dec_get_rd_ptr(inst);
 
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
index bc101397204d..c64135769869 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
@@ -818,6 +818,12 @@ struct vpu_instance {
        bool cbcr_interleave;
        bool nv21;
        bool eos;
+       bool sent_eos; /* check if EOS is sent to application */
+       bool retry; /* retry to feed bitstream if failure reason is WAVE5_SYSERR_QUEUEING_FAIL */
+       int queuing_num; /* count of bitstream queued */
+       struct mutex feed_lock; /* lock for feeding bitstream buffers */
+       bool queuing_fail; /* if there is the queuing failure */
+       bool empty_queue;
        struct vpu_buf bitstream_vbuf;
        dma_addr_t last_rd_ptr;
        size_t remaining_consumed_bytes;
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
index 1ea9f5f31499..4ebd48d5550e 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
@@ -59,6 +59,7 @@
 //  application specific configuration
 #define VPU_ENC_TIMEOUT                 60000
 #define VPU_DEC_TIMEOUT                 60000
+#define VPU_DEC_STOP_TIMEOUT            10
 
 // for WAVE encoder
 #define USE_SRC_PRP_AXI         0
_______________________________________________
linuxtv-commits mailing list -- [email protected]
To unsubscribe send an email to [email protected]
