Add struct blk_completion, a new data structure which allows a task to
wait for N events (typically bio completions) to finish.  Normally the
submitting task frees the structure once the wait completes, but if it
is killed while waiting, the last completer frees it instead.
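
A sketch of the intended calling pattern (demo_end_io, nr_bios and the
submission step are illustrative, not part of this patch):

    /* Completer side: called once per bio from its end_io handler. */
    static void demo_end_io(struct bio *bio)
    {
            struct blk_completion *cmpl = bio->bi_private;

            /*
             * If the waiter has been killed and this call drops the
             * count to zero, blk_completion_sub() returns -EINTR and
             * has already freed @cmpl, so it must not be touched again.
             */
            blk_completion_sub(cmpl, bio->bi_status, 1);
            bio_put(bio);
    }

    /* Submitter side. */
    cmpl = kmalloc(sizeof(*cmpl), GFP_KERNEL);
    if (!cmpl)
            return -ENOMEM;
    blk_completion_init(cmpl, nr_bios);
    /* submit nr_bios bios with bi_end_io = demo_end_io and
     * bi_private = cmpl, then: */
    err = blk_completion_wait_killable(cmpl);
    /* cmpl has been freed (or handed off) by the time this returns. */

The structure must be allocated with kmalloc() rather than placed on
the stack, since every exit path releases it with kfree().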

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
---
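Note that the final argument to blk_completion_sub() lets a completer
retire more than one event in a single call; an end_io handler that
accounts for several units at once could, illustratively, do:

    blk_completion_sub(cmpl, status, units_in_this_bio);
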
 block/blk-core.c    | 61 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/bio.h | 11 ++++++++
 2 files changed, 72 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 10c08ac50697..2892246f2176 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1900,6 +1900,67 @@ void blk_io_schedule(void)
 }
 EXPORT_SYMBOL_GPL(blk_io_schedule);
 
+void blk_completion_init(struct blk_completion *cmpl, int n)
+{
+       spin_lock_init(&cmpl->cmpl_lock);
+       cmpl->cmpl_count = n;
+       cmpl->cmpl_task = current;
+       cmpl->cmpl_status = BLK_STS_OK;
+}
+
+int blk_completion_sub(struct blk_completion *cmpl, blk_status_t status, int n)
+{
+       int ret = 0;
+
+       spin_lock_bh(&cmpl->cmpl_lock);
+       if (cmpl->cmpl_status == BLK_STS_OK && status != BLK_STS_OK)
+               cmpl->cmpl_status = status;
+       cmpl->cmpl_count -= n;
+       BUG_ON(cmpl->cmpl_count < 0);
+       if (cmpl->cmpl_count == 0) {
+               if (cmpl->cmpl_task)
+                       wake_up_process(cmpl->cmpl_task);
+               else
+                       ret = -EINTR;
+       }
+       spin_unlock_bh(&cmpl->cmpl_lock);
+       if (ret < 0)
+               kfree(cmpl);
+       return ret;
+}
+
+int blk_completion_wait_killable(struct blk_completion *cmpl)
+{
+       int err = 0;
+
+       for (;;) {
+               set_current_state(TASK_KILLABLE);
+               spin_lock_bh(&cmpl->cmpl_lock);
+               if (cmpl->cmpl_count == 0)
+                       break;
+               spin_unlock_bh(&cmpl->cmpl_lock);
+               blk_io_schedule();
+               if (fatal_signal_pending(current)) {
+                       spin_lock_bh(&cmpl->cmpl_lock);
+                       cmpl->cmpl_task = NULL;
+                       if (cmpl->cmpl_count != 0) {
+                               spin_unlock_bh(&cmpl->cmpl_lock);
+                               cmpl = NULL;
+                       }
+                       err = -ERESTARTSYS;
+                       break;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+       if (cmpl) {
+               spin_unlock_bh(&cmpl->cmpl_lock);
+               err = blk_status_to_errno(cmpl->cmpl_status);
+               kfree(cmpl);
+       }
+
+       return err;
+}
+
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
        BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f254bc79bb3a..0bde05f5548c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -814,4 +814,15 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
                bio->bi_opf |= REQ_NOWAIT;
 }
 
+struct blk_completion {
+       struct task_struct *cmpl_task;
+       spinlock_t cmpl_lock;
+       int cmpl_count;
+       blk_status_t cmpl_status;
+};
+
+void blk_completion_init(struct blk_completion *, int n);
+int blk_completion_sub(struct blk_completion *, blk_status_t status, int n);
+int blk_completion_wait_killable(struct blk_completion *);
+
 #endif /* __LINUX_BIO_H */
-- 
2.28.0
