On Tue, Feb 07, 2017 at 05:02:53PM +0000, fdman...@kernel.org wrote:
> From: Filipe Manana <fdman...@suse.com>
> 
> We were destroying the work queues used for metadata I/O operations before
> destroying all the other work queues (and waiting for their tasks to
> complete), which can result in a use-after-free problem because tasks from
> most of the other work queues also do metadata I/O operations. For example,
> the tasks from the caching workers work queue (fs_info->caching_workers),
> which is destroyed only after the work queue used for metadata reads
> (fs_info->endio_meta_workers) is destroyed, do metadata reads. These reads
> result in attempts to queue tasks into the latter work queue, triggering a
> use-after-free with a trace like the following:
> 
> [23114.613543] general protection fault: 0000 [#1] PREEMPT SMP
> [23114.614442] Modules linked in: dm_thin_pool dm_persistent_data 
> dm_bio_prison dm_bufio libcrc32c btrfs xor raid6_pq dm_flakey dm_mod 
> crc32c_generic
> acpi_cpufreq tpm_tis tpm_tis_core tpm ppdev parport_pc parport i2c_piix4 
> processor sg evdev i2c_core psmouse pcspkr serio_raw button loop autofs4 ext4 
> crc16
> jbd2 mbcache sr_mod cdrom sd_mod ata_generic virtio_scsi ata_piix virtio_pci 
> libata virtio_ring virtio e1000 scsi_mod floppy [last unloaded: scsi_debug]
> [23114.616932] CPU: 9 PID: 4537 Comm: kworker/u32:8 Not tainted 4.9.0-rc7-btrfs-next-36+ #1
> [23114.616932] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.1-0-gb3ef39f-prebuilt.qemu-project.org 04/01/2014
> [23114.616932] Workqueue: btrfs-cache btrfs_cache_helper [btrfs]
> [23114.616932] task: ffff880221d45780 task.stack: ffffc9000bc50000
> [23114.616932] RIP: 0010:[<ffffffffa037c1bf>]  [<ffffffffa037c1bf>] btrfs_queue_work+0x2c/0x190 [btrfs]
> [23114.616932] RSP: 0018:ffff88023f443d60  EFLAGS: 00010246
> [23114.616932] RAX: 0000000000000000 RBX: 6b6b6b6b6b6b6b6b RCX: 0000000000000102
> [23114.616932] RDX: ffffffffa0419000 RSI: ffff88011df534f0 RDI: ffff880101f01c00
> [23114.616932] RBP: ffff88023f443d80 R08: 00000000000f7000 R09: 000000000000ffff
> [23114.616932] R10: ffff88023f443d48 R11: 0000000000001000 R12: ffff88011df534f0
> [23114.616932] R13: ffff880135963868 R14: 0000000000001000 R15: 0000000000001000
> [23114.616932] FS:  0000000000000000(0000) GS:ffff88023f440000(0000) knlGS:0000000000000000
> [23114.616932] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [23114.616932] CR2: 00007f0fb9f8e520 CR3: 0000000001a0b000 CR4: 00000000000006e0
> [23114.616932] Stack:
> [23114.616932]  ffff880101f01c00 ffff88011df534f0 ffff880135963868 0000000000001000
> [23114.616932]  ffff88023f443da0 ffffffffa03470af ffff880149b37200 ffff880135963868
> [23114.616932]  ffff88023f443db8 ffffffff8125293c ffff880149b37200 ffff88023f443de0
> [23114.616932] Call Trace:
> [23114.616932]  <IRQ> [23114.616932]  [<ffffffffa03470af>] end_workqueue_bio+0xd5/0xda [btrfs]
> [23114.616932]  [<ffffffff8125293c>] bio_endio+0x54/0x57
> [23114.616932]  [<ffffffffa0377929>] btrfs_end_bio+0xf7/0x106 [btrfs]
> [23114.616932]  [<ffffffff8125293c>] bio_endio+0x54/0x57
> [23114.616932]  [<ffffffff8125955f>] blk_update_request+0x21a/0x30f
> [23114.616932]  [<ffffffffa0022316>] scsi_end_request+0x31/0x182 [scsi_mod]
> [23114.616932]  [<ffffffffa00235fc>] scsi_io_completion+0x1ce/0x4c8 [scsi_mod]
> [23114.616932]  [<ffffffffa001ba9d>] scsi_finish_command+0x104/0x10d [scsi_mod]
> [23114.616932]  [<ffffffffa002311f>] scsi_softirq_done+0x101/0x10a [scsi_mod]
> [23114.616932]  [<ffffffff8125fbd9>] blk_done_softirq+0x82/0x8d
> [23114.616932]  [<ffffffff814c8a4b>] __do_softirq+0x1ab/0x412
> [23114.616932]  [<ffffffff8105b01d>] irq_exit+0x49/0x99
> [23114.616932]  [<ffffffff81035135>] smp_call_function_single_interrupt+0x24/0x26
> [23114.616932]  [<ffffffff814c7ec9>] call_function_single_interrupt+0x89/0x90
> [23114.616932]  <EOI> [23114.616932]  [<ffffffffa0023262>] ? scsi_request_fn+0x13a/0x2a1 [scsi_mod]
> [23114.616932]  [<ffffffff814c5966>] ? _raw_spin_unlock_irq+0x2c/0x4a
> [23114.616932]  [<ffffffff814c596c>] ? _raw_spin_unlock_irq+0x32/0x4a
> [23114.616932]  [<ffffffff814c5966>] ? _raw_spin_unlock_irq+0x2c/0x4a
> [23114.616932]  [<ffffffffa0023262>] scsi_request_fn+0x13a/0x2a1 [scsi_mod]
> [23114.616932]  [<ffffffff8125590e>] __blk_run_queue_uncond+0x22/0x2b
> [23114.616932]  [<ffffffff81255930>] __blk_run_queue+0x19/0x1b
> [23114.616932]  [<ffffffff8125ab01>] blk_queue_bio+0x268/0x282
> [23114.616932]  [<ffffffff81258f44>] generic_make_request+0xbd/0x160
> [23114.616932]  [<ffffffff812590e7>] submit_bio+0x100/0x11d
> [23114.616932]  [<ffffffff81298603>] ? __this_cpu_preempt_check+0x13/0x15
> [23114.616932]  [<ffffffff812a1805>] ? __percpu_counter_add+0x8e/0xa7
> [23114.616932]  [<ffffffffa03bfd47>] btrfsic_submit_bio+0x1a/0x1d [btrfs]
> [23114.616932]  [<ffffffffa0377db2>] btrfs_map_bio+0x1f4/0x26d [btrfs]
> [23114.616932]  [<ffffffffa0348a33>] btree_submit_bio_hook+0x74/0xbf [btrfs]
> [23114.616932]  [<ffffffffa03489bf>] ? btrfs_wq_submit_bio+0x160/0x160 [btrfs]
> [23114.616932]  [<ffffffffa03697a9>] submit_one_bio+0x6b/0x89 [btrfs]
> [23114.616932]  [<ffffffffa036f5be>] read_extent_buffer_pages+0x170/0x1ec [btrfs]
> [23114.616932]  [<ffffffffa03471fa>] ? free_root_pointers+0x64/0x64 [btrfs]
> [23114.616932]  [<ffffffffa0348adf>] readahead_tree_block+0x3f/0x4c [btrfs]
> [23114.616932]  [<ffffffffa032e115>] read_block_for_search.isra.20+0x1ce/0x23d [btrfs]
> [23114.616932]  [<ffffffffa032fab8>] btrfs_search_slot+0x65f/0x774 [btrfs]
> [23114.616932]  [<ffffffffa036eff1>] ? free_extent_buffer+0x73/0x7e [btrfs]
> [23114.616932]  [<ffffffffa0331ba4>] btrfs_next_old_leaf+0xa1/0x33c [btrfs]
> [23114.616932]  [<ffffffffa0331e4f>] btrfs_next_leaf+0x10/0x12 [btrfs]
> [23114.616932]  [<ffffffffa0336aa6>] caching_thread+0x22d/0x416 [btrfs]
> [23114.616932]  [<ffffffffa037bce9>] btrfs_scrubparity_helper+0x187/0x3b6 [btrfs]
> [23114.616932]  [<ffffffffa037c036>] btrfs_cache_helper+0xe/0x10 [btrfs]
> [23114.616932]  [<ffffffff8106cf96>] process_one_work+0x273/0x4e4
> [23114.616932]  [<ffffffff8106d6db>] worker_thread+0x1eb/0x2ca
> [23114.616932]  [<ffffffff8106d4f0>] ? rescuer_thread+0x2b6/0x2b6
> [23114.616932]  [<ffffffff81072a81>] kthread+0xd5/0xdd
> [23114.616932]  [<ffffffff810729ac>] ? __kthread_unpark+0x5a/0x5a
> [23114.616932]  [<ffffffff814c6257>] ret_from_fork+0x27/0x40
> [23114.616932] Code: 1f 44 00 00 55 48 89 e5 41 56 41 55 41 54 53 49 89 f4 48 8b 46 70 a8 04 74 09 48 8b 5f 08 48 85 db 75 03 48 8b 1f 49 89 5c 24 68 <83> 7b 64 ff 74 04 f0 ff 43 58 49 83 7c 24 08 00 74 2c 4c 8d 6b
> [23114.616932] RIP  [<ffffffffa037c1bf>] btrfs_queue_work+0x2c/0x190 [btrfs]
> [23114.616932]  RSP <ffff88023f443d60>
> [23114.689493] ---[ end trace 6e48b6bc707ca34b ]---
> [23114.690166] Kernel panic - not syncing: Fatal exception in interrupt
> [23114.691283] Kernel Offset: disabled
> [23114.691918] ---[ end Kernel panic - not syncing: Fatal exception in interrupt
> 
> The following diagram shows the sequence of operations that lead to the
> use-after-free problem from the above trace:
> 
>         CPU 1                               CPU 2                               CPU 3
> 
>                                        caching_thread()
>  close_ctree()
>    btrfs_stop_all_workers()
>      btrfs_destroy_workqueue(
>       fs_info->endio_meta_workers)
> 
>                                          btrfs_search_slot()
>                                           read_block_for_search()
>                                            readahead_tree_block()
>                                             read_extent_buffer_pages()
>                                              submit_one_bio()
>                                               btree_submit_bio_hook()
>                                                btrfs_bio_wq_end_io()
>                                                 --> sets the bio's
>                                                     bi_end_io callback
>                                                     to end_workqueue_bio()
>                                                --> bio is submitted
>                                                                                   bio completes
>                                                                                   and its bi_end_io callback
>                                                                                   is invoked
>                                                                                    --> end_workqueue_bio()
>                                                                                        --> attempts to queue
>                                                                                            a task on fs_info->endio_meta_workers
> 
>      btrfs_destroy_workqueue(
>       fs_info->caching_workers)
> 
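As a side note on the diagram: the CPU 2 -> CPU 3 hand-off is the usual deferred bio completion pattern, where the submitter points the bio's bi_end_io at a callback that runs in interrupt context and only queues the heavier processing onto a work queue. Below is a reduced sketch of that shape, using the generic workqueue API rather than btrfs_queue_work(); the names (endio_ctx, my_end_io, meta_end_io_wq) are hypothetical, and it assumes the submitter attached the context through bio->bi_private and took an extra bio reference for the worker to drop:

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct endio_ctx {
	struct work_struct work;
	struct bio *bio;
};

/* Hypothetical queue standing in for fs_info->endio_meta_workers. */
static struct workqueue_struct *meta_end_io_wq;

static void end_io_worker(struct work_struct *work)
{
	struct endio_ctx *ctx = container_of(work, struct endio_ctx, work);

	/*
	 * The heavier completion processing of ctx->bio runs here, in
	 * process context. bio_put() drops the extra reference the
	 * submitter is assumed to have taken for us.
	 */
	bio_put(ctx->bio);
	kfree(ctx);
}

/* Runs in interrupt context when the bio completes, so it only queues work. */
static void my_end_io(struct bio *bio)
{
	struct endio_ctx *ctx = bio->bi_private;

	INIT_WORK(&ctx->work, end_io_worker);
	/* This is a use-after-free if meta_end_io_wq was already destroyed. */
	queue_work(meta_end_io_wq, &ctx->work);
}

In the trace above, end_workqueue_bio() plays the role of my_end_io() and endio_meta_workers the role of meta_end_io_wq, which is why a bio that completes after that queue has been freed ends up dereferencing freed memory.
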
> So fix this by destroying the queues used for metadata I/O tasks only
> after destroying all the other queues.
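To spell the ordering rule out: a work queue whose work is queued by tasks running on other work queues must be destroyed after those other queues, since destroying a queue waits for its pending work to finish but cannot protect against work that gets queued later. Below is a minimal sketch of the rule using the generic Linux workqueue API; wq_a, wq_b and the work functions are made-up names, with wq_a standing in for something like caching_workers and wq_b for endio_meta_workers (allocation via alloc_workqueue() is omitted):

#include <linux/workqueue.h>

static struct workqueue_struct *wq_a;	/* e.g. the caching workers       */
static struct workqueue_struct *wq_b;	/* e.g. the metadata end-io queue */

static void b_fn(struct work_struct *work)
{
	/* Metadata end-io style processing would run here. */
}
static DECLARE_WORK(b_work, b_fn);

static void a_fn(struct work_struct *work)
{
	/*
	 * Work running on wq_a queues new work onto wq_b, just like a
	 * caching task's metadata read completion queues onto the
	 * metadata end-io work queue.
	 */
	queue_work(wq_b, &b_work);
}
static DECLARE_WORK(a_work, a_fn);

static void teardown(void)
{
	/*
	 * Destroy wq_a first: destroy_workqueue() drains it and waits for
	 * a_fn to finish, so afterwards nothing can queue onto wq_b.
	 */
	destroy_workqueue(wq_a);
	/*
	 * Only now is it safe to free wq_b. Reversing these two calls lets
	 * a still-running a_fn queue work onto freed memory, the same class
	 * of use-after-free shown in the trace.
	 */
	destroy_workqueue(wq_b);
}

That is what the patch below does, by moving the two metadata end-io queues to the end of btrfs_stop_all_workers().
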

Looks good.

Reviewed-by: Liu Bo <bo.li....@oracle.com>

Thanks,

-liubo
> 
> Signed-off-by: Filipe Manana <fdman...@suse.com>
> ---
>  fs/btrfs/disk-io.c | 9 +++++++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index bf54d7d..8c7e19f 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -2207,11 +2207,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>       btrfs_destroy_workqueue(fs_info->delalloc_workers);
>       btrfs_destroy_workqueue(fs_info->workers);
>       btrfs_destroy_workqueue(fs_info->endio_workers);
> -     btrfs_destroy_workqueue(fs_info->endio_meta_workers);
>       btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
>       btrfs_destroy_workqueue(fs_info->endio_repair_workers);
>       btrfs_destroy_workqueue(fs_info->rmw_workers);
> -     btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
>       btrfs_destroy_workqueue(fs_info->endio_write_workers);
>       btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
>       btrfs_destroy_workqueue(fs_info->submit_workers);
> @@ -2221,6 +2219,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>       btrfs_destroy_workqueue(fs_info->flush_workers);
>       btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
>       btrfs_destroy_workqueue(fs_info->extent_workers);
> +     /*
> +      * Now that all other work queues are destroyed, we can safely destroy
> +      * the queues used for metadata I/O, since tasks from those other work
> +      * queues can do metadata I/O operations.
> +      */
> +     btrfs_destroy_workqueue(fs_info->endio_meta_workers);
> +     btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
>  }
>  
>  static void free_root_extent_buffers(struct btrfs_root *root)
> -- 
> 2.7.0.rc3
> 