[PATCH 5/8] blk-mq: allow the driver to pass in an affinity mask
Allow drivers to pass in the affinity mask from the generic interrupt layer, and spread queues based on that. If the driver doesn't pass in a mask we will create it using the genirq helper. As this helper was modelled after the blk-mq algorithm there should be no change in behavior. XXX: Just as with the core IRQ spreading code this doesn't handle CPU hotplug yet. Signed-off-by: Christoph Hellwig--- block/Makefile | 2 +- block/blk-mq-cpumap.c | 120 - block/blk-mq.c | 60 - block/blk-mq.h | 8 include/linux/blk-mq.h | 1 + 5 files changed, 60 insertions(+), 131 deletions(-) delete mode 100644 block/blk-mq-cpumap.c diff --git a/block/Makefile b/block/Makefile index 9eda232..aeb318d 100644 --- a/block/Makefile +++ b/block/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o \ - blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ + blk-mq-sysfs.o blk-mq-cpu.o ioctl.o \ genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ badblocks.o partitions/ diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c deleted file mode 100644 index d0634bc..000 --- a/block/blk-mq-cpumap.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * CPU <-> hardware queue mapping helpers - * - * Copyright (C) 2013-2014 Jens Axboe - */ -#include -#include -#include -#include -#include -#include - -#include -#include "blk.h" -#include "blk-mq.h" - -static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, - const int cpu) -{ - return cpu * nr_queues / nr_cpus; -} - -static int get_first_sibling(unsigned int cpu) -{ - unsigned int ret; - - ret = cpumask_first(topology_sibling_cpumask(cpu)); - if (ret < nr_cpu_ids) - return ret; - - return cpu; -} - -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, - const struct cpumask *online_mask) -{ - unsigned int i, nr_cpus, nr_uniq_cpus, queue, 
first_sibling; - cpumask_var_t cpus; - - if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) - return 1; - - cpumask_clear(cpus); - nr_cpus = nr_uniq_cpus = 0; - for_each_cpu(i, online_mask) { - nr_cpus++; - first_sibling = get_first_sibling(i); - if (!cpumask_test_cpu(first_sibling, cpus)) - nr_uniq_cpus++; - cpumask_set_cpu(i, cpus); - } - - queue = 0; - for_each_possible_cpu(i) { - if (!cpumask_test_cpu(i, online_mask)) { - map[i] = 0; - continue; - } - - /* -* Easy case - we have equal or more hardware queues. Or -* there are no thread siblings to take into account. Do -* 1:1 if enough, or sequential mapping if less. -*/ - if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) { - map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); - queue++; - continue; - } - - /* -* Less then nr_cpus queues, and we have some number of -* threads per cores. Map sibling threads to the same -* queue. -*/ - first_sibling = get_first_sibling(i); - if (first_sibling == i) { - map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, - queue); - queue++; - } else - map[i] = map[first_sibling]; - } - - free_cpumask_var(cpus); - return 0; -} - -unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) -{ - unsigned int *map; - - /* If cpus are offline, map them to first hctx */ - map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, - set->numa_node); - if (!map) - return NULL; - - if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) - return map; - - kfree(map); - return NULL; -} - -/* - * We have no quick way of doing reverse lookups. This is only used at - * queue init time, so runtime isn't important. - */ -int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) -{ - int i; - - for_each_possible_cpu(i) { - if (index == mq_map[i]) - return
[PATCH 5/8] blk-mq: allow the driver to pass in an affinity mask
Allow drivers to pass in the affinity mask from the generic interrupt layer, and spread queues based on that. If the driver doesn't pass in a mask we will create it using the genirq helper. As this helper was modelled after the blk-mq algorithm there should be no change in behavior. XXX: Just as with the core IRQ spreading code this doesn't handle CPU hotplug yet. Signed-off-by: Christoph Hellwig --- block/Makefile | 2 +- block/blk-mq-cpumap.c | 120 - block/blk-mq.c | 60 - block/blk-mq.h | 8 include/linux/blk-mq.h | 1 + 5 files changed, 60 insertions(+), 131 deletions(-) delete mode 100644 block/blk-mq-cpumap.c diff --git a/block/Makefile b/block/Makefile index 9eda232..aeb318d 100644 --- a/block/Makefile +++ b/block/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o \ - blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ + blk-mq-sysfs.o blk-mq-cpu.o ioctl.o \ genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ badblocks.o partitions/ diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c deleted file mode 100644 index d0634bc..000 --- a/block/blk-mq-cpumap.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * CPU <-> hardware queue mapping helpers - * - * Copyright (C) 2013-2014 Jens Axboe - */ -#include -#include -#include -#include -#include -#include - -#include -#include "blk.h" -#include "blk-mq.h" - -static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, - const int cpu) -{ - return cpu * nr_queues / nr_cpus; -} - -static int get_first_sibling(unsigned int cpu) -{ - unsigned int ret; - - ret = cpumask_first(topology_sibling_cpumask(cpu)); - if (ret < nr_cpu_ids) - return ret; - - return cpu; -} - -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, - const struct cpumask *online_mask) -{ - unsigned int i, nr_cpus, nr_uniq_cpus, queue, 
first_sibling; - cpumask_var_t cpus; - - if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) - return 1; - - cpumask_clear(cpus); - nr_cpus = nr_uniq_cpus = 0; - for_each_cpu(i, online_mask) { - nr_cpus++; - first_sibling = get_first_sibling(i); - if (!cpumask_test_cpu(first_sibling, cpus)) - nr_uniq_cpus++; - cpumask_set_cpu(i, cpus); - } - - queue = 0; - for_each_possible_cpu(i) { - if (!cpumask_test_cpu(i, online_mask)) { - map[i] = 0; - continue; - } - - /* -* Easy case - we have equal or more hardware queues. Or -* there are no thread siblings to take into account. Do -* 1:1 if enough, or sequential mapping if less. -*/ - if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) { - map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); - queue++; - continue; - } - - /* -* Less then nr_cpus queues, and we have some number of -* threads per cores. Map sibling threads to the same -* queue. -*/ - first_sibling = get_first_sibling(i); - if (first_sibling == i) { - map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, - queue); - queue++; - } else - map[i] = map[first_sibling]; - } - - free_cpumask_var(cpus); - return 0; -} - -unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) -{ - unsigned int *map; - - /* If cpus are offline, map them to first hctx */ - map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, - set->numa_node); - if (!map) - return NULL; - - if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) - return map; - - kfree(map); - return NULL; -} - -/* - * We have no quick way of doing reverse lookups. This is only used at - * queue init time, so runtime isn't important. - */ -int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) -{ - int i; - - for_each_possible_cpu(i) { - if (index == mq_map[i]) - return local_memory_node(cpu_to_node(i)); -