// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include "blk.h"
#include "blk-mq.h"

/**
 * blk_mq_map_queues - build a default CPU to hardware queue mapping
 * @qmap: CPU to hardware queue map.
 *
 * Spread all possible CPUs evenly over qmap->nr_queues hardware queues and
 * record the resulting queue index (relative to qmap->queue_offset) for each
 * CPU in qmap->mq_map[]. If the even grouping cannot be allocated, fall back
 * to mapping every CPU to the first queue.
 */
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

	masks = group_cpus_evenly(qmap->nr_queues);
	if (!masks) {
		/* Allocation failed: map every CPU to the first queue. */
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
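
/*
 * Illustrative sketch, not part of the original file: a driver with no
 * special CPU affinity requirements typically wires this helper up as its
 * ->map_queues() callback on the default map. The driver function name is
 * hypothetical; set->map[] and HCTX_TYPE_DEFAULT are the usual blk-mq
 * tag set fields, e.g.:
 *
 *	static void example_driver_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */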

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
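
/*
 * Illustrative sketch, not part of the original file: the block core uses
 * the node returned here to place per-hardware-queue allocations close to
 * the CPUs that will submit on that queue. Variable names below are
 * illustrative only, roughly:
 *
 *	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 *	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
 */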