/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
	return cpu % nr_queues;
}

static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}
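
/*
 * Illustration under an assumed topology (not taken from this file): with
 * hyperthread pairs (0,4) and (1,5), topology_sibling_cpumask(5) is {1,5},
 * so get_first_sibling(5) returns 1, and get_first_sibling(1) also returns
 * 1: both threads of the core resolve to the same representative CPU.
 */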

int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int *map = set->mq_map;
	unsigned int nr_queues = set->nr_hw_queues;
	unsigned int cpu, first_sibling;

	for_each_possible_cpu(cpu) {
		/*
		 * First do sequential mapping between CPUs and queues.
		 * If there are still CPUs left to map and the topology has
		 * multiple threads per core, map sibling threads to the
		 * same queue as a performance optimization.
		 */
		if (cpu < nr_queues) {
			map[cpu] = cpu_to_queue_index(nr_queues, cpu);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = cpu_to_queue_index(nr_queues, cpu);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
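
/*
 * Worked example under an assumed topology (nothing here is taken from
 * this file): 8 possible CPUs, hyperthread sibling pairs (0,4), (1,5),
 * (2,6), (3,7), and nr_hw_queues = 4.
 *
 *	CPUs 0-3 map sequentially:	map[0..3] = 0..3
 *	CPU 4: first sibling is 0, so	map[4] = map[0] = 0
 *	CPU 5: first sibling is 1, so	map[5] = map[1] = 1
 *	and likewise			map[6] = 2, map[7] = 3
 *
 * Both threads of each core are serviced by the same hardware queue.
 */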

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}
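
/*
 * Example, continuing the assumed mapping above (map = {0,1,2,3,0,1,2,3}):
 * blk_mq_hw_queue_to_node(map, 2) scans the possible CPUs, finds CPU 2 as
 * the first CPU feeding hardware queue 2, and returns that CPU's local
 * memory node; if no CPU maps to the index, NUMA_NO_NODE is returned.
 */

/*
 * A later revision of the same file follows: the open-coded sibling
 * mapping above is replaced by the generic group_cpus_evenly() helper,
 * and the map now lives in struct blk_mq_queue_map.
 */
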
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include "blk.h"
#include "blk-mq.h"

void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

	masks = group_cpus_evenly(qmap->nr_queues);
	if (!masks) {
		/* Allocation failed: fall back to putting every CPU on the first queue. */
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	/* Each group of CPUs is assigned to one hardware queue. */
	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
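
/*
 * group_cpus_evenly() splits the possible CPUs into nr_queues groups,
 * spreading the groups across NUMA nodes for locality. Hedged usage
 * sketch (not part of this file): a driver without special queue-topology
 * needs typically wires the helper straight into its ->map_queues()
 * callback for the default queue type, e.g.:
 *
 *	static void example_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 *
 * "example_map_queues" is a hypothetical name; blk_mq_map_queues() and
 * HCTX_TYPE_DEFAULT are real kernel identifiers.
 */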

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
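
/*
 * Hedged usage sketch (assumed caller, not shown in this file): the node
 * returned here is typically fed to node-aware allocations when a
 * hardware context is set up, e.g.:
 *
 *	int node = blk_mq_hw_queue_to_node(qmap, hctx_idx);
 *	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
 *
 * kzalloc_node() accepts NUMA_NO_NODE, so the fallback return value needs
 * no special-casing by callers.
 */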