kernel/linux-rt-4.4.41/block/blk-mq-cpumap.c

  /*
   * CPU <-> hardware queue mapping helpers
   *
   * Copyright (C) 2013-2014 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/threads.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/smp.h>
  #include <linux/cpu.h>
  
  #include <linux/blk-mq.h>
  #include "blk.h"
  #include "blk-mq.h"
  
  static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
  			      const int cpu)
  {
  	return cpu * nr_queues / nr_cpus;
  }
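
/*
 * Example (editorial, not in the original source): with nr_cpus = 8 and
 * nr_queues = 3, the integer division above yields queue indices
 * 0,0,0,1,1,1,2,2 for CPUs 0-7, i.e. the CPUs are spread as evenly as
 * the rounding allows (3, 3 and 2 CPUs per queue).
 */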
  
  static int get_first_sibling(unsigned int cpu)
  {
  	unsigned int ret;
  
  	ret = cpumask_first(topology_sibling_cpumask(cpu));
  	if (ret < nr_cpu_ids)
  		return ret;
  
  	return cpu;
  }
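
/*
 * Example (editorial): on a two-way SMT system where CPUs 0 and 4 are
 * thread siblings of one core, get_first_sibling(4) returns 0. If the
 * sibling mask is not yet populated (cpumask_first() then returns
 * >= nr_cpu_ids), the CPU simply maps to itself.
 */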
  
  int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
  			    const struct cpumask *online_mask)
  {
  	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
  	cpumask_var_t cpus;
  
  	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
  		return 1;
  
  	cpumask_clear(cpus);
  	nr_cpus = nr_uniq_cpus = 0;
  	for_each_cpu(i, online_mask) {
  		nr_cpus++;
  		first_sibling = get_first_sibling(i);
  		if (!cpumask_test_cpu(first_sibling, cpus))
  			nr_uniq_cpus++;
  		cpumask_set_cpu(i, cpus);
  	}
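
	/*
	 * Illustrative numbers (editorial): on a 4-core/8-thread machine
	 * with every CPU online, the loop above leaves nr_cpus = 8 and
	 * nr_uniq_cpus = 4, one entry per physical core.
	 */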
  
  	queue = 0;
  	for_each_possible_cpu(i) {
  		if (!cpumask_test_cpu(i, online_mask)) {
  			map[i] = 0;
  			continue;
  		}
  
		/*
		 * Easy case - we have as many (or more) hardware queues
		 * as CPUs, or there are no thread siblings to take into
		 * account: map 1:1 if there are enough queues, otherwise
		 * map sequentially.
		 */
  		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
  			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
  			queue++;
  			continue;
  		}
  
		/*
		 * Fewer queues than CPUs, and some cores have multiple
		 * threads: map sibling threads to the same queue.
		 */
  		first_sibling = get_first_sibling(i);
  		if (first_sibling == i) {
  			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
  							queue);
  			queue++;
		} else {
			map[i] = map[first_sibling];
		}
  	}
  
  	free_cpumask_var(cpus);
  	return 0;
  }
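
/*
 * Worked example (editorial, not in the original source): take 8 online
 * CPUs arranged as 4 cores x 2 threads with sibling pairs (0,4), (1,5),
 * (2,6), (3,7) and nr_queues = 2. Since 2 < 8 and nr_uniq_cpus = 4 != 8,
 * the sibling path is taken: first siblings 0-3 receive queue indices
 * 0,0,1,1 from cpu_to_queue_index(4, 2, queue), CPUs 4-7 inherit their
 * sibling's entry, and any possible-but-offline CPU would get queue 0,
 * giving map = { 0, 0, 1, 1, 0, 0, 1, 1 }.
 */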
  
  unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
  {
  	unsigned int *map;
  
  	/* If cpus are offline, map them to first hctx */
  	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
  				set->numa_node);
  	if (!map)
  		return NULL;
  
  	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
  		return map;
  
  	kfree(map);
  	return NULL;
  }
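
/*
 * Note (editorial): kzalloc_node() zero-fills the map, so entries for
 * CPUs that never come online keep hctx index 0, matching the comment
 * above; blk_mq_update_queue_map() then fills in the entries for online
 * CPUs and returns 0 on success. NULL is returned on either allocation
 * or mapping failure.
 */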
  
  /*
   * We have no quick way of doing reverse lookups. This is only used at
   * queue init time, so runtime isn't important.
   */
  int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
  {
  	int i;
  
  	for_each_possible_cpu(i) {
  		if (index == mq_map[i])
  			return cpu_to_node(i);
  	}
  
  	return NUMA_NO_NODE;
  }
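
/*
 * Example (editorial): with the map { 0, 0, 1, 1, 0, 0, 1, 1 } from the
 * sketch above, on a machine where CPUs 2-3 sit on NUMA node 1,
 * blk_mq_hw_queue_to_node(map, 1) finds CPU 2 as the first CPU mapped
 * to hw queue 1 and returns node 1; an index that no CPU maps to yields
 * NUMA_NO_NODE.
 */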