kernel/linux-rt-4.4.41/block/blk-mq.h

  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  
  struct blk_mq_tag_set;
  
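  /*
   * Per-CPU software queue context: requests are staged on rq_list,
   * under lock, before being dispatched to a hardware queue. index_hw
   * is this ctx's index within the hardware queue it is mapped to.
   */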
  struct blk_mq_ctx {
  	struct {
  		spinlock_t		lock;
  		struct list_head	rq_list;
  	}  ____cacheline_aligned_in_smp;
  
  	unsigned int		cpu;
  	unsigned int		index_hw;
  
  	unsigned int		last_tag ____cacheline_aligned_in_smp;
  
  	/* incremented at dispatch time */
  	unsigned long		rq_dispatched[2];
  	unsigned long		rq_merged;
  
  	/* incremented at completion time */
  	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
  
  	struct request_queue	*queue;
  	struct kobject		kobj;
  } ____cacheline_aligned_in_smp;
  
  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
  void blk_mq_freeze_queue(struct request_queue *q);
  void blk_mq_free_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
  
  /*
   * CPU hotplug helpers
   */
  struct blk_mq_cpu_notifier;
  void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
  			      int (*fn)(void *, unsigned long, unsigned int),
  			      void *data);
  void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
  void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
  void blk_mq_cpu_init(void);
  void blk_mq_enable_hotplug(void);
  void blk_mq_disable_hotplug(void);
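  
  /*
   * Usage sketch (illustrative, not part of this header): blk-mq.c wires
   * a notifier up roughly like this when initializing a hardware queue,
   * with a callback matching the (data, action, cpu) signature above:
   *
   *	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
   *				 blk_mq_hctx_notify, hctx);
   *	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
   *
   * blk_mq_disable_hotplug()/blk_mq_enable_hotplug() bracket sections
   * that must not race with CPU hotplug events.
   */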
  
  /*
   * CPU -> queue mappings
   */
  extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
  extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
  				   const struct cpumask *online_mask);
  extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);
  
  /*
   * sysfs helpers
   */
  extern int blk_mq_sysfs_register(struct request_queue *q);
  extern void blk_mq_sysfs_unregister(struct request_queue *q);
  
  extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
  
  void blk_mq_release(struct request_queue *q);
  
  /*
   * Basic implementation of a sparser bitmap, allowing the user to
   * spread the bits over more cachelines.
   */
  struct blk_align_bitmap {
  	unsigned long word;
  	unsigned long depth;
  } ____cacheline_aligned_in_smp;
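  
  /*
   * Illustrative mapping (see the ctx_map users in blk-mq.c): bit nr of
   * such a sparse map lives in map[nr / bits_per_word].word at bit
   * (nr % bits_per_word), with 'depth' recording how many bits of 'word'
   * are in use, so neighbouring bits can sit on different cachelines.
   */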
  
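  /* Return the software queue context of @q for the given @cpu. */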
  static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  					   unsigned int cpu)
  {
  	struct blk_mq_ctx *ctx;
  
  	ctx = per_cpu_ptr(q->queue_ctx, cpu);
  	return ctx;
  }
  
  /*
   * This assumes per-cpu software queues; they could just as well be
   * per-node, for instance. For now this is hardcoded as-is. Note that we
   * don't care about preemption, since we know the ctxs are persistent.
   * This does mean that we can't rely on ctx always matching the currently
   * running CPU.
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
  	return __blk_mq_get_ctx(q, get_cpu_light());
  }
  
  static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
  {
  	put_cpu_light();
  }
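  
  /*
   * Illustrative only (hypothetical helper, not part of the original
   * header): the intended pairing. blk_mq_get_ctx() disables migration
   * via get_cpu_light(), so every successful get must be matched by a
   * blk_mq_put_ctx() on the same task.
   */
  static inline void blk_mq_example_ctx_usage(struct request_queue *q)
  {
  	struct blk_mq_ctx *ctx;
  
  	ctx = blk_mq_get_ctx(q);
  	/* ... stage requests on ctx->rq_list under ctx->lock ... */
  	blk_mq_put_ctx(ctx);
  }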
  
  struct blk_mq_alloc_data {
  	/* input parameter */
  	struct request_queue *q;
  	gfp_t gfp;
  	bool reserved;
  
  	/* input & output parameter */
  	struct blk_mq_ctx *ctx;
  	struct blk_mq_hw_ctx *hctx;
  };
  
  static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
  		struct request_queue *q, gfp_t gfp, bool reserved,
  		struct blk_mq_ctx *ctx,
  		struct blk_mq_hw_ctx *hctx)
  {
  	data->q = q;
  	data->gfp = gfp;
  	data->reserved = reserved;
  	data->ctx = ctx;
  	data->hctx = hctx;
  }
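  
  /*
   * Illustrative only (hypothetical helper name): callers in blk-mq.c,
   * e.g. blk_mq_map_request(), fill the allocation data roughly like
   * this before asking the tag allocator for a request.
   */
  static inline void blk_mq_example_prep_alloc(struct request_queue *q,
  		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx,
  		struct blk_mq_alloc_data *data)
  {
  	/* GFP_ATOMIC: request allocation here must not sleep */
  	blk_mq_set_alloc_data(data, q, GFP_ATOMIC, false, ctx, hctx);
  }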
  
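  /*
   * A hardware queue counts as mapped once at least one software ctx is
   * routed to it and it has tags to allocate from.
   */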
  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
  	return hctx->nr_ctx && hctx->tags;
  }
  
  #endif