xref: /openbmc/linux/block/blk.h (revision 1b69c6d0ae90b7f1a4f61d5c8209d5cb7a55f849)
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with orig_rq, so the two can't be
	 * active at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};
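
/*
 * Illustrative sketch only (the helper name is hypothetical and locking is
 * elided; the real code holds mq_flush_lock): the two 1-bit indices above
 * double-buffer flush staging.  New flushes are queued on
 * flush_queue[flush_pending_idx]; when a batch is issued, the pending index
 * is flipped so later flushes collect on the other list while the running
 * batch (tracked by flush_running_idx) completes.
 */
static inline void blk_flush_stage_sketch(struct blk_flush_queue *fq,
					  struct request *rq)
{
	/* stage the request on the currently pending list */
	list_add_tail(&rq->queuelist, &fq->flush_queue[fq->flush_pending_idx]);

	/* no batch in flight yet: flip the 1-bit index and issue this one */
	if (fq->flush_pending_idx == fq->flush_running_idx)
		fq->flush_pending_idx ^= 1;
}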

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	struct blk_mq_hw_ctx *hctx;

	if (!q->mq_ops)
		return q->fq;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	return hctx->fq;
}
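
/*
 * Hedged usage sketch (the helper name is hypothetical): flush code can
 * look the flush queue up through the request's mq context, which is NULL
 * on the legacy path, so a single call covers both the legacy (q->fq) and
 * blk-mq (per-hctx) cases handled above.
 */
static inline struct blk_flush_queue *
blk_rq_flush_queue_sketch(struct request_queue *q, struct request *rq)
{
	return blk_get_flush_queue(q, rq->mq_ctx);
}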

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * The EH timer and normal IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
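
/*
 * Hedged sketch of the 'grab' described above (the helper name is
 * hypothetical): both the timeout handler and the completion path call
 * blk_mark_rq_complete() first, and only the caller that wins the
 * test_and_set_bit() race goes on to finish the request; the loser
 * simply backs off.
 */
static inline bool blk_try_grab_rq_sketch(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return false;	/* the other path got there first */

	/* winner: it is now safe to complete or time out the request */
	return true;
}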

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is running and the flush isn't queueable
		 * in the drive, so we can hold the queue until the flush
		 * request finishes.  Even if we didn't hold it, the driver
		 * couldn't dispatch further requests and would have to
		 * requeue them, and holding the queue can improve throughput
		 * too.  For example, given requests flush1, write1, flush2:
		 * flush1 is dispatched, the queue is held, and write1 isn't
		 * inserted into the queue.  After flush1 finishes, flush2
		 * is dispatched.  Since the disk cache is already clean,
		 * flush2 finishes very quickly, so it looks as if flush2
		 * was folded into flush1.
		 * While the queue is held, a flag is set to indicate that
		 * the queue should be restarted later.  Please see
		 * flush_end_io() for details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
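
/*
 * Hedged sketch of a caller (the helper name is hypothetical; the real
 * caller of __elv_next_request() is blk_peek_request() in blk-core.c):
 * peek at the next request to dispatch and mark it started the first time
 * it is seen.  NULL means the elevator has nothing to dispatch or the
 * flush machinery is holding the queue.
 */
static inline struct request *blk_peek_sketch(struct request_queue *q)
{
	struct request *rq = __elv_next_request(q);

	if (rq && !(rq->cmd_flags & REQ_STARTED)) {
		/* a real caller would also run elevator/prep hooks here */
		rq->cmd_flags |= REQ_STARTED;
	}
	return rq;
}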

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
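
/*
 * Hedged sketch of the hysteresis (the helper and its calling convention
 * are hypothetical): a queue is marked congested once usage reaches the
 * "on" threshold and is only unmarked again after usage drops below the
 * lower "off" threshold, so the congested state doesn't flap around a
 * single boundary value.
 */
static inline bool queue_congested_sketch(struct request_queue *q,
					  unsigned int nr_used,
					  bool was_congested)
{
	if (nr_used >= queue_congestion_on_threshold(q))
		return true;
	if (nr_used < queue_congestion_off_threshold(q))
		return false;
	return was_congested;	/* inside the hysteresis band: keep old state */
}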

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
		(rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may be %NULL if the allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
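
/*
 * Hedged usage sketch (the helper name is hypothetical): a caller that
 * wants an io_context before allocating a request simply tries to create
 * one and carries on regardless, since %NULL only means the allocation
 * failed and the request can still proceed without an ioc.
 */
static inline struct io_context *blk_ensure_ioc_sketch(gfp_t gfp_mask,
							int node)
{
	struct io_context *ioc = create_io_context(gfp_mask, node);

	/* ioc may be NULL here; callers are expected to tolerate that */
	return ioc;
}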

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */