--- blk.h (9758ff2fa240173e9a45613b07774b7a78b7653e)
+++ blk.h (5ef1630586317e92c9ebd7b4ce48f393b7ff790f)
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H

-#include <linux/idr.h>
-#include <linux/blk-mq.h>
-#include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
-#include "blk-mq.h"
-#include "blk-mq-sched.h"

 struct elevator_type;

 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)

 extern struct dentry *blk_debugfs_root;

--- 5 unchanged lines hidden ---

 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;

 	spinlock_t		mq_flush_lock;
 };

 extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *blk_requestq_srcu_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;

-static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-{
-	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
-}
-
 static inline void __blk_get_queue(struct request_queue *q)
 {
 	kobject_get(&q->kobj);
 }

 bool is_flush_rq(struct request *req);

 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,

--- 193 unchanged lines hidden ---

 }
 static inline void blk_integrity_del(struct gendisk *disk)
 {
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */

 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
-void blk_print_req_error(struct request *req, blk_status_t status);
+const char *blk_status_to_str(blk_status_t status);

 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq);
+		unsigned int nr_segs);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 		struct bio *bio, unsigned int nr_segs);

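The updated blk_attempt_plug_merge() prototype above drops the bool *same_queue_rq output parameter, so callers now only learn whether the bio was merged. A hedged caller sketch under that assumption (hypothetical function name; the real submission path lives in block/blk-mq.c and is not shown in this diff):

static void example_submit(struct request_queue *q, struct bio *bio,
			   unsigned int nr_segs)
{
	/* Try to merge into a request already held in the task's plug;
	 * if that succeeds there is nothing left to submit here. */
	if (blk_attempt_plug_merge(q, bio, nr_segs))
		return;

	/* ... otherwise fall through to request allocation (elided) ... */
}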
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
 /*
  * Plug flush limits
  */
 #define BLK_MAX_REQUEST_COUNT 32
 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)

 /*
  * Internal elevator interface
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

 void blk_insert_flush(struct request *rq);

 int elevator_switch_mq(struct request_queue *q,
 			struct elevator_type *new_e);
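For illustration, a hedged sketch of how the plug flush limits above are typically consulted: a plug is flushed once it holds too many requests or the request currently being built grows too large. The actual checks live in the plugging code in block/blk-mq.c, not in this header, and example_plug_is_full is a hypothetical name:

static bool example_plug_is_full(struct blk_plug *plug, struct request *last)
{
	/* Flush when either limit defined in blk.h is reached. */
	return plug->rq_count >= BLK_MAX_REQUEST_COUNT ||
	       (last && blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE);
}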
-void __elevator_exit(struct request_queue *, struct elevator_queue *);
+void elevator_exit(struct request_queue *q);
 int elv_register_queue(struct request_queue *q, bool uevent);
 void elv_unregister_queue(struct request_queue *q);

-static inline void elevator_exit(struct request_queue *q,
-		struct elevator_queue *e)
-{
-	lockdep_assert_held(&q->sysfs_lock);
-
-	blk_mq_sched_free_rqs(q);
-	__elevator_exit(q, e);
-}
-
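The inline wrapper removed above suggests what the new out-of-line elevator_exit(q) presumably folds together; a minimal sketch under that assumption (the real definition lives in block/elevator.c and is not part of this diff):

void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_rqs(q);
	__elevator_exit(q, e);		/* or its folded-in equivalent */
}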
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
 		char *buf);

--- 43 unchanged lines hidden ---

 /*
  * Contribute to IO statistics IFF:
  *
  * a) it's attached to a gendisk, and
  * b) the queue had IO stats enabled when this request was started
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
+	return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
 }

-static inline void blk_account_io_done(struct request *req, u64 now)
-{
-	/*
-	 * Account IO completion. flush_rq isn't accounted as a
-	 * normal IO on queueing nor completion. Accounting the
-	 * containing request is enough.
-	 */
-	if (blk_do_io_stat(req) && req->part &&
-	    !(req->rq_flags & RQF_FLUSH_SEQ))
-		__blk_account_io_done(req, now);
-}
-
-static inline void blk_account_io_start(struct request *req)
-{
-	if (blk_do_io_stat(req))
-		__blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);

 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {
 	req->cmd_flags |= REQ_NOMERGE;
 	if (req == q->last_merge)
 		q->last_merge = NULL;
 }

 /*

--- 18 unchanged lines hidden ---

 {
 	return round_down(UINT_MAX, q->limits.discard_granularity) >>
 			SECTOR_SHIFT;
 }

 /*
  * Internal io_context interface
  */
-void get_io_context(struct io_context *ioc);
-struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
-		gfp_t gfp_mask);
+struct io_cq *ioc_find_get_icq(struct request_queue *q);
+struct io_cq *ioc_lookup_icq(struct request_queue *q);
+#ifdef CONFIG_BLK_ICQ
 void ioc_clear_queue(struct request_queue *q);
+#else
+static inline void ioc_clear_queue(struct request_queue *q)
+{
+}
+#endif /* CONFIG_BLK_ICQ */

-int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
-
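The io_context interface now hands out icqs keyed only by the queue: ioc_find_get_icq() looks up or creates the current task's icq, replacing the old get_io_context()/ioc_create_icq() pair. A hedged sketch of how an I/O scheduler's prepare-request hook might use it (hypothetical function name; error handling elided):

static void example_prepare_request(struct request *rq)
{
	struct io_cq *icq;

	/* Look up (or allocate) the icq linking current's io_context to rq->q. */
	icq = ioc_find_get_icq(rq->q);
	if (icq)
		rq->elv.icq = icq;
}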
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
 extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
 	const char *page, size_t count);
 extern void blk_throtl_bio_endio(struct bio *bio);
 extern void blk_throtl_stat_add(struct request *rq, u64 time);
 #else
 static inline void blk_throtl_bio_endio(struct bio *bio) { }

--- 41 unchanged lines hidden ---

 int bdev_del_partition(struct gendisk *disk, int partno);
 int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
 		sector_t length);

 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);

-struct request_queue *blk_alloc_queue(int node_id);
+static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+{
+	if (srcu)
+		return blk_requestq_srcu_cachep;
+	return blk_requestq_cachep;
+}
+struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

+int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
+
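blk_get_queue_kmem_cache() added above selects between the plain and the SRCU-enabled request_queue caches, and blk_alloc_queue() now takes an alloc_srcu flag, so presumably the allocator uses the helper to pick the cache. A minimal sketch under that assumption (hypothetical function name; the real blk_alloc_queue() lives in block/blk-core.c):

static struct request_queue *example_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;

	/* Pick the cache matching the queue layout being allocated. */
	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
				  GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	/* ... remaining initialization elided ... */
	return q;
}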
 int disk_alloc_events(struct gendisk *disk);
 void disk_add_events(struct gendisk *disk);
 void disk_del_events(struct gendisk *disk);
 void disk_release_events(struct gendisk *disk);
 extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;

--- 8 unchanged lines hidden ---

 long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

 extern const struct address_space_operations def_blk_aops;

 int disk_register_independent_access_ranges(struct gendisk *disk,
 		struct blk_independent_access_ranges *new_iars);
 void disk_unregister_independent_access_ranges(struct gendisk *disk);

+#ifdef CONFIG_FAIL_MAKE_REQUEST
+bool should_fail_request(struct block_device *part, unsigned int bytes);
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool should_fail_request(struct block_device *part,
+					unsigned int bytes)
+{
+	return false;
+}
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
+/*
+ * Optimized request reference counting. Ideally we'd make timeouts be more
+ * clever, as that's the only reason we need references at all... But until
+ * this happens, this is faster than using refcount_t. Also see:
+ *
+ * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
+ */
+#define req_ref_zero_or_close_to_overflow(req)	\
+	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct request *req)
+{
+	return atomic_inc_not_zero(&req->ref);
+}
+
+static inline bool req_ref_put_and_test(struct request *req)
+{
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	return atomic_dec_and_test(&req->ref);
+}
+
+static inline void req_ref_set(struct request *req, int value)
+{
+	atomic_set(&req->ref, value);
+}
+
+static inline int req_ref_read(struct request *req)
+{
+	return atomic_read(&req->ref);
+}
+
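A hedged usage sketch of the req_ref_* helpers added above: a reference is taken only while the request is known to be alive, and whoever drops the last reference is responsible for freeing it (the final-free path is only indicated in a comment; the real users are the blk-mq completion and timeout code):

static void example_peek_request(struct request *rq)
{
	/* Only touch rq if its reference count has not already hit zero. */
	if (!req_ref_inc_not_zero(rq))
		return;

	/* ... inspect rq, e.g. from a timeout handler ... */

	if (req_ref_put_and_test(rq)) {
		/* Last reference dropped: the request must now be freed
		 * (assumed to go through blk-mq's free path). */
	}
}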
 #endif /* BLK_INTERNAL_H */