/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and a segment of @len bytes cannot be
 * added to it; otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}
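
/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 * callers building a bio page by page typically test bio_full() first
 * and submit + reallocate once no more room is left.
 *
 *	if (bio_full(bio, PAGE_SIZE)) {
 *		submit_bio(bio);
 *		bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 *	}
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 */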

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete the bio with an error here. */
}

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
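
/*
 * Typical iteration (sketch): walk the remaining data one single-page
 * segment at a time, e.g. to checksum it. @bvl is a copy taken from the
 * iterator, so modifying it does not touch the bio. page_address() is
 * assumed valid here, i.e. a lowmem page.
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvl, bio, iter)
 *		crc = crc32c(crc, page_address(bvl.bv_page) + bvl.bv_offset,
 *			     bvl.bv_len);
 */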

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvecs */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
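
/*
 * Unlike bio_for_each_segment(), each @bvl returned here may span
 * several contiguous pages; this is roughly how request mapping code
 * counts physical segments. A sketch:
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_bvec(bvl, bio, iter)
 *		nr_phys_segs++;
 */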

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then point at freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
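
/*
 * Split-loop sketch (hypothetical caller, error handling elided): carve
 * chunk_sectors-sized pieces off the front, chain each piece to the
 * original so the original only completes once every piece does; the
 * final piece returned is the original bio itself.
 *
 *	struct bio *n;
 *
 *	do {
 *		n = bio_next_split(bio, chunk_sectors, GFP_NOIO, bs);
 *		if (n != bio)
 *			bio_chain(n, bio);
 *		submit_bio(n);
 *	} while (n != bio);
 */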

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
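
/*
 * Allocation sketch (hypothetical caller): build and submit a one-page
 * read. bio_alloc() draws from the mempool-backed fs_bio_set, so with a
 * gfp mask that allows direct reclaim it won't fail; bio_kmalloc() bios
 * come straight from the allocator and are freed with bio_put() as usual.
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;	// hypothetical completion handler
 *	submit_bio(bio);
 */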

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * Remember: never re-enable interrupts between a bvec_kmap_irq() and
 * the matching bvec_kunmap_irq()!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
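
/*
 * Usage sketch: copy a bvec's payload out while the page is atomically
 * mapped; interrupts stay off between map and unmap. @dst is a
 * placeholder buffer.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(dst, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */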

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
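
/*
 * Usage sketch (hypothetical driver): queue bios under a lock and drain
 * them later from process context. bio1/bio2 are placeholders.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */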

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
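
/*
 * Sketch (hypothetical stacking driver): keep @bio pending across some
 * deferred work. Every bio_inc_remaining() must be balanced by one
 * extra bio_endio() on the same bio; ->bi_end_io only runs once
 * __bi_remaining drops to zero. defer_work() is a placeholder.
 *
 *	bio_inc_remaining(bio);
 *	defer_work(bio);	// the worker finishes with bio_endio(bio)
 */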

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
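
/*
 * Driver-private bio_set sketch (hypothetical names): front_pad leaves
 * room for a per-bio private struct that embeds the bio as its last
 * member, so container_of() works in the completion handler.
 *
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	...
 *	bioset_exit(&my_bio_set);
 */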

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

#endif /* __LINUX_BIO_H */