/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#ifdef CONFIG_THP_SWAP
#if HPAGE_PMD_NR > 256
#define BIO_MAX_PAGES		HPAGE_PMD_NR
#else
#define BIO_MAX_PAGES		256
#endif
#else
#define BIO_MAX_PAGES		256
#endif

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_full(struct bio *bio)
{
	return bio->bi_vcnt >= bio->bi_max_vecs;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

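/*
 * Example (an illustrative sketch, not part of this header): walking every
 * segment of a bio the caller owns, the same pattern bio_segments() below
 * uses internally:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		pr_debug("page=%p len=%u offset=%u\n",
 *			 bv.bv_page, bv.bv_len, bv.bv_offset);
 */
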
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio could already be freed by the time the if (bio->bi_flags ...)
 * check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline unsigned bio_pages_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_vcnt;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

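/*
 * Example (an illustrative sketch): a remapping driver that must not cross
 * a per-child limit can carve a bio front-to-back with bio_next_split();
 * "max_sectors" and "my_bio_set" stand for the driver's own limit and
 * bio_set, and GFP_NOIO avoids recursing into I/O during the allocation:
 *
 *	struct bio *split;
 *
 *	split = bio_next_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
 *	if (split != bio) {
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *	}
 *
 * after which "split" covers at most max_sectors; the chained parent
 * completes only once all its children have completed.
 */
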
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

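/*
 * Example (an illustrative sketch): synchronously reading one page from
 * sector "sector" of a block device "bdev" (both caller-supplied here):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 *
 * bio_alloc() is mempool-backed and will not fail for allocations that may
 * sleep, but bio_add_page() can add less than requested and real code
 * should check its return value.
 */
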
extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

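/*
 * Example (an illustrative sketch): a driver failing a bio in its
 * submission path; "my_dev_ready()" is a hypothetical readiness check.
 * BLK_STS_AGAIN is only meaningful to callers that set REQ_NOWAIT:
 *
 *	if (!my_dev_ready(dev)) {
 *		if (bio->bi_opf & REQ_NOWAIT)
 *			bio_wouldblock_error(bio);
 *		else
 *			bio_io_error(bio);
 *		return;
 *	}
 */
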
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
				unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
				struct hd_struct *part,
				unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

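/*
 * Example (an illustrative sketch): a stacking driver remapping a cloned
 * bio onto a lower device before resubmitting it; "lower_bdev",
 * "my_bio_set" and "remap_sector()" are the driver's own (hypothetical)
 * pieces:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_iter.bi_sector = remap_sector(bio->bi_iter.bi_sector);
 *	generic_make_request(clone);
 */
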
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
						struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
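
/*
 * Example (an illustrative sketch): copying one segment out of a bvec with
 * the helpers above. Interrupts stay disabled between the map and the
 * unmap, so keep the critical section short; "dst" is a caller buffer:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(dst, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */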

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

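/*
 * Example (an illustrative sketch): a driver draining a deferred list of
 * bios under its own lock, then submitting them one by one; "my_lock" and
 * "my_deferred" are hypothetical driver fields:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	spin_lock(&my_lock);
 *	bio_list_merge(&list, &my_deferred);
 *	bio_list_init(&my_deferred);
 *	spin_unlock(&my_lock);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		generic_make_request(bio);
 */
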
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */