/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
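
/*
 * Worked example (illustrative): bi_size is in bytes and a sector is
 * 512 bytes, so a bio carrying one 4096-byte page has
 * bio_sectors(bio) == 4096 >> 9 == 8; if it starts at sector 128, its
 * bio_end_sector(bio) is 128 + 8 == 136.
 */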

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * Queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally, and thus map high pages temporarily. For a
 * permanent PIO fallback, the user is probably better off disabling
 * highmem I/O completely on that queue (see ide-dma for an example).
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
		bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
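
/*
 * Worked example (illustrative): with queue_segment_boundary() ==
 * 0xffff, two bvecs may share a segment only if they stay inside one
 * 64k window.  __BIO_SEG_BOUNDARY(0x10000, 0x18000, 0xffff) compares
 * (0x10000 | 0xffff) with (0x17fff | 0xffff), both 0x1ffff, so it is
 * true; with an end address of 0x20001 it would be false.
 */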

/*
 * drivers should _never_ use the "all" version - the bio may have been
 * split before it got to the driver, and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
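
/*
 * Example (illustrative sketch): walking every segment of a bio the
 * caller owns, without touching bio->bi_iter itself:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		pr_debug("page %p offset %u len %u\n",
 *			 bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */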

static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	__bio_for_each_segment(bv, bio, iter, *bvec)
		segs++;

	return segs;
}

static inline unsigned bio_segments(struct bio *bio)
{
	return __bio_segments(bio, &bio->bi_iter);
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already be freed by the time the
 * if (bio->bi_flags ...) test runs
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
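
/*
 * Example (illustrative sketch): carving a large bio into chunk-sized
 * pieces, as a stacking driver might; max_sectors and split_bs are
 * hypothetical here:
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, split_bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		generic_make_request(split);
 *	} while (split != bio);
 */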

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
					    struct bio_set *, int, int);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}
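
/*
 * Example (illustrative sketch): allocating a single-segment bio from
 * fs_bio_set and submitting it as a read; bdev, sector and page are
 * hypothetical and owned by the caller:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */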

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
			struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember to never re-enable interrupts between a bvec_kmap_irq() and
 * the matching bvec_kunmap_irq()!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
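
/*
 * Example (illustrative): bio_kmap_irq()/bio_kunmap_irq() must stay
 * strictly paired, with interrupts left alone in between; scratch is
 * a hypothetical buffer:
 *
 *	unsigned long flags;
 *	char *buf = bio_kmap_irq(bio, &flags);
 *
 *	memcpy(scratch, buf, bio_cur_bytes(bio));
 *	bio_kunmap_irq(buf, &flags);
 */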

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
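
/*
 * Example (illustrative sketch): queueing bios on a local list and
 * draining it in FIFO order; bio1 and bio2 are hypothetical:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		generic_make_request(bio);
 */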

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
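
/*
 * Example (illustrative sketch): a driver that completes a bio from two
 * independent paths can take an extra remaining reference; the first
 * bio_endio() then only drops the count, and the second one actually
 * completes the bio:
 *
 *	bio_inc_remaining(bio);
 *	...
 *	bio_endio(bio);		(first path: just drops __bi_remaining)
 *	...
 *	bio_endio(bio);		(second path: finishes the bio)
 */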

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};
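
/*
 * Example (illustrative sketch): a driver creating a private bio_set so
 * its allocations don't contend with fs_bio_set; the pool size and
 * front pad are arbitrary here:
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *
 *	if (!bs)
 *		return -ENOMEM;
 *	bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *	...
 *	bioset_free(bs);
 */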

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine - this is not going to be
 * performance critical; basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */