1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4  */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/blk-cgroup.h>
19 #include <linux/highmem.h>
20 #include <linux/sched/sysctl.h>
21 #include <linux/blk-crypto.h>
22 #include <linux/xarray.h>
23 
24 #include <trace/events/block.h>
25 #include "blk.h"
26 #include "blk-rq-qos.h"
27 
28 static struct biovec_slab {
29 	int nr_vecs;
30 	char *name;
31 	struct kmem_cache *slab;
32 } bvec_slabs[] __read_mostly = {
33 	{ .nr_vecs = 16, .name = "biovec-16" },
34 	{ .nr_vecs = 64, .name = "biovec-64" },
35 	{ .nr_vecs = 128, .name = "biovec-128" },
36 	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
37 };
38 
39 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
40 {
41 	switch (nr_vecs) {
42 	/* smaller bios use inline vecs */
43 	case 5 ... 16:
44 		return &bvec_slabs[0];
45 	case 17 ... 64:
46 		return &bvec_slabs[1];
47 	case 65 ... 128:
48 		return &bvec_slabs[2];
49 	case 129 ... BIO_MAX_VECS:
50 		return &bvec_slabs[3];
51 	default:
52 		BUG();
53 		return NULL;
54 	}
55 }
56 
57 /*
58  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
59  * IO code that does not need private memory pools.
60  */
61 struct bio_set fs_bio_set;
62 EXPORT_SYMBOL(fs_bio_set);
63 
64 /*
65  * Our slab pool management
66  */
67 struct bio_slab {
68 	struct kmem_cache *slab;
69 	unsigned int slab_ref;
70 	unsigned int slab_size;
71 	char name[8];
72 };
73 static DEFINE_MUTEX(bio_slab_lock);
74 static DEFINE_XARRAY(bio_slabs);
75 
76 static struct bio_slab *create_bio_slab(unsigned int size)
77 {
78 	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
79 
80 	if (!bslab)
81 		return NULL;
82 
83 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
84 	bslab->slab = kmem_cache_create(bslab->name, size,
85 			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
86 	if (!bslab->slab)
87 		goto fail_alloc_slab;
88 
89 	bslab->slab_ref = 1;
90 	bslab->slab_size = size;
91 
92 	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
93 		return bslab;
94 
95 	kmem_cache_destroy(bslab->slab);
96 
97 fail_alloc_slab:
98 	kfree(bslab);
99 	return NULL;
100 }
101 
102 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
103 {
104 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
105 }
106 
107 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
108 {
109 	unsigned int size = bs_bio_slab_size(bs);
110 	struct bio_slab *bslab;
111 
112 	mutex_lock(&bio_slab_lock);
113 	bslab = xa_load(&bio_slabs, size);
114 	if (bslab)
115 		bslab->slab_ref++;
116 	else
117 		bslab = create_bio_slab(size);
118 	mutex_unlock(&bio_slab_lock);
119 
120 	if (bslab)
121 		return bslab->slab;
122 	return NULL;
123 }
124 
125 static void bio_put_slab(struct bio_set *bs)
126 {
127 	struct bio_slab *bslab = NULL;
128 	unsigned int slab_size = bs_bio_slab_size(bs);
129 
130 	mutex_lock(&bio_slab_lock);
131 
132 	bslab = xa_load(&bio_slabs, slab_size);
133 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
134 		goto out;
135 
136 	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
137 
138 	WARN_ON(!bslab->slab_ref);
139 
140 	if (--bslab->slab_ref)
141 		goto out;
142 
143 	xa_erase(&bio_slabs, slab_size);
144 
145 	kmem_cache_destroy(bslab->slab);
146 	kfree(bslab);
147 
148 out:
149 	mutex_unlock(&bio_slab_lock);
150 }
151 
152 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
153 {
154 	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);
155 
156 	if (nr_vecs == BIO_MAX_VECS)
157 		mempool_free(bv, pool);
158 	else if (nr_vecs > BIO_INLINE_VECS)
159 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
160 }
161 
162 /*
163  * Make the first allocation restricted and don't dump info on allocation
164  * failures, since we'll fall back to the mempool in case of failure.
165  */
166 static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
167 {
168 	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
169 		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
170 }
171 
172 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
173 		gfp_t gfp_mask)
174 {
175 	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
176 
177 	if (WARN_ON_ONCE(!bvs))
178 		return NULL;
179 
180 	/*
181 	 * Upgrade the nr_vecs request to take full advantage of the allocation.
182 	 * We also rely on this in the bvec_free path.
183 	 */
184 	*nr_vecs = bvs->nr_vecs;
185 
186 	/*
187 	 * Try a slab allocation first for all smaller allocations.  If that
188 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
189 	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
190 	 */
191 	if (*nr_vecs < BIO_MAX_VECS) {
192 		struct bio_vec *bvl;
193 
194 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
195 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
196 			return bvl;
197 		*nr_vecs = BIO_MAX_VECS;
198 	}
199 
200 	return mempool_alloc(pool, gfp_mask);
201 }
202 
203 void bio_uninit(struct bio *bio)
204 {
205 #ifdef CONFIG_BLK_CGROUP
206 	if (bio->bi_blkg) {
207 		blkg_put(bio->bi_blkg);
208 		bio->bi_blkg = NULL;
209 	}
210 #endif
211 	if (bio_integrity(bio))
212 		bio_integrity_free(bio);
213 
214 	bio_crypt_free_ctx(bio);
215 }
216 EXPORT_SYMBOL(bio_uninit);
217 
218 static void bio_free(struct bio *bio)
219 {
220 	struct bio_set *bs = bio->bi_pool;
221 	void *p;
222 
223 	bio_uninit(bio);
224 
225 	if (bs) {
226 		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
227 
228 		/*
229 		 * If we have front padding, adjust the bio pointer before freeing
230 		 */
231 		p = bio;
232 		p -= bs->front_pad;
233 
234 		mempool_free(p, &bs->bio_pool);
235 	} else {
236 		/* Bio was allocated by bio_kmalloc() */
237 		kfree(bio);
238 	}
239 }
240 
241 /*
242  * Users of this function have their own bio allocation. Consequently,
243  * they must remember to pair any call to bio_init() with bio_uninit()
244  * when IO has completed, or when the bio is released.
245  */
246 void bio_init(struct bio *bio, struct bio_vec *table,
247 	      unsigned short max_vecs)
248 {
249 	bio->bi_next = NULL;
250 	bio->bi_bdev = NULL;
251 	bio->bi_opf = 0;
252 	bio->bi_flags = 0;
253 	bio->bi_ioprio = 0;
254 	bio->bi_write_hint = 0;
255 	bio->bi_status = 0;
256 	bio->bi_iter.bi_sector = 0;
257 	bio->bi_iter.bi_size = 0;
258 	bio->bi_iter.bi_idx = 0;
259 	bio->bi_iter.bi_bvec_done = 0;
260 	bio->bi_end_io = NULL;
261 	bio->bi_private = NULL;
262 #ifdef CONFIG_BLK_CGROUP
263 	bio->bi_blkg = NULL;
264 	bio->bi_issue.value = 0;
265 #ifdef CONFIG_BLK_CGROUP_IOCOST
266 	bio->bi_iocost_cost = 0;
267 #endif
268 #endif
269 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
270 	bio->bi_crypt_context = NULL;
271 #endif
272 #ifdef CONFIG_BLK_DEV_INTEGRITY
273 	bio->bi_integrity = NULL;
274 #endif
275 	bio->bi_vcnt = 0;
276 
277 	atomic_set(&bio->__bi_remaining, 1);
278 	atomic_set(&bio->__bi_cnt, 1);
279 
280 	bio->bi_max_vecs = max_vecs;
281 	bio->bi_io_vec = table;
282 	bio->bi_pool = NULL;
283 }
284 EXPORT_SYMBOL(bio_init);
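
/*
 * Illustrative sketch (not part of the original file): a caller-owned,
 * on-stack bio initialized with bio_init() and torn down with bio_uninit(),
 * as the comment above requires.  The device, sector and page are whatever
 * the (hypothetical) caller wants to read synchronously.
 */
static int __maybe_unused example_bio_init_sync_read(struct block_device *bdev,
						     sector_t sector,
						     struct page *page)
{
	struct bio_vec inline_vec;
	struct bio bio;
	int ret;

	bio_init(&bio, &inline_vec, 1);		/* one inline bvec */
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);	/* one page always fits in an empty bio */

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);			/* pairs with bio_init() */
	return ret;
}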
285 
286 /**
287  * bio_reset - reinitialize a bio
288  * @bio:	bio to reset
289  *
290  * Description:
291  *   After calling bio_reset(), @bio will be in the same state as a freshly
292  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
293  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
294  *   comment in struct bio.
295  */
296 void bio_reset(struct bio *bio)
297 {
298 	bio_uninit(bio);
299 	memset(bio, 0, BIO_RESET_BYTES);
300 	atomic_set(&bio->__bi_remaining, 1);
301 }
302 EXPORT_SYMBOL(bio_reset);
303 
304 static struct bio *__bio_chain_endio(struct bio *bio)
305 {
306 	struct bio *parent = bio->bi_private;
307 
308 	if (bio->bi_status && !parent->bi_status)
309 		parent->bi_status = bio->bi_status;
310 	bio_put(bio);
311 	return parent;
312 }
313 
314 static void bio_chain_endio(struct bio *bio)
315 {
316 	bio_endio(__bio_chain_endio(bio));
317 }
318 
319 /**
320  * bio_chain - chain bio completions
321  * @bio: the target bio
322  * @parent: the parent bio of @bio
323  *
324  * The caller won't have a bi_end_io called when @bio completes - instead,
325  * @parent's bi_end_io won't be called until both @parent and @bio have
326  * completed; the chained bio will also be freed when it completes.
327  *
328  * The caller must not set bi_private or bi_end_io in @bio.
329  */
330 void bio_chain(struct bio *bio, struct bio *parent)
331 {
332 	BUG_ON(bio->bi_private || bio->bi_end_io);
333 
334 	bio->bi_private = parent;
335 	bio->bi_end_io	= bio_chain_endio;
336 	bio_inc_remaining(parent);
337 }
338 EXPORT_SYMBOL(bio_chain);
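
/*
 * Illustrative sketch (not part of the original file): the pattern used by
 * helpers that build a long run of bios - chain each previously built bio to
 * the newly allocated one and submit it, so only the final bio needs an
 * explicit completion.  This mirrors the blk-lib.c style; the names are
 * hypothetical.
 */
static struct bio *__maybe_unused example_next_chained_bio(struct bio *prev,
							   unsigned int nr_pages,
							   gfp_t gfp)
{
	/* with a reclaiming gfp mask (e.g. GFP_KERNEL) this cannot fail */
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (prev) {
		bio_chain(prev, new);	/* new will not complete until prev has */
		submit_bio(prev);
	}
	return new;
}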
339 
340 static void bio_alloc_rescue(struct work_struct *work)
341 {
342 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
343 	struct bio *bio;
344 
345 	while (1) {
346 		spin_lock(&bs->rescue_lock);
347 		bio = bio_list_pop(&bs->rescue_list);
348 		spin_unlock(&bs->rescue_lock);
349 
350 		if (!bio)
351 			break;
352 
353 		submit_bio_noacct(bio);
354 	}
355 }
356 
357 static void punt_bios_to_rescuer(struct bio_set *bs)
358 {
359 	struct bio_list punt, nopunt;
360 	struct bio *bio;
361 
362 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
363 		return;
364 	/*
365 	 * In order to guarantee forward progress we must punt only bios that
366 	 * were allocated from this bio_set; otherwise, if there was a bio on
367 	 * there for a stacking driver higher up in the stack, processing it
368 	 * could require allocating bios from this bio_set, and doing that from
369 	 * our own rescuer would be bad.
370 	 *
371 	 * Since bio lists are singly linked, pop them all instead of trying to
372 	 * remove from the middle of the list:
373 	 */
374 
375 	bio_list_init(&punt);
376 	bio_list_init(&nopunt);
377 
378 	while ((bio = bio_list_pop(&current->bio_list[0])))
379 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
380 	current->bio_list[0] = nopunt;
381 
382 	bio_list_init(&nopunt);
383 	while ((bio = bio_list_pop(&current->bio_list[1])))
384 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
385 	current->bio_list[1] = nopunt;
386 
387 	spin_lock(&bs->rescue_lock);
388 	bio_list_merge(&bs->rescue_list, &punt);
389 	spin_unlock(&bs->rescue_lock);
390 
391 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
392 }
393 
394 /**
395  * bio_alloc_bioset - allocate a bio for I/O
396  * @gfp_mask:   the GFP_* mask given to the slab allocator
397  * @nr_iovecs:	number of iovecs to pre-allocate
398  * @bs:		the bio_set to allocate from.
399  *
400  * Allocate a bio from the mempools in @bs.
401  *
402  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
403  * allocate a bio.  This is due to the mempool guarantees.  To make this work,
404  * callers must never allocate more than 1 bio at a time from the general pool.
405  * Callers that need to allocate more than 1 bio must always submit the
406  * previously allocated bio for IO before attempting to allocate a new one.
407  * Failure to do so can cause deadlocks under memory pressure.
408  *
409  * Note that when running under submit_bio_noacct() (i.e. any block driver),
410  * bios are not submitted until after you return - see the code in
411  * submit_bio_noacct() that converts recursion into iteration, to prevent
412  * stack overflows.
413  *
414  * This would normally mean allocating multiple bios under submit_bio_noacct()
415  * would be susceptible to deadlocks, but we have
416  * deadlock avoidance code that resubmits any blocked bios from a rescuer
417  * thread.
418  *
419  * However, we do not guarantee forward progress for allocations from other
420  * mempools. Doing multiple allocations from the same mempool under
421  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
422  * for per bio allocations.
423  *
424  * Returns: Pointer to new bio on success, NULL on failure.
425  */
426 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
427 			     struct bio_set *bs)
428 {
429 	gfp_t saved_gfp = gfp_mask;
430 	struct bio *bio;
431 	void *p;
432 
433 	/* should not use nobvec bioset for nr_iovecs > 0 */
434 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
435 		return NULL;
436 
437 	/*
438 	 * submit_bio_noacct() converts recursion to iteration; this means if
439 	 * we're running beneath it, any bios we allocate and submit will not be
440 	 * submitted (and thus freed) until after we return.
441 	 *
442 	 * This exposes us to a potential deadlock if we allocate multiple bios
443 	 * from the same bio_set() while running underneath submit_bio_noacct().
444 	 * If we were to allocate multiple bios (say a stacking block driver
445 	 * that was splitting bios), we would deadlock if we exhausted the
446 	 * mempool's reserve.
447 	 *
448 	 * We solve this, and guarantee forward progress, with a rescuer
449 	 * workqueue per bio_set. If we go to allocate and there are bios on
450 	 * current->bio_list, we first try the allocation without
451 	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
452 	 * blocking to the rescuer workqueue before we retry with the original
453 	 * gfp_flags.
454 	 */
455 	if (current->bio_list &&
456 	    (!bio_list_empty(&current->bio_list[0]) ||
457 	     !bio_list_empty(&current->bio_list[1])) &&
458 	    bs->rescue_workqueue)
459 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
460 
461 	p = mempool_alloc(&bs->bio_pool, gfp_mask);
462 	if (!p && gfp_mask != saved_gfp) {
463 		punt_bios_to_rescuer(bs);
464 		gfp_mask = saved_gfp;
465 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
466 	}
467 	if (unlikely(!p))
468 		return NULL;
469 
470 	bio = p + bs->front_pad;
471 	if (nr_iovecs > BIO_INLINE_VECS) {
472 		struct bio_vec *bvl = NULL;
473 
474 		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
475 		if (!bvl && gfp_mask != saved_gfp) {
476 			punt_bios_to_rescuer(bs);
477 			gfp_mask = saved_gfp;
478 			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
479 		}
480 		if (unlikely(!bvl))
481 			goto err_free;
482 
483 		bio_init(bio, bvl, nr_iovecs);
484 	} else if (nr_iovecs) {
485 		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
486 	} else {
487 		bio_init(bio, NULL, 0);
488 	}
489 
490 	bio->bi_pool = bs;
491 	return bio;
492 
493 err_free:
494 	mempool_free(p, &bs->bio_pool);
495 	return NULL;
496 }
497 EXPORT_SYMBOL(bio_alloc_bioset);
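
/*
 * Illustrative sketch (not part of the original file): allocating from a
 * driver-private bio_set, as the guidelines above recommend for callers that
 * need forward-progress guarantees.  "example_bs", the device and the sector
 * are hypothetical.
 */
static struct bio *__maybe_unused example_alloc_from_bioset(struct bio_set *example_bs,
							    struct block_device *bdev,
							    sector_t sector)
{
	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot fail */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, example_bs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	return bio;	/* the caller adds pages and submits */
}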
498 
499 /**
500  * bio_kmalloc - kmalloc a bio for I/O
501  * @gfp_mask:   the GFP_* mask given to the slab allocator
502  * @nr_iovecs:	number of iovecs to pre-allocate
503  *
504  * Use kmalloc to allocate and initialize a bio.
505  *
506  * Returns: Pointer to new bio on success, NULL on failure.
507  */
508 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
509 {
510 	struct bio *bio;
511 
512 	if (nr_iovecs > UIO_MAXIOV)
513 		return NULL;
514 
515 	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
516 	if (unlikely(!bio))
517 		return NULL;
518 	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
519 	bio->bi_pool = NULL;
520 	return bio;
521 }
522 EXPORT_SYMBOL(bio_kmalloc);
523 
524 void zero_fill_bio(struct bio *bio)
525 {
526 	unsigned long flags;
527 	struct bio_vec bv;
528 	struct bvec_iter iter;
529 
530 	bio_for_each_segment(bv, bio, iter) {
531 		char *data = bvec_kmap_irq(&bv, &flags);
532 		memset(data, 0, bv.bv_len);
533 		flush_dcache_page(bv.bv_page);
534 		bvec_kunmap_irq(data, &flags);
535 	}
536 }
537 EXPORT_SYMBOL(zero_fill_bio);
538 
539 /**
540  * bio_truncate - truncate the bio to the new size @new_size
541  * @bio:	the bio to be truncated
542  * @new_size:	new size for truncating the bio
543  *
544  * Description:
545  *   Truncate the bio to the new size @new_size. If bio_op(bio) is
546  *   REQ_OP_READ, zero the truncated part. This function should only
547  *   be used for handling corner cases, such as bio eod.
548  */
549 void bio_truncate(struct bio *bio, unsigned new_size)
550 {
551 	struct bio_vec bv;
552 	struct bvec_iter iter;
553 	unsigned int done = 0;
554 	bool truncated = false;
555 
556 	if (new_size >= bio->bi_iter.bi_size)
557 		return;
558 
559 	if (bio_op(bio) != REQ_OP_READ)
560 		goto exit;
561 
562 	bio_for_each_segment(bv, bio, iter) {
563 		if (done + bv.bv_len > new_size) {
564 			unsigned offset;
565 
566 			if (!truncated)
567 				offset = new_size - done;
568 			else
569 				offset = 0;
570 			zero_user(bv.bv_page, offset, bv.bv_len - offset);
571 			truncated = true;
572 		}
573 		done += bv.bv_len;
574 	}
575 
576  exit:
577 	/*
578 	 * Don't touch the bvec table here and keep it really immutable, since
579 	 * a fs bio user has to retrieve all pages via bio_for_each_segment_all
580 	 * in its ->bi_end_io() callback.
581 	 *
582 	 * It is enough to truncate the bio by updating .bi_size, since drivers
583 	 * can build correct bvecs from the updated .bi_size.
584 	 */
585 	bio->bi_iter.bi_size = new_size;
586 }
587 
588 /**
589  * guard_bio_eod - truncate a BIO to fit the block device
590  * @bio:	bio to truncate
591  *
592  * This allows us to do IO even on the odd last sectors of a device, even if the
593  * block size is some multiple of the physical sector size.
594  *
595  * We'll just truncate the bio to the size of the device, and clear the end of
596  * the buffer head manually.  Truly out-of-range accesses will turn into actual
597  * I/O errors, this only handles the "we need to be able to do I/O at the final
598  * sector" case.
599  */
600 void guard_bio_eod(struct bio *bio)
601 {
602 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
603 
604 	if (!maxsector)
605 		return;
606 
607 	/*
608 	 * If the *whole* IO is past the end of the device,
609 	 * let it through, and the IO layer will turn it into
610 	 * an EIO.
611 	 */
612 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
613 		return;
614 
615 	maxsector -= bio->bi_iter.bi_sector;
616 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
617 		return;
618 
619 	bio_truncate(bio, maxsector << 9);
620 }
621 
622 /**
623  * bio_put - release a reference to a bio
624  * @bio:   bio to release reference to
625  *
626  * Description:
627  *   Put a reference to a &struct bio, either one you have gotten with
628  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
629  **/
630 void bio_put(struct bio *bio)
631 {
632 	if (!bio_flagged(bio, BIO_REFFED))
633 		bio_free(bio);
634 	else {
635 		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
636 
637 		/*
638 		 * last put frees it
639 		 */
640 		if (atomic_dec_and_test(&bio->__bi_cnt))
641 			bio_free(bio);
642 	}
643 }
644 EXPORT_SYMBOL(bio_put);
645 
646 /**
647  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
648  * 	@bio: destination bio
649  * 	@bio_src: bio to clone
650  *
651  *	Clone a &bio. Caller will own the returned bio, but not
652  *	the actual data it points to. Reference count of returned
653  * 	bio will be one.
654  *
655  * 	Caller must ensure that @bio_src is not freed before @bio.
656  */
657 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
658 {
659 	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);
660 
661 	/*
662 	 * most users will be overriding ->bi_bdev with a new target,
663 	 * so we don't set nor calculate new physical/hw segment counts here
664 	 */
665 	bio->bi_bdev = bio_src->bi_bdev;
666 	bio_set_flag(bio, BIO_CLONED);
667 	if (bio_flagged(bio_src, BIO_THROTTLED))
668 		bio_set_flag(bio, BIO_THROTTLED);
669 	if (bio_flagged(bio_src, BIO_REMAPPED))
670 		bio_set_flag(bio, BIO_REMAPPED);
671 	bio->bi_opf = bio_src->bi_opf;
672 	bio->bi_ioprio = bio_src->bi_ioprio;
673 	bio->bi_write_hint = bio_src->bi_write_hint;
674 	bio->bi_iter = bio_src->bi_iter;
675 	bio->bi_io_vec = bio_src->bi_io_vec;
676 
677 	bio_clone_blkg_association(bio, bio_src);
678 	blkcg_bio_issue_init(bio);
679 }
680 EXPORT_SYMBOL(__bio_clone_fast);
681 
682 /**
683  *	bio_clone_fast - clone a bio that shares the original bio's biovec
684  *	@bio: bio to clone
685  *	@gfp_mask: allocation priority
686  *	@bs: bio_set to allocate from
687  *
688  * 	Like __bio_clone_fast(), but also allocates the returned bio.
689  */
690 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
691 {
692 	struct bio *b;
693 
694 	b = bio_alloc_bioset(gfp_mask, 0, bs);
695 	if (!b)
696 		return NULL;
697 
698 	__bio_clone_fast(b, bio);
699 
700 	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
701 		goto err_put;
702 
703 	if (bio_integrity(bio) &&
704 	    bio_integrity_clone(b, bio, gfp_mask) < 0)
705 		goto err_put;
706 
707 	return b;
708 
709 err_put:
710 	bio_put(b);
711 	return NULL;
712 }
713 EXPORT_SYMBOL(bio_clone_fast);
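
/*
 * Illustrative sketch (not part of the original file): how a stacking driver
 * might clone an incoming bio and remap it onto a lower device.  The lower
 * device, sector offset and bio_set are hypothetical.
 */
static struct bio *__maybe_unused example_clone_and_remap(struct bio *bio,
							  struct block_device *lower_bdev,
							  sector_t start,
							  struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;
	bio_set_dev(clone, lower_bdev);
	clone->bi_iter.bi_sector += start;	/* remap onto the lower device */
	/* the caller sets clone->bi_end_io/bi_private and submits the clone */
	return clone;
}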
714 
715 const char *bio_devname(struct bio *bio, char *buf)
716 {
717 	return bdevname(bio->bi_bdev, buf);
718 }
719 EXPORT_SYMBOL(bio_devname);
720 
721 static inline bool page_is_mergeable(const struct bio_vec *bv,
722 		struct page *page, unsigned int len, unsigned int off,
723 		bool *same_page)
724 {
725 	size_t bv_end = bv->bv_offset + bv->bv_len;
726 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
727 	phys_addr_t page_addr = page_to_phys(page);
728 
729 	if (vec_end_addr + 1 != page_addr + off)
730 		return false;
731 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
732 		return false;
733 
734 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
735 	if (*same_page)
736 		return true;
737 	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
738 }
739 
740 /*
741  * Try to merge a page into a segment, while obeying the hardware segment
742  * size limit.  This is not for normal read/write bios, but for passthrough
743  * or Zone Append operations that we can't split.
744  */
745 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
746 				 struct page *page, unsigned len,
747 				 unsigned offset, bool *same_page)
748 {
749 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
750 	unsigned long mask = queue_segment_boundary(q);
751 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
752 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
753 
754 	if ((addr1 | mask) != (addr2 | mask))
755 		return false;
756 	if (bv->bv_len + len > queue_max_segment_size(q))
757 		return false;
758 	return __bio_try_merge_page(bio, page, len, offset, same_page);
759 }
760 
761 /**
762  * bio_add_hw_page - attempt to add a page to a bio with hw constraints
763  * @q: the target queue
764  * @bio: destination bio
765  * @page: page to add
766  * @len: vec entry length
767  * @offset: vec entry offset
768  * @max_sectors: maximum number of sectors that can be added
769  * @same_page: return if the segment has been merged inside the same page
770  *
771  * Add a page to a bio while respecting the hardware max_sectors, max_segment
772  * and gap limitations.
773  */
774 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
775 		struct page *page, unsigned int len, unsigned int offset,
776 		unsigned int max_sectors, bool *same_page)
777 {
778 	struct bio_vec *bvec;
779 
780 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
781 		return 0;
782 
783 	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
784 		return 0;
785 
786 	if (bio->bi_vcnt > 0) {
787 		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
788 			return len;
789 
790 		/*
791 		 * If the queue doesn't support SG gaps and adding this segment
792 		 * would create a gap, disallow it.
793 		 */
794 		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
795 		if (bvec_gap_to_prev(q, bvec, offset))
796 			return 0;
797 	}
798 
799 	if (bio_full(bio, len))
800 		return 0;
801 
802 	if (bio->bi_vcnt >= queue_max_segments(q))
803 		return 0;
804 
805 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
806 	bvec->bv_page = page;
807 	bvec->bv_len = len;
808 	bvec->bv_offset = offset;
809 	bio->bi_vcnt++;
810 	bio->bi_iter.bi_size += len;
811 	return len;
812 }
813 
814 /**
815  * bio_add_pc_page	- attempt to add page to passthrough bio
816  * @q: the target queue
817  * @bio: destination bio
818  * @page: page to add
819  * @len: vec entry length
820  * @offset: vec entry offset
821  *
822  * Attempt to add a page to the bio_vec maplist. This can fail for a
823  * number of reasons, such as the bio being full or target block device
824  * limitations. The target block device must allow bios up to PAGE_SIZE,
825  * so it is always possible to add a single page to an empty bio.
826  *
827  * This should only be used by passthrough bios.
828  */
829 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
830 		struct page *page, unsigned int len, unsigned int offset)
831 {
832 	bool same_page = false;
833 	return bio_add_hw_page(q, bio, page, len, offset,
834 			queue_max_hw_sectors(q), &same_page);
835 }
836 EXPORT_SYMBOL(bio_add_pc_page);
837 
838 /**
839  * bio_add_zone_append_page - attempt to add page to zone-append bio
840  * @bio: destination bio
841  * @page: page to add
842  * @len: vec entry length
843  * @offset: vec entry offset
844  *
845  * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
846  * for a zone-append request. This can fail for a number of reasons, such as
847  * the bio being full, the target block device not being a zoned block device,
848  * or other limitations of the target block device. The target block device
849  * must allow bios up to PAGE_SIZE, so it is always possible to add a single page
850  * to an empty bio.
851  *
852  * Returns: number of bytes added to the bio, or 0 in case of a failure.
853  */
854 int bio_add_zone_append_page(struct bio *bio, struct page *page,
855 			     unsigned int len, unsigned int offset)
856 {
857 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
858 	bool same_page = false;
859 
860 	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
861 		return 0;
862 
863 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
864 		return 0;
865 
866 	return bio_add_hw_page(q, bio, page, len, offset,
867 			       queue_max_zone_append_sectors(q), &same_page);
868 }
869 EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
870 
871 /**
872  * __bio_try_merge_page - try appending data to an existing bvec.
873  * @bio: destination bio
874  * @page: start page to add
875  * @len: length of the data to add
876  * @off: offset of the data relative to @page
877  * @same_page: return if the segment has been merged inside the same page
878  *
879  * Try to add the data at @page + @off to the last bvec of @bio.  This is a
880  * useful optimisation for file systems with a block size smaller than the
881  * page size.
882  *
883  * Warn if (@len, @off) crosses pages when @same_page is true.
884  *
885  * Return %true on success or %false on failure.
886  */
887 bool __bio_try_merge_page(struct bio *bio, struct page *page,
888 		unsigned int len, unsigned int off, bool *same_page)
889 {
890 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
891 		return false;
892 
893 	if (bio->bi_vcnt > 0) {
894 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
895 
896 		if (page_is_mergeable(bv, page, len, off, same_page)) {
897 			if (bio->bi_iter.bi_size > UINT_MAX - len) {
898 				*same_page = false;
899 				return false;
900 			}
901 			bv->bv_len += len;
902 			bio->bi_iter.bi_size += len;
903 			return true;
904 		}
905 	}
906 	return false;
907 }
908 EXPORT_SYMBOL_GPL(__bio_try_merge_page);
909 
910 /**
911  * __bio_add_page - add page(s) to a bio in a new segment
912  * @bio: destination bio
913  * @page: start page to add
914  * @len: length of the data to add, may cross pages
915  * @off: offset of the data relative to @page, may cross pages
916  *
917  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
918  * that @bio has space for another bvec.
919  */
920 void __bio_add_page(struct bio *bio, struct page *page,
921 		unsigned int len, unsigned int off)
922 {
923 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
924 
925 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
926 	WARN_ON_ONCE(bio_full(bio, len));
927 
928 	bv->bv_page = page;
929 	bv->bv_offset = off;
930 	bv->bv_len = len;
931 
932 	bio->bi_iter.bi_size += len;
933 	bio->bi_vcnt++;
934 
935 	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
936 		bio_set_flag(bio, BIO_WORKINGSET);
937 }
938 EXPORT_SYMBOL_GPL(__bio_add_page);
939 
940 /**
941  *	bio_add_page	-	attempt to add page(s) to bio
942  *	@bio: destination bio
943  *	@page: start page to add
944  *	@len: vec entry length, may cross pages
945  *	@offset: vec entry offset relative to @page, may cross pages
946  *
947  *	Attempt to add page(s) to the bio_vec maplist. This will only fail
948  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
949  */
950 int bio_add_page(struct bio *bio, struct page *page,
951 		 unsigned int len, unsigned int offset)
952 {
953 	bool same_page = false;
954 
955 	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
956 		if (bio_full(bio, len))
957 			return 0;
958 		__bio_add_page(bio, page, len, offset);
959 	}
960 	return len;
961 }
962 EXPORT_SYMBOL(bio_add_page);
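
/*
 * Illustrative sketch (not part of the original file): filling a bio one page
 * at a time and stopping once bio_add_page() reports that there is no more
 * room.  The page array is hypothetical.
 */
static unsigned int __maybe_unused example_add_pages(struct bio *bio,
						     struct page **pages,
						     unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bio full (bi_vcnt == bi_max_vecs) or cloned */
	return i;		/* number of pages actually added */
}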
963 
964 void bio_release_pages(struct bio *bio, bool mark_dirty)
965 {
966 	struct bvec_iter_all iter_all;
967 	struct bio_vec *bvec;
968 
969 	if (bio_flagged(bio, BIO_NO_PAGE_REF))
970 		return;
971 
972 	bio_for_each_segment_all(bvec, bio, iter_all) {
973 		if (mark_dirty && !PageCompound(bvec->bv_page))
974 			set_page_dirty_lock(bvec->bv_page);
975 		put_page(bvec->bv_page);
976 	}
977 }
978 EXPORT_SYMBOL_GPL(bio_release_pages);
979 
980 static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
981 {
982 	WARN_ON_ONCE(bio->bi_max_vecs);
983 
984 	bio->bi_vcnt = iter->nr_segs;
985 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
986 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
987 	bio->bi_iter.bi_size = iter->count;
988 	bio_set_flag(bio, BIO_NO_PAGE_REF);
989 	bio_set_flag(bio, BIO_CLONED);
990 }
991 
992 static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
993 {
994 	__bio_iov_bvec_set(bio, iter);
995 	iov_iter_advance(iter, iter->count);
996 	return 0;
997 }
998 
999 static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
1000 {
1001 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1002 	struct iov_iter i = *iter;
1003 
1004 	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
1005 	__bio_iov_bvec_set(bio, &i);
1006 	iov_iter_advance(iter, i.count);
1007 	return 0;
1008 }
1009 
1010 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1011 
1012 /**
1013  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1014  * @bio: bio to add pages to
1015  * @iter: iov iterator describing the region to be mapped
1016  *
1017  * Pins pages from *iter and appends them to @bio's bvec array. The
1018  * pages will have to be released using put_page() when done.
1019  * For multi-segment *iter, this function only adds pages from the
1020  * next non-empty segment of the iov iterator.
1021  */
1022 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1023 {
1024 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1025 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1026 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1027 	struct page **pages = (struct page **)bv;
1028 	bool same_page = false;
1029 	ssize_t size, left;
1030 	unsigned len, i;
1031 	size_t offset;
1032 
1033 	/*
1034 	 * Move page array up in the allocated memory for the bio vecs as far as
1035 	 * possible so that we can start filling biovecs from the beginning
1036 	 * without overwriting the temporary page array.
1037 	 */
1038 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1039 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1040 
1041 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1042 	if (unlikely(size <= 0))
1043 		return size ? size : -EFAULT;
1044 
1045 	for (left = size, i = 0; left > 0; left -= len, i++) {
1046 		struct page *page = pages[i];
1047 
1048 		len = min_t(size_t, PAGE_SIZE - offset, left);
1049 
1050 		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1051 			if (same_page)
1052 				put_page(page);
1053 		} else {
1054 			if (WARN_ON_ONCE(bio_full(bio, len)))
1055 				return -EINVAL;
1056 			__bio_add_page(bio, page, len, offset);
1057 		}
1058 		offset = 0;
1059 	}
1060 
1061 	iov_iter_advance(iter, size);
1062 	return 0;
1063 }
1064 
1065 static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1066 {
1067 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1068 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1069 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1070 	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
1071 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1072 	struct page **pages = (struct page **)bv;
1073 	ssize_t size, left;
1074 	unsigned len, i;
1075 	size_t offset;
1076 	int ret = 0;
1077 
1078 	if (WARN_ON_ONCE(!max_append_sectors))
1079 		return 0;
1080 
1081 	/*
1082 	 * Move page array up in the allocated memory for the bio vecs as far as
1083 	 * possible so that we can start filling biovecs from the beginning
1084 	 * without overwriting the temporary page array.
1085 	 */
1086 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1087 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1088 
1089 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1090 	if (unlikely(size <= 0))
1091 		return size ? size : -EFAULT;
1092 
1093 	for (left = size, i = 0; left > 0; left -= len, i++) {
1094 		struct page *page = pages[i];
1095 		bool same_page = false;
1096 
1097 		len = min_t(size_t, PAGE_SIZE - offset, left);
1098 		if (bio_add_hw_page(q, bio, page, len, offset,
1099 				max_append_sectors, &same_page) != len) {
1100 			ret = -EINVAL;
1101 			break;
1102 		}
1103 		if (same_page)
1104 			put_page(page);
1105 		offset = 0;
1106 	}
1107 
1108 	iov_iter_advance(iter, size - left);
1109 	return ret;
1110 }
1111 
1112 /**
1113  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1114  * @bio: bio to add pages to
1115  * @iter: iov iterator describing the region to be added
1116  *
1117  * This takes either an iterator pointing to user memory, or one pointing to
1118  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1119  * map them into the kernel. On IO completion, the caller should put those
1120  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1121  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1122  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1123  * completed by a call to ->ki_complete() or returns with an error other than
1124  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1125  * on IO completion. If it isn't, then pages should be released.
1126  *
1127  * The function tries, but does not guarantee, to pin as many pages as
1128  * fit into the bio, or are requested in @iter, whatever is smaller. If
1129  * MM encounters an error pinning the requested pages, it stops. Error
1130  * is returned only if 0 pages could be pinned.
1131  *
1132  * It's intended for direct IO, so it doesn't do PSI tracking; the caller is
1133  * responsible for setting BIO_WORKINGSET if necessary.
1134  */
1135 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1136 {
1137 	int ret = 0;
1138 
1139 	if (iov_iter_is_bvec(iter)) {
1140 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1141 			return bio_iov_bvec_set_append(bio, iter);
1142 		return bio_iov_bvec_set(bio, iter);
1143 	}
1144 
1145 	do {
1146 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1147 			ret = __bio_iov_append_get_pages(bio, iter);
1148 		else
1149 			ret = __bio_iov_iter_get_pages(bio, iter);
1150 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1151 
1152 	/* don't account direct I/O as memory stall */
1153 	bio_clear_flag(bio, BIO_WORKINGSET);
1154 	return bio->bi_vcnt ? 0 : ret;
1155 }
1156 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
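
/*
 * Illustrative sketch (not part of the original file): the usual direct-IO
 * shape - pin pages from the iterator into the bio, submit it, and release
 * the pages on completion unless BIO_NO_PAGE_REF is set.  Error handling is
 * reduced to the essentials.
 */
static int __maybe_unused example_dio_submit(struct bio *bio,
					     struct iov_iter *iter)
{
	int ret = bio_iov_iter_get_pages(bio, iter);

	if (ret)
		return ret;	/* no pages could be pinned */

	/*
	 * The completion path (->bi_end_io) would normally call
	 * bio_release_pages(bio, bio_data_dir(bio) == READ) and bio_put().
	 */
	submit_bio(bio);
	return 0;
}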
1157 
1158 static void submit_bio_wait_endio(struct bio *bio)
1159 {
1160 	complete(bio->bi_private);
1161 }
1162 
1163 /**
1164  * submit_bio_wait - submit a bio, and wait until it completes
1165  * @bio: The &struct bio which describes the I/O
1166  *
1167  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1168  * bio_endio() on failure.
1169  *
1170  * WARNING: Unlike how submit_bio() is usually used, this function does not
1171  * consume the bio reference. The caller must drop the reference
1172  * on their own.
1173  */
1174 int submit_bio_wait(struct bio *bio)
1175 {
1176 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1177 			bio->bi_bdev->bd_disk->lockdep_map);
1178 	unsigned long hang_check;
1179 
1180 	bio->bi_private = &done;
1181 	bio->bi_end_io = submit_bio_wait_endio;
1182 	bio->bi_opf |= REQ_SYNC;
1183 	submit_bio(bio);
1184 
1185 	/* Prevent hang_check timer from firing at us during very long I/O */
1186 	hang_check = sysctl_hung_task_timeout_secs;
1187 	if (hang_check)
1188 		while (!wait_for_completion_io_timeout(&done,
1189 					hang_check * (HZ/2)))
1190 			;
1191 	else
1192 		wait_for_completion_io(&done);
1193 
1194 	return blk_status_to_errno(bio->bi_status);
1195 }
1196 EXPORT_SYMBOL(submit_bio_wait);
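
/*
 * Illustrative sketch (not part of the original file): a synchronous flush,
 * similar in shape to blkdev_issue_flush().  Note the explicit bio_put() -
 * as the warning above says, submit_bio_wait() does not consume the caller's
 * reference.
 */
static int __maybe_unused example_sync_flush(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	ret = submit_bio_wait(bio);
	bio_put(bio);		/* drop the reference we still own */
	return ret;
}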
1197 
1198 /**
1199  * bio_advance - increment/complete a bio by some number of bytes
1200  * @bio:	bio to advance
1201  * @bytes:	number of bytes to complete
1202  *
1203  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1204  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1205  * be updated on the last bvec as well.
1206  *
1207  * @bio will then represent the remaining, uncompleted portion of the io.
1208  */
1209 void bio_advance(struct bio *bio, unsigned bytes)
1210 {
1211 	if (bio_integrity(bio))
1212 		bio_integrity_advance(bio, bytes);
1213 
1214 	bio_crypt_advance(bio, bytes);
1215 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1216 }
1217 EXPORT_SYMBOL(bio_advance);
1218 
1219 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1220 			struct bio *src, struct bvec_iter *src_iter)
1221 {
1222 	struct bio_vec src_bv, dst_bv;
1223 	void *src_p, *dst_p;
1224 	unsigned bytes;
1225 
1226 	while (src_iter->bi_size && dst_iter->bi_size) {
1227 		src_bv = bio_iter_iovec(src, *src_iter);
1228 		dst_bv = bio_iter_iovec(dst, *dst_iter);
1229 
1230 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
1231 
1232 		src_p = kmap_atomic(src_bv.bv_page);
1233 		dst_p = kmap_atomic(dst_bv.bv_page);
1234 
1235 		memcpy(dst_p + dst_bv.bv_offset,
1236 		       src_p + src_bv.bv_offset,
1237 		       bytes);
1238 
1239 		kunmap_atomic(dst_p);
1240 		kunmap_atomic(src_p);
1241 
1242 		flush_dcache_page(dst_bv.bv_page);
1243 
1244 		bio_advance_iter_single(src, src_iter, bytes);
1245 		bio_advance_iter_single(dst, dst_iter, bytes);
1246 	}
1247 }
1248 EXPORT_SYMBOL(bio_copy_data_iter);
1249 
1250 /**
1251  * bio_copy_data - copy contents of data buffers from one bio to another
1252  * @src: source bio
1253  * @dst: destination bio
1254  *
1255  * Stops when it reaches the end of either @src or @dst - that is, copies
1256  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1257  */
1258 void bio_copy_data(struct bio *dst, struct bio *src)
1259 {
1260 	struct bvec_iter src_iter = src->bi_iter;
1261 	struct bvec_iter dst_iter = dst->bi_iter;
1262 
1263 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1264 }
1265 EXPORT_SYMBOL(bio_copy_data);
1266 
1267 void bio_free_pages(struct bio *bio)
1268 {
1269 	struct bio_vec *bvec;
1270 	struct bvec_iter_all iter_all;
1271 
1272 	bio_for_each_segment_all(bvec, bio, iter_all)
1273 		__free_page(bvec->bv_page);
1274 }
1275 EXPORT_SYMBOL(bio_free_pages);
1276 
1277 /*
1278  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1279  * for performing direct-IO in BIOs.
1280  *
1281  * The problem is that we cannot run set_page_dirty() from interrupt context
1282  * because the required locks are not interrupt-safe.  So what we can do is to
1283  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1284  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1285  * in process context.
1286  *
1287  * We special-case compound pages here: normally this means reads into hugetlb
1288  * pages.  The logic in here doesn't really work right for compound pages
1289  * because the VM does not uniformly chase down the head page in all cases.
1290  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1291  * handle them at all.  So we skip compound pages here at an early stage.
1292  *
1293  * Note that this code is very hard to test under normal circumstances because
1294  * direct-io pins the pages with get_user_pages().  This makes
1295  * is_page_cache_freeable return false, and the VM will not clean the pages.
1296  * But other code (eg, flusher threads) could clean the pages if they are mapped
1297  * pagecache.
1298  *
1299  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1300  * deferred bio dirtying paths.
1301  */
1302 
1303 /*
1304  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1305  */
1306 void bio_set_pages_dirty(struct bio *bio)
1307 {
1308 	struct bio_vec *bvec;
1309 	struct bvec_iter_all iter_all;
1310 
1311 	bio_for_each_segment_all(bvec, bio, iter_all) {
1312 		if (!PageCompound(bvec->bv_page))
1313 			set_page_dirty_lock(bvec->bv_page);
1314 	}
1315 }
1316 
1317 /*
1318  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1319  * If they are, then fine.  If, however, some pages are clean then they must
1320  * have been written out during the direct-IO read.  So we take another ref on
1321  * the BIO and re-dirty the pages in process context.
1322  *
1323  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1324  * here on.  It will run one put_page() against each page and will run one
1325  * bio_put() against the BIO.
1326  */
1327 
1328 static void bio_dirty_fn(struct work_struct *work);
1329 
1330 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1331 static DEFINE_SPINLOCK(bio_dirty_lock);
1332 static struct bio *bio_dirty_list;
1333 
1334 /*
1335  * This runs in process context
1336  */
1337 static void bio_dirty_fn(struct work_struct *work)
1338 {
1339 	struct bio *bio, *next;
1340 
1341 	spin_lock_irq(&bio_dirty_lock);
1342 	next = bio_dirty_list;
1343 	bio_dirty_list = NULL;
1344 	spin_unlock_irq(&bio_dirty_lock);
1345 
1346 	while ((bio = next) != NULL) {
1347 		next = bio->bi_private;
1348 
1349 		bio_release_pages(bio, true);
1350 		bio_put(bio);
1351 	}
1352 }
1353 
1354 void bio_check_pages_dirty(struct bio *bio)
1355 {
1356 	struct bio_vec *bvec;
1357 	unsigned long flags;
1358 	struct bvec_iter_all iter_all;
1359 
1360 	bio_for_each_segment_all(bvec, bio, iter_all) {
1361 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1362 			goto defer;
1363 	}
1364 
1365 	bio_release_pages(bio, false);
1366 	bio_put(bio);
1367 	return;
1368 defer:
1369 	spin_lock_irqsave(&bio_dirty_lock, flags);
1370 	bio->bi_private = bio_dirty_list;
1371 	bio_dirty_list = bio;
1372 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1373 	schedule_work(&bio_dirty_work);
1374 }
1375 
1376 static inline bool bio_remaining_done(struct bio *bio)
1377 {
1378 	/*
1379 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1380 	 * we always end io on the first invocation.
1381 	 */
1382 	if (!bio_flagged(bio, BIO_CHAIN))
1383 		return true;
1384 
1385 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1386 
1387 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1388 		bio_clear_flag(bio, BIO_CHAIN);
1389 		return true;
1390 	}
1391 
1392 	return false;
1393 }
1394 
1395 /**
1396  * bio_endio - end I/O on a bio
1397  * @bio:	bio
1398  *
1399  * Description:
1400  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1401  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1402  *   bio unless they own it and thus know that it has an end_io function.
1403  *
1404  *   bio_endio() can be called several times on a bio that has been chained
1405  *   using bio_chain().  The ->bi_end_io() function will only be called the
1406  *   last time.
1407  **/
1408 void bio_endio(struct bio *bio)
1409 {
1410 again:
1411 	if (!bio_remaining_done(bio))
1412 		return;
1413 	if (!bio_integrity_endio(bio))
1414 		return;
1415 
1416 	if (bio->bi_bdev)
1417 		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
1418 
1419 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1420 		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
1421 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1422 	}
1423 
1424 	/*
1425 	 * Need to have a real endio function for chained bios, otherwise
1426 	 * various corner cases will break (like stacking block devices that
1427 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1428 	 * recursion and blowing the stack. Tail call optimization would
1429 	 * handle this, but compiling with frame pointers also disables
1430 	 * gcc's sibling call optimization.
1431 	 */
1432 	if (bio->bi_end_io == bio_chain_endio) {
1433 		bio = __bio_chain_endio(bio);
1434 		goto again;
1435 	}
1436 
1437 	blk_throtl_bio_endio(bio);
1438 	/* release cgroup info */
1439 	bio_uninit(bio);
1440 	if (bio->bi_end_io)
1441 		bio->bi_end_io(bio);
1442 }
1443 EXPORT_SYMBOL(bio_endio);
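
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->bi_end_io() handler.  It runs once for the whole bio (or chain),
 * inspects bi_status and drops the submitter's reference.  The completion
 * stored in bi_private is hypothetical.
 */
static void __maybe_unused example_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	if (bio->bi_status)
		pr_debug("example: I/O failed: %d\n",
			 blk_status_to_errno(bio->bi_status));
	complete(done);
	bio_put(bio);
}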
1444 
1445 /**
1446  * bio_split - split a bio
1447  * @bio:	bio to split
1448  * @sectors:	number of sectors to split from the front of @bio
1449  * @gfp:	gfp mask
1450  * @bs:		bio set to allocate from
1451  *
1452  * Allocates and returns a new bio which represents @sectors from the start of
1453  * @bio, and updates @bio to represent the remaining sectors.
1454  *
1455  * Unless this is a discard request the newly allocated bio will point
1456  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1457  * neither @bio nor @bs are freed before the split bio.
1458  */
1459 struct bio *bio_split(struct bio *bio, int sectors,
1460 		      gfp_t gfp, struct bio_set *bs)
1461 {
1462 	struct bio *split;
1463 
1464 	BUG_ON(sectors <= 0);
1465 	BUG_ON(sectors >= bio_sectors(bio));
1466 
1467 	/* Zone append commands cannot be split */
1468 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1469 		return NULL;
1470 
1471 	split = bio_clone_fast(bio, gfp, bs);
1472 	if (!split)
1473 		return NULL;
1474 
1475 	split->bi_iter.bi_size = sectors << 9;
1476 
1477 	if (bio_integrity(split))
1478 		bio_integrity_trim(split);
1479 
1480 	bio_advance(bio, split->bi_iter.bi_size);
1481 
1482 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1483 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1484 
1485 	return split;
1486 }
1487 EXPORT_SYMBOL(bio_split);
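
/*
 * Illustrative sketch (not part of the original file): the classic split
 * pattern used by drivers that cannot service a bio in one go - carve off
 * the front, chain it to the remainder and submit both.  The caller ensures
 * 0 < sectors < bio_sectors(bio).
 */
static void __maybe_unused example_split_and_submit(struct bio *bio,
						    int sectors,
						    struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (!split)
		return;
	bio_chain(split, bio);		/* bio completes only after split does */
	submit_bio_noacct(split);
	submit_bio_noacct(bio);		/* the remaining sectors */
}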
1488 
1489 /**
1490  * bio_trim - trim a bio
1491  * @bio:	bio to trim
1492  * @offset:	number of sectors to trim from the front of @bio
1493  * @size:	size we want to trim @bio to, in sectors
1494  */
1495 void bio_trim(struct bio *bio, int offset, int size)
1496 {
1497 	/* 'bio' is a cloned bio which we need to trim to match
1498 	 * the given offset and size.
1499 	 */
1500 
1501 	size <<= 9;
1502 	if (offset == 0 && size == bio->bi_iter.bi_size)
1503 		return;
1504 
1505 	bio_advance(bio, offset << 9);
1506 	bio->bi_iter.bi_size = size;
1507 
1508 	if (bio_integrity(bio))
1509 		bio_integrity_trim(bio);
1510 
1511 }
1512 EXPORT_SYMBOL_GPL(bio_trim);
1513 
1514 /*
1515  * create memory pools for biovec's in a bio_set.
1516  * use the global biovec slabs created for general use.
1517  */
1518 int biovec_init_pool(mempool_t *pool, int pool_entries)
1519 {
1520 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1521 
1522 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1523 }
1524 
1525 /*
1526  * bioset_exit - exit a bioset initialized with bioset_init()
1527  *
1528  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1529  * kzalloc()).
1530  */
1531 void bioset_exit(struct bio_set *bs)
1532 {
1533 	if (bs->rescue_workqueue)
1534 		destroy_workqueue(bs->rescue_workqueue);
1535 	bs->rescue_workqueue = NULL;
1536 
1537 	mempool_exit(&bs->bio_pool);
1538 	mempool_exit(&bs->bvec_pool);
1539 
1540 	bioset_integrity_free(bs);
1541 	if (bs->bio_slab)
1542 		bio_put_slab(bs);
1543 	bs->bio_slab = NULL;
1544 }
1545 EXPORT_SYMBOL(bioset_exit);
1546 
1547 /**
1548  * bioset_init - Initialize a bio_set
1549  * @bs:		pool to initialize
1550  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1551  * @front_pad:	Number of bytes to allocate in front of the returned bio
1552  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1553  *              and %BIOSET_NEED_RESCUER
1554  *
1555  * Description:
1556  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1557  *    to ask for a number of bytes to be allocated in front of the bio.
1558  *    Front pad allocation is useful for embedding the bio inside
1559  *    another structure, to avoid allocating extra data to go with the bio.
1560  *    Note that the bio must always be embedded at the END of that structure,
1561  *    or things will break badly.
1562  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1563  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1564  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1565  *    dispatch queued requests when the mempool runs out of space.
1566  *
1567  */
1568 int bioset_init(struct bio_set *bs,
1569 		unsigned int pool_size,
1570 		unsigned int front_pad,
1571 		int flags)
1572 {
1573 	bs->front_pad = front_pad;
1574 	if (flags & BIOSET_NEED_BVECS)
1575 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1576 	else
1577 		bs->back_pad = 0;
1578 
1579 	spin_lock_init(&bs->rescue_lock);
1580 	bio_list_init(&bs->rescue_list);
1581 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1582 
1583 	bs->bio_slab = bio_find_or_create_slab(bs);
1584 	if (!bs->bio_slab)
1585 		return -ENOMEM;
1586 
1587 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1588 		goto bad;
1589 
1590 	if ((flags & BIOSET_NEED_BVECS) &&
1591 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1592 		goto bad;
1593 
1594 	if (!(flags & BIOSET_NEED_RESCUER))
1595 		return 0;
1596 
1597 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1598 	if (!bs->rescue_workqueue)
1599 		goto bad;
1600 
1601 	return 0;
1602 bad:
1603 	bioset_exit(bs);
1604 	return -ENOMEM;
1605 }
1606 EXPORT_SYMBOL(bioset_init);
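
/*
 * Illustrative sketch (not part of the original file): a driver-private
 * bio_set whose front_pad embeds the bio at the end of a per-I/O structure,
 * as the description above requires.  "struct example_io" and the helpers
 * below are hypothetical.
 */
struct example_io {
	void *driver_data;
	struct bio bio;			/* must be the last member */
};

static int __maybe_unused example_bioset_setup(struct bio_set *bs)
{
	return bioset_init(bs, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *__maybe_unused example_io_alloc(struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 0, bs);

	return container_of(bio, struct example_io, bio);
}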
1607 
1608 /*
1609  * Initialize and setup a new bio_set, based on the settings from
1610  * another bio_set.
1611  */
1612 int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1613 {
1614 	int flags;
1615 
1616 	flags = 0;
1617 	if (src->bvec_pool.min_nr)
1618 		flags |= BIOSET_NEED_BVECS;
1619 	if (src->rescue_workqueue)
1620 		flags |= BIOSET_NEED_RESCUER;
1621 
1622 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1623 }
1624 EXPORT_SYMBOL(bioset_init_from_src);
1625 
1626 static int __init init_bio(void)
1627 {
1628 	int i;
1629 
1630 	bio_integrity_init();
1631 
1632 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1633 		struct biovec_slab *bvs = bvec_slabs + i;
1634 
1635 		bvs->slab = kmem_cache_create(bvs->name,
1636 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1637 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1638 	}
1639 
1640 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1641 		panic("bio: can't allocate bios\n");
1642 
1643 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1644 		panic("bio: can't create integrity pool\n");
1645 
1646 	return 0;
1647 }
1648 subsys_initcall(init_bio);
1649