xref: /openbmc/linux/block/bio.c (revision f8b679a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4  */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/blk-cgroup.h>
19 #include <linux/highmem.h>
20 #include <linux/sched/sysctl.h>
21 #include <linux/blk-crypto.h>
22 #include <linux/xarray.h>
23 
24 #include <trace/events/block.h>
25 #include "blk.h"
26 #include "blk-rq-qos.h"
27 
28 static struct biovec_slab {
29 	int nr_vecs;
30 	char *name;
31 	struct kmem_cache *slab;
32 } bvec_slabs[] __read_mostly = {
33 	{ .nr_vecs = 16, .name = "biovec-16" },
34 	{ .nr_vecs = 64, .name = "biovec-64" },
35 	{ .nr_vecs = 128, .name = "biovec-128" },
36 	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
37 };
38 
39 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
40 {
41 	switch (nr_vecs) {
42 	/* smaller bios use inline vecs */
43 	case 5 ... 16:
44 		return &bvec_slabs[0];
45 	case 17 ... 64:
46 		return &bvec_slabs[1];
47 	case 65 ... 128:
48 		return &bvec_slabs[2];
49 	case 129 ... BIO_MAX_VECS:
50 		return &bvec_slabs[3];
51 	default:
52 		BUG();
53 		return NULL;
54 	}
55 }
56 
57 /*
58  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
59  * IO code that does not need private memory pools.
60  */
61 struct bio_set fs_bio_set;
62 EXPORT_SYMBOL(fs_bio_set);
63 
64 /*
65  * Our slab pool management
66  */
67 struct bio_slab {
68 	struct kmem_cache *slab;
69 	unsigned int slab_ref;
70 	unsigned int slab_size;
71 	char name[8];
72 };
73 static DEFINE_MUTEX(bio_slab_lock);
74 static DEFINE_XARRAY(bio_slabs);
75 
76 static struct bio_slab *create_bio_slab(unsigned int size)
77 {
78 	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
79 
80 	if (!bslab)
81 		return NULL;
82 
83 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
84 	bslab->slab = kmem_cache_create(bslab->name, size,
85 			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
86 	if (!bslab->slab)
87 		goto fail_alloc_slab;
88 
89 	bslab->slab_ref = 1;
90 	bslab->slab_size = size;
91 
92 	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
93 		return bslab;
94 
95 	kmem_cache_destroy(bslab->slab);
96 
97 fail_alloc_slab:
98 	kfree(bslab);
99 	return NULL;
100 }
101 
102 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
103 {
104 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
105 }
106 
107 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
108 {
109 	unsigned int size = bs_bio_slab_size(bs);
110 	struct bio_slab *bslab;
111 
112 	mutex_lock(&bio_slab_lock);
113 	bslab = xa_load(&bio_slabs, size);
114 	if (bslab)
115 		bslab->slab_ref++;
116 	else
117 		bslab = create_bio_slab(size);
118 	mutex_unlock(&bio_slab_lock);
119 
120 	if (bslab)
121 		return bslab->slab;
122 	return NULL;
123 }
124 
125 static void bio_put_slab(struct bio_set *bs)
126 {
127 	struct bio_slab *bslab = NULL;
128 	unsigned int slab_size = bs_bio_slab_size(bs);
129 
130 	mutex_lock(&bio_slab_lock);
131 
132 	bslab = xa_load(&bio_slabs, slab_size);
133 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
134 		goto out;
135 
136 	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
137 
138 	WARN_ON(!bslab->slab_ref);
139 
140 	if (--bslab->slab_ref)
141 		goto out;
142 
143 	xa_erase(&bio_slabs, slab_size);
144 
145 	kmem_cache_destroy(bslab->slab);
146 	kfree(bslab);
147 
148 out:
149 	mutex_unlock(&bio_slab_lock);
150 }
151 
152 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
153 {
154 	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);
155 
156 	if (nr_vecs == BIO_MAX_VECS)
157 		mempool_free(bv, pool);
158 	else if (nr_vecs > BIO_INLINE_VECS)
159 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
160 }
161 
162 /*
163  * Make the first allocation restricted and don't dump info on allocation
164  * failures, since we'll fall back to the mempool in case of failure.
165  */
166 static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
167 {
168 	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
169 		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
170 }
171 
172 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
173 		gfp_t gfp_mask)
174 {
175 	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
176 
177 	if (WARN_ON_ONCE(!bvs))
178 		return NULL;
179 
180 	/*
181 	 * Upgrade the nr_vecs request to take full advantage of the allocation.
182 	 * We also rely on this in the bvec_free path.
183 	 */
184 	*nr_vecs = bvs->nr_vecs;
185 
186 	/*
187 	 * Try a slab allocation first for all smaller allocations.  If that
188 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
189 	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
190 	 */
191 	if (*nr_vecs < BIO_MAX_VECS) {
192 		struct bio_vec *bvl;
193 
194 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
195 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
196 			return bvl;
197 		*nr_vecs = BIO_MAX_VECS;
198 	}
199 
200 	return mempool_alloc(pool, gfp_mask);
201 }
202 
203 void bio_uninit(struct bio *bio)
204 {
205 #ifdef CONFIG_BLK_CGROUP
206 	if (bio->bi_blkg) {
207 		blkg_put(bio->bi_blkg);
208 		bio->bi_blkg = NULL;
209 	}
210 #endif
211 	if (bio_integrity(bio))
212 		bio_integrity_free(bio);
213 
214 	bio_crypt_free_ctx(bio);
215 }
216 EXPORT_SYMBOL(bio_uninit);
217 
218 static void bio_free(struct bio *bio)
219 {
220 	struct bio_set *bs = bio->bi_pool;
221 	void *p;
222 
223 	bio_uninit(bio);
224 
225 	if (bs) {
226 		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
227 
228 		/*
229 		 * If we have front padding, adjust the bio pointer before freeing
230 		 */
231 		p = bio;
232 		p -= bs->front_pad;
233 
234 		mempool_free(p, &bs->bio_pool);
235 	} else {
236 		/* Bio was allocated by bio_kmalloc() */
237 		kfree(bio);
238 	}
239 }
240 
241 /*
242  * Users of this function have their own bio allocation. Subsequently,
243  * they must remember to pair any call to bio_init() with bio_uninit()
244  * when IO has completed, or when the bio is released.
245  */
246 void bio_init(struct bio *bio, struct bio_vec *table,
247 	      unsigned short max_vecs)
248 {
249 	memset(bio, 0, sizeof(*bio));
250 	atomic_set(&bio->__bi_remaining, 1);
251 	atomic_set(&bio->__bi_cnt, 1);
252 
253 	bio->bi_io_vec = table;
254 	bio->bi_max_vecs = max_vecs;
255 }
256 EXPORT_SYMBOL(bio_init);
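
/*
 * Typical usage (an illustrative sketch, not code from this file; the helper
 * name and its arguments are placeholders): callers that embed a bio, e.g. on
 * the stack, supply their own bio_vec table and pair bio_init() with
 * bio_uninit() once the I/O has completed.
 *
 *	static int read_one_page(struct block_device *bdev, sector_t sector,
 *				 struct page *page)
 *	{
 *		struct bio_vec bvec;
 *		struct bio bio;
 *		int ret;
 *
 *		bio_init(&bio, &bvec, 1);
 *		bio_set_dev(&bio, bdev);
 *		bio.bi_opf = REQ_OP_READ;
 *		bio.bi_iter.bi_sector = sector;
 *		__bio_add_page(&bio, page, PAGE_SIZE, 0);
 *		ret = submit_bio_wait(&bio);
 *		bio_uninit(&bio);
 *		return ret;
 *	}
 */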
257 
258 /**
259  * bio_reset - reinitialize a bio
260  * @bio:	bio to reset
261  *
262  * Description:
263  *   After calling bio_reset(), @bio will be in the same state as a freshly
264  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
265  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
266  *   comment in struct bio.
267  */
268 void bio_reset(struct bio *bio)
269 {
270 	bio_uninit(bio);
271 	memset(bio, 0, BIO_RESET_BYTES);
272 	atomic_set(&bio->__bi_remaining, 1);
273 }
274 EXPORT_SYMBOL(bio_reset);
275 
276 static struct bio *__bio_chain_endio(struct bio *bio)
277 {
278 	struct bio *parent = bio->bi_private;
279 
280 	if (bio->bi_status && !parent->bi_status)
281 		parent->bi_status = bio->bi_status;
282 	bio_put(bio);
283 	return parent;
284 }
285 
286 static void bio_chain_endio(struct bio *bio)
287 {
288 	bio_endio(__bio_chain_endio(bio));
289 }
290 
291 /**
292  * bio_chain - chain bio completions
293  * @bio: the target bio
294  * @parent: the parent bio of @bio
295  *
296  * The caller won't have a bi_end_io called when @bio completes - instead,
297  * @parent's bi_end_io won't be called until both @parent and @bio have
298  * completed; the chained bio will also be freed when it completes.
299  *
300  * The caller must not set bi_private or bi_end_io in @bio.
301  */
302 void bio_chain(struct bio *bio, struct bio *parent)
303 {
304 	BUG_ON(bio->bi_private || bio->bi_end_io);
305 
306 	bio->bi_private = parent;
307 	bio->bi_end_io	= bio_chain_endio;
308 	bio_inc_remaining(parent);
309 }
310 EXPORT_SYMBOL(bio_chain);
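
/*
 * Typical usage (an illustrative sketch; more_work, nr_vecs and the bio
 * set-up are placeholders): issue a large operation as a series of bios
 * while only observing a single completion.  Each submitted bio is chained
 * to a freshly allocated successor, so the last bio's ->bi_end_io only runs
 * once all of the earlier ones have completed as well.
 *
 *	while (more_work) {
 *		struct bio *next = bio_alloc(GFP_KERNEL, nr_vecs);
 *
 *		bio_chain(bio, next);
 *		submit_bio(bio);
 *		bio = next;
 *		... fill the new bio ...
 *	}
 *	submit_bio(bio);
 */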
311 
312 static void bio_alloc_rescue(struct work_struct *work)
313 {
314 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
315 	struct bio *bio;
316 
317 	while (1) {
318 		spin_lock(&bs->rescue_lock);
319 		bio = bio_list_pop(&bs->rescue_list);
320 		spin_unlock(&bs->rescue_lock);
321 
322 		if (!bio)
323 			break;
324 
325 		submit_bio_noacct(bio);
326 	}
327 }
328 
329 static void punt_bios_to_rescuer(struct bio_set *bs)
330 {
331 	struct bio_list punt, nopunt;
332 	struct bio *bio;
333 
334 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
335 		return;
336 	/*
337 	 * In order to guarantee forward progress we must punt only bios that
338 	 * were allocated from this bio_set; otherwise, if there was a bio on
339 	 * there for a stacking driver higher up in the stack, processing it
340 	 * could require allocating bios from this bio_set, and doing that from
341 	 * our own rescuer would be bad.
342 	 *
343 	 * Since bio lists are singly linked, pop them all instead of trying to
344 	 * remove from the middle of the list:
345 	 */
346 
347 	bio_list_init(&punt);
348 	bio_list_init(&nopunt);
349 
350 	while ((bio = bio_list_pop(&current->bio_list[0])))
351 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
352 	current->bio_list[0] = nopunt;
353 
354 	bio_list_init(&nopunt);
355 	while ((bio = bio_list_pop(&current->bio_list[1])))
356 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
357 	current->bio_list[1] = nopunt;
358 
359 	spin_lock(&bs->rescue_lock);
360 	bio_list_merge(&bs->rescue_list, &punt);
361 	spin_unlock(&bs->rescue_lock);
362 
363 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
364 }
365 
366 /**
367  * bio_alloc_bioset - allocate a bio for I/O
368  * @gfp_mask:   the GFP_* mask given to the slab allocator
369  * @nr_iovecs:	number of iovecs to pre-allocate
370  * @bs:		the bio_set to allocate from.
371  *
372  * Allocate a bio from the mempools in @bs.
373  *
374  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
375  * allocate a bio.  This is due to the mempool guarantees.  To make this work,
376  * callers must never allocate more than 1 bio at a time from the general pool.
377  * Callers that need to allocate more than 1 bio must always submit the
378  * previously allocated bio for IO before attempting to allocate a new one.
379  * Failure to do so can cause deadlocks under memory pressure.
380  *
381  * Note that when running under submit_bio_noacct() (i.e. any block driver),
382  * bios are not submitted until after you return - see the code in
383  * submit_bio_noacct() that converts recursion into iteration, to prevent
384  * stack overflows.
385  *
386  * This would normally mean allocating multiple bios under submit_bio_noacct()
387  * would be susceptible to deadlocks, but we have
388  * deadlock avoidance code that resubmits any blocked bios from a rescuer
389  * thread.
390  *
391  * However, we do not guarantee forward progress for allocations from other
392  * mempools. Doing multiple allocations from the same mempool under
393  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
394  * for per bio allocations.
395  *
396  * Returns: Pointer to new bio on success, NULL on failure.
397  */
398 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
399 			     struct bio_set *bs)
400 {
401 	gfp_t saved_gfp = gfp_mask;
402 	struct bio *bio;
403 	void *p;
404 
405 	/* should not use nobvec bioset for nr_iovecs > 0 */
406 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
407 		return NULL;
408 
409 	/*
410 	 * submit_bio_noacct() converts recursion to iteration; this means if
411 	 * we're running beneath it, any bios we allocate and submit will not be
412 	 * submitted (and thus freed) until after we return.
413 	 *
414 	 * This exposes us to a potential deadlock if we allocate multiple bios
415 	 * from the same bio_set() while running underneath submit_bio_noacct().
416 	 * If we were to allocate multiple bios (say a stacking block driver
417 	 * that was splitting bios), we would deadlock if we exhausted the
418 	 * mempool's reserve.
419 	 *
420 	 * We solve this, and guarantee forward progress, with a rescuer
421 	 * workqueue per bio_set. If we go to allocate and there are bios on
422 	 * current->bio_list, we first try the allocation without
423 	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
424 	 * blocking to the rescuer workqueue before we retry with the original
425 	 * gfp_flags.
426 	 */
427 	if (current->bio_list &&
428 	    (!bio_list_empty(&current->bio_list[0]) ||
429 	     !bio_list_empty(&current->bio_list[1])) &&
430 	    bs->rescue_workqueue)
431 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
432 
433 	p = mempool_alloc(&bs->bio_pool, gfp_mask);
434 	if (!p && gfp_mask != saved_gfp) {
435 		punt_bios_to_rescuer(bs);
436 		gfp_mask = saved_gfp;
437 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
438 	}
439 	if (unlikely(!p))
440 		return NULL;
441 
442 	bio = p + bs->front_pad;
443 	if (nr_iovecs > BIO_INLINE_VECS) {
444 		struct bio_vec *bvl = NULL;
445 
446 		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
447 		if (!bvl && gfp_mask != saved_gfp) {
448 			punt_bios_to_rescuer(bs);
449 			gfp_mask = saved_gfp;
450 			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
451 		}
452 		if (unlikely(!bvl))
453 			goto err_free;
454 
455 		bio_init(bio, bvl, nr_iovecs);
456 	} else if (nr_iovecs) {
457 		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
458 	} else {
459 		bio_init(bio, NULL, 0);
460 	}
461 
462 	bio->bi_pool = bs;
463 	return bio;
464 
465 err_free:
466 	mempool_free(p, &bs->bio_pool);
467 	return NULL;
468 }
469 EXPORT_SYMBOL(bio_alloc_bioset);
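
/*
 * Typical usage (an illustrative sketch; bdev, sector, page and my_end_io are
 * placeholders): allocate one bio at a time, submit it before allocating the
 * next one, and pass a gfp mask containing __GFP_DIRECT_RECLAIM (e.g.
 * GFP_NOIO) to rely on the mempool forward-progress guarantee.  my_end_io()
 * is expected to drop the reference with bio_put().
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &fs_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */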
470 
471 /**
472  * bio_kmalloc - kmalloc a bio for I/O
473  * @gfp_mask:   the GFP_* mask given to the slab allocator
474  * @nr_iovecs:	number of iovecs to pre-allocate
475  *
476  * Use kmalloc to allocate and initialize a bio.
477  *
478  * Returns: Pointer to new bio on success, NULL on failure.
479  */
480 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
481 {
482 	struct bio *bio;
483 
484 	if (nr_iovecs > UIO_MAXIOV)
485 		return NULL;
486 
487 	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
488 	if (unlikely(!bio))
489 		return NULL;
490 	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
491 	bio->bi_pool = NULL;
492 	return bio;
493 }
494 EXPORT_SYMBOL(bio_kmalloc);
495 
496 void zero_fill_bio(struct bio *bio)
497 {
498 	struct bio_vec bv;
499 	struct bvec_iter iter;
500 
501 	bio_for_each_segment(bv, bio, iter)
502 		memzero_bvec(&bv);
503 }
504 EXPORT_SYMBOL(zero_fill_bio);
505 
506 /**
507  * bio_truncate - truncate the bio to the smaller size @new_size
508  * @bio:	the bio to be truncated
509  * @new_size:	new size for truncating the bio
510  *
511  * Description:
512  *   Truncate the bio to the new size @new_size. If bio_op(bio) is
513  *   REQ_OP_READ, zero the truncated part. This function should only
514  *   be used for handling corner cases, such as bio eod.
515  */
516 void bio_truncate(struct bio *bio, unsigned new_size)
517 {
518 	struct bio_vec bv;
519 	struct bvec_iter iter;
520 	unsigned int done = 0;
521 	bool truncated = false;
522 
523 	if (new_size >= bio->bi_iter.bi_size)
524 		return;
525 
526 	if (bio_op(bio) != REQ_OP_READ)
527 		goto exit;
528 
529 	bio_for_each_segment(bv, bio, iter) {
530 		if (done + bv.bv_len > new_size) {
531 			unsigned offset;
532 
533 			if (!truncated)
534 				offset = new_size - done;
535 			else
536 				offset = 0;
537 			zero_user(bv.bv_page, offset, bv.bv_len - offset);
538 			truncated = true;
539 		}
540 		done += bv.bv_len;
541 	}
542 
543  exit:
544 	/*
545 	 * Don't touch the bvec table here; keep it effectively immutable, since
546 	 * an fs bio user has to retrieve all pages via bio_for_each_segment_all
547 	 * in its ->bi_end_io() callback.
548 	 *
549 	 * It is enough to truncate the bio by updating .bi_size, since drivers
550 	 * can build a correct bvec from the updated .bi_size.
551 	 */
552 	bio->bi_iter.bi_size = new_size;
553 }
554 
555 /**
556  * guard_bio_eod - truncate a BIO to fit the block device
557  * @bio:	bio to truncate
558  *
559  * This allows us to do IO even on the odd last sectors of a device, even if the
560  * block size is some multiple of the physical sector size.
561  *
562  * We'll just truncate the bio to the size of the device, and clear the end of
563  * the buffer head manually.  Truly out-of-range accesses will turn into actual
564  * I/O errors, this only handles the "we need to be able to do I/O at the final
565  * sector" case.
566  */
567 void guard_bio_eod(struct bio *bio)
568 {
569 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
570 
571 	if (!maxsector)
572 		return;
573 
574 	/*
575 	 * If the *whole* IO is past the end of the device,
576 	 * let it through, and the IO layer will turn it into
577 	 * an EIO.
578 	 */
579 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
580 		return;
581 
582 	maxsector -= bio->bi_iter.bi_sector;
583 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
584 		return;
585 
586 	bio_truncate(bio, maxsector << 9);
587 }
588 
589 /**
590  * bio_put - release a reference to a bio
591  * @bio:   bio to release reference to
592  *
593  * Description:
594  *   Put a reference to a &struct bio, either one you have gotten with
595  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
596  **/
597 void bio_put(struct bio *bio)
598 {
599 	if (!bio_flagged(bio, BIO_REFFED))
600 		bio_free(bio);
601 	else {
602 		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
603 
604 		/*
605 		 * last put frees it
606 		 */
607 		if (atomic_dec_and_test(&bio->__bi_cnt))
608 			bio_free(bio);
609 	}
610 }
611 EXPORT_SYMBOL(bio_put);
612 
613 /**
614  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
615  * 	@bio: destination bio
616  * 	@bio_src: bio to clone
617  *
618  *	Clone @bio_src into @bio. The clone shares @bio_src's biovec, so the
619  *	caller owns @bio but not the actual data it points to. No reference
620  * 	is taken on @bio_src.
621  *
622  * 	Caller must ensure that @bio_src is not freed before @bio.
623  */
624 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
625 {
626 	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);
627 
628 	/*
629 	 * most users will be overriding ->bi_bdev with a new target,
630 	 * so we don't set nor calculate new physical/hw segment counts here
631 	 */
632 	bio->bi_bdev = bio_src->bi_bdev;
633 	bio_set_flag(bio, BIO_CLONED);
634 	if (bio_flagged(bio_src, BIO_THROTTLED))
635 		bio_set_flag(bio, BIO_THROTTLED);
636 	if (bio_flagged(bio_src, BIO_REMAPPED))
637 		bio_set_flag(bio, BIO_REMAPPED);
638 	bio->bi_opf = bio_src->bi_opf;
639 	bio->bi_ioprio = bio_src->bi_ioprio;
640 	bio->bi_write_hint = bio_src->bi_write_hint;
641 	bio->bi_iter = bio_src->bi_iter;
642 	bio->bi_io_vec = bio_src->bi_io_vec;
643 
644 	bio_clone_blkg_association(bio, bio_src);
645 	blkcg_bio_issue_init(bio);
646 }
647 EXPORT_SYMBOL(__bio_clone_fast);
648 
649 /**
650  *	bio_clone_fast - clone a bio that shares the original bio's biovec
651  *	@bio: bio to clone
652  *	@gfp_mask: allocation priority
653  *	@bs: bio_set to allocate from
654  *
655  * 	Like __bio_clone_fast(), but also allocates the returned bio from @bs.
656  */
657 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
658 {
659 	struct bio *b;
660 
661 	b = bio_alloc_bioset(gfp_mask, 0, bs);
662 	if (!b)
663 		return NULL;
664 
665 	__bio_clone_fast(b, bio);
666 
667 	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
668 		goto err_put;
669 
670 	if (bio_integrity(bio) &&
671 	    bio_integrity_clone(b, bio, gfp_mask) < 0)
672 		goto err_put;
673 
674 	return b;
675 
676 err_put:
677 	bio_put(b);
678 	return NULL;
679 }
680 EXPORT_SYMBOL(bio_clone_fast);
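
/*
 * Typical usage in a stacking driver (an illustrative sketch; my_bio_set,
 * target_bdev, target_offset and my_clone_endio are placeholders): clone the
 * incoming bio from a driver-private bio_set, redirect the clone, and
 * complete the original bio from the clone's endio handler.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *
 *	bio_set_dev(clone, target_bdev);
 *	clone->bi_iter.bi_sector += target_offset;
 *	clone->bi_end_io = my_clone_endio;
 *	clone->bi_private = bio;
 *	submit_bio_noacct(clone);
 */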
681 
682 const char *bio_devname(struct bio *bio, char *buf)
683 {
684 	return bdevname(bio->bi_bdev, buf);
685 }
686 EXPORT_SYMBOL(bio_devname);
687 
688 static inline bool page_is_mergeable(const struct bio_vec *bv,
689 		struct page *page, unsigned int len, unsigned int off,
690 		bool *same_page)
691 {
692 	size_t bv_end = bv->bv_offset + bv->bv_len;
693 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
694 	phys_addr_t page_addr = page_to_phys(page);
695 
696 	if (vec_end_addr + 1 != page_addr + off)
697 		return false;
698 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
699 		return false;
700 
701 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
702 	if (*same_page)
703 		return true;
704 	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
705 }
706 
707 /*
708  * Try to merge a page into a segment, while obeying the hardware segment
709  * size limit.  This is not for normal read/write bios, but for passthrough
710  * or Zone Append operations that we can't split.
711  */
712 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
713 				 struct page *page, unsigned len,
714 				 unsigned offset, bool *same_page)
715 {
716 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
717 	unsigned long mask = queue_segment_boundary(q);
718 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
719 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
720 
721 	if ((addr1 | mask) != (addr2 | mask))
722 		return false;
723 	if (bv->bv_len + len > queue_max_segment_size(q))
724 		return false;
725 	return __bio_try_merge_page(bio, page, len, offset, same_page);
726 }
727 
728 /**
729  * bio_add_hw_page - attempt to add a page to a bio with hw constraints
730  * @q: the target queue
731  * @bio: destination bio
732  * @page: page to add
733  * @len: vec entry length
734  * @offset: vec entry offset
735  * @max_sectors: maximum number of sectors that can be added
736  * @same_page: return if the segment has been merged inside the same page
737  *
738  * Add a page to a bio while respecting the hardware max_sectors, max_segment
739  * and gap limitations.
740  */
741 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
742 		struct page *page, unsigned int len, unsigned int offset,
743 		unsigned int max_sectors, bool *same_page)
744 {
745 	struct bio_vec *bvec;
746 
747 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
748 		return 0;
749 
750 	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
751 		return 0;
752 
753 	if (bio->bi_vcnt > 0) {
754 		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
755 			return len;
756 
757 		/*
758 		 * If the queue doesn't support SG gaps and adding this segment
759 		 * would create a gap, disallow it.
760 		 */
761 		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
762 		if (bvec_gap_to_prev(q, bvec, offset))
763 			return 0;
764 	}
765 
766 	if (bio_full(bio, len))
767 		return 0;
768 
769 	if (bio->bi_vcnt >= queue_max_segments(q))
770 		return 0;
771 
772 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
773 	bvec->bv_page = page;
774 	bvec->bv_len = len;
775 	bvec->bv_offset = offset;
776 	bio->bi_vcnt++;
777 	bio->bi_iter.bi_size += len;
778 	return len;
779 }
780 
781 /**
782  * bio_add_pc_page	- attempt to add page to passthrough bio
783  * @q: the target queue
784  * @bio: destination bio
785  * @page: page to add
786  * @len: vec entry length
787  * @offset: vec entry offset
788  *
789  * Attempt to add a page to the bio_vec maplist. This can fail for a
790  * number of reasons, such as the bio being full or target block device
791  * limitations. The target block device must allow bio's up to PAGE_SIZE,
792  * so it is always possible to add a single page to an empty bio.
793  *
794  * This should only be used by passthrough bios.
795  */
796 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
797 		struct page *page, unsigned int len, unsigned int offset)
798 {
799 	bool same_page = false;
800 	return bio_add_hw_page(q, bio, page, len, offset,
801 			queue_max_hw_sectors(q), &same_page);
802 }
803 EXPORT_SYMBOL(bio_add_pc_page);
804 
805 /**
806  * bio_add_zone_append_page - attempt to add page to zone-append bio
807  * @bio: destination bio
808  * @page: page to add
809  * @len: vec entry length
810  * @offset: vec entry offset
811  *
812  * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
813  * for a zone-append request. This can fail for a number of reasons, such as
814  * the bio being full, the target block device not being a zoned block device,
815  * or other limitations of the target block device. The target block device must
816  * allow bio's up to PAGE_SIZE, so it is always possible to add a single page
817  * to an empty bio.
818  *
819  * Returns: number of bytes added to the bio, or 0 in case of a failure.
820  */
821 int bio_add_zone_append_page(struct bio *bio, struct page *page,
822 			     unsigned int len, unsigned int offset)
823 {
824 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
825 	bool same_page = false;
826 
827 	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
828 		return 0;
829 
830 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
831 		return 0;
832 
833 	return bio_add_hw_page(q, bio, page, len, offset,
834 			       queue_max_zone_append_sectors(q), &same_page);
835 }
836 EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
837 
838 /**
839  * __bio_try_merge_page - try appending data to an existing bvec.
840  * @bio: destination bio
841  * @page: start page to add
842  * @len: length of the data to add
843  * @off: offset of the data relative to @page
844  * @same_page: return if the segment has been merged inside the same page
845  *
846  * Try to add the data at @page + @off to the last bvec of @bio.  This is a
847  * useful optimisation for file systems with a block size smaller than the
848  * page size.
849  *
850  * Warn if (@len, @off) crosses pages when @same_page is true.
851  *
852  * Return %true on success or %false on failure.
853  */
854 bool __bio_try_merge_page(struct bio *bio, struct page *page,
855 		unsigned int len, unsigned int off, bool *same_page)
856 {
857 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
858 		return false;
859 
860 	if (bio->bi_vcnt > 0) {
861 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
862 
863 		if (page_is_mergeable(bv, page, len, off, same_page)) {
864 			if (bio->bi_iter.bi_size > UINT_MAX - len) {
865 				*same_page = false;
866 				return false;
867 			}
868 			bv->bv_len += len;
869 			bio->bi_iter.bi_size += len;
870 			return true;
871 		}
872 	}
873 	return false;
874 }
875 EXPORT_SYMBOL_GPL(__bio_try_merge_page);
876 
877 /**
878  * __bio_add_page - add page(s) to a bio in a new segment
879  * @bio: destination bio
880  * @page: start page to add
881  * @len: length of the data to add, may cross pages
882  * @off: offset of the data relative to @page, may cross pages
883  *
884  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
885  * that @bio has space for another bvec.
886  */
887 void __bio_add_page(struct bio *bio, struct page *page,
888 		unsigned int len, unsigned int off)
889 {
890 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
891 
892 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
893 	WARN_ON_ONCE(bio_full(bio, len));
894 
895 	bv->bv_page = page;
896 	bv->bv_offset = off;
897 	bv->bv_len = len;
898 
899 	bio->bi_iter.bi_size += len;
900 	bio->bi_vcnt++;
901 
902 	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
903 		bio_set_flag(bio, BIO_WORKINGSET);
904 }
905 EXPORT_SYMBOL_GPL(__bio_add_page);
906 
907 /**
908  *	bio_add_page	-	attempt to add page(s) to bio
909  *	@bio: destination bio
910  *	@page: start page to add
911  *	@len: vec entry length, may cross pages
912  *	@offset: vec entry offset relative to @page, may cross pages
913  *
914  *	Attempt to add page(s) to the bio_vec maplist. This will only fail
915  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
916  */
917 int bio_add_page(struct bio *bio, struct page *page,
918 		 unsigned int len, unsigned int offset)
919 {
920 	bool same_page = false;
921 
922 	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
923 		if (bio_full(bio, len))
924 			return 0;
925 		__bio_add_page(bio, page, len, offset);
926 	}
927 	return len;
928 }
929 EXPORT_SYMBOL(bio_add_page);
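
/*
 * Typical usage (an illustrative sketch; pages[] and nr_pages are
 * placeholders): add pages until the bio is full.  A short return value means
 * no further vec fits, and the bio should be submitted before continuing with
 * a freshly allocated one.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;
 *	}
 */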
930 
931 void bio_release_pages(struct bio *bio, bool mark_dirty)
932 {
933 	struct bvec_iter_all iter_all;
934 	struct bio_vec *bvec;
935 
936 	if (bio_flagged(bio, BIO_NO_PAGE_REF))
937 		return;
938 
939 	bio_for_each_segment_all(bvec, bio, iter_all) {
940 		if (mark_dirty && !PageCompound(bvec->bv_page))
941 			set_page_dirty_lock(bvec->bv_page);
942 		put_page(bvec->bv_page);
943 	}
944 }
945 EXPORT_SYMBOL_GPL(bio_release_pages);
946 
947 static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
948 {
949 	WARN_ON_ONCE(bio->bi_max_vecs);
950 
951 	bio->bi_vcnt = iter->nr_segs;
952 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
953 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
954 	bio->bi_iter.bi_size = iter->count;
955 	bio_set_flag(bio, BIO_NO_PAGE_REF);
956 	bio_set_flag(bio, BIO_CLONED);
957 }
958 
959 static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
960 {
961 	__bio_iov_bvec_set(bio, iter);
962 	iov_iter_advance(iter, iter->count);
963 	return 0;
964 }
965 
966 static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
967 {
968 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
969 	struct iov_iter i = *iter;
970 
971 	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
972 	__bio_iov_bvec_set(bio, &i);
973 	iov_iter_advance(iter, i.count);
974 	return 0;
975 }
976 
977 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
978 
979 /**
980  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
981  * @bio: bio to add pages to
982  * @iter: iov iterator describing the region to be mapped
983  *
984  * Pins pages from *iter and appends them to @bio's bvec array. The
985  * pages will have to be released using put_page() when done.
986  * For multi-segment *iter, this function only adds pages from the
987  * next non-empty segment of the iov iterator.
988  */
989 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
990 {
991 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
992 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
993 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
994 	struct page **pages = (struct page **)bv;
995 	bool same_page = false;
996 	ssize_t size, left;
997 	unsigned len, i;
998 	size_t offset;
999 
1000 	/*
1001 	 * Move page array up in the allocated memory for the bio vecs as far as
1002 	 * possible so that we can start filling biovecs from the beginning
1003 	 * without overwriting the temporary page array.
1004 	 */
1005 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1006 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1007 
1008 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1009 	if (unlikely(size <= 0))
1010 		return size ? size : -EFAULT;
1011 
1012 	for (left = size, i = 0; left > 0; left -= len, i++) {
1013 		struct page *page = pages[i];
1014 
1015 		len = min_t(size_t, PAGE_SIZE - offset, left);
1016 
1017 		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1018 			if (same_page)
1019 				put_page(page);
1020 		} else {
1021 			if (WARN_ON_ONCE(bio_full(bio, len)))
1022 				return -EINVAL;
1023 			__bio_add_page(bio, page, len, offset);
1024 		}
1025 		offset = 0;
1026 	}
1027 
1028 	iov_iter_advance(iter, size);
1029 	return 0;
1030 }
1031 
1032 static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1033 {
1034 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1035 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1036 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1037 	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
1038 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1039 	struct page **pages = (struct page **)bv;
1040 	ssize_t size, left;
1041 	unsigned len, i;
1042 	size_t offset;
1043 	int ret = 0;
1044 
1045 	if (WARN_ON_ONCE(!max_append_sectors))
1046 		return 0;
1047 
1048 	/*
1049 	 * Move page array up in the allocated memory for the bio vecs as far as
1050 	 * possible so that we can start filling biovecs from the beginning
1051 	 * without overwriting the temporary page array.
1052 	 */
1053 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1054 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1055 
1056 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1057 	if (unlikely(size <= 0))
1058 		return size ? size : -EFAULT;
1059 
1060 	for (left = size, i = 0; left > 0; left -= len, i++) {
1061 		struct page *page = pages[i];
1062 		bool same_page = false;
1063 
1064 		len = min_t(size_t, PAGE_SIZE - offset, left);
1065 		if (bio_add_hw_page(q, bio, page, len, offset,
1066 				max_append_sectors, &same_page) != len) {
1067 			ret = -EINVAL;
1068 			break;
1069 		}
1070 		if (same_page)
1071 			put_page(page);
1072 		offset = 0;
1073 	}
1074 
1075 	iov_iter_advance(iter, size - left);
1076 	return ret;
1077 }
1078 
1079 /**
1080  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1081  * @bio: bio to add pages to
1082  * @iter: iov iterator describing the region to be added
1083  *
1084  * This takes either an iterator pointing to user memory, or one pointing to
1085  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1086  * map them into the kernel. On IO completion, the caller should put those
1087  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1088  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1089  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1090  * completed by a call to ->ki_complete() or returns with an error other than
1091  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1092  * on IO completion. If it isn't, then pages should be released.
1093  *
1094  * The function tries, but does not guarantee, to pin as many pages as
1095  * fit into the bio, or are requested in @iter, whatever is smaller. If
1096  * MM encounters an error pinning the requested pages, it stops. Error
1097  * is returned only if 0 pages could be pinned.
1098  *
1099  * It's intended for direct IO, so it doesn't do PSI tracking; the caller is
1100  * responsible for setting BIO_WORKINGSET if necessary.
1101  */
1102 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1103 {
1104 	int ret = 0;
1105 
1106 	if (iov_iter_is_bvec(iter)) {
1107 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1108 			return bio_iov_bvec_set_append(bio, iter);
1109 		return bio_iov_bvec_set(bio, iter);
1110 	}
1111 
1112 	do {
1113 		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1114 			ret = __bio_iov_append_get_pages(bio, iter);
1115 		else
1116 			ret = __bio_iov_iter_get_pages(bio, iter);
1117 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1118 
1119 	/* don't account direct I/O as memory stall */
1120 	bio_clear_flag(bio, BIO_WORKINGSET);
1121 	return bio->bi_vcnt ? 0 : ret;
1122 }
1123 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
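
/*
 * Typical direct I/O usage (an illustrative sketch assuming a simple
 * single-bio write path; bdev and pos are placeholders): size the bio for the
 * iterator, pull the pages in, and submit.  For user-backed iterators the
 * completion path drops the page references via bio_release_pages().
 *
 *	bio = bio_alloc(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_VECS));
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_WRITE;
 *	bio->bi_iter.bi_sector = pos >> 9;
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		... bail out and free the bio ...
 *	submit_bio(bio);
 */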
1124 
1125 static void submit_bio_wait_endio(struct bio *bio)
1126 {
1127 	complete(bio->bi_private);
1128 }
1129 
1130 /**
1131  * submit_bio_wait - submit a bio, and wait until it completes
1132  * @bio: The &struct bio which describes the I/O
1133  *
1134  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1135  * bio_endio() on failure.
1136  *
1137  * WARNING: Unlike how submit_bio() is usually used, this function does not
1138  * consume the bio reference. The caller must drop the reference
1139  * on their own.
1140  */
1141 int submit_bio_wait(struct bio *bio)
1142 {
1143 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1144 			bio->bi_bdev->bd_disk->lockdep_map);
1145 	unsigned long hang_check;
1146 
1147 	bio->bi_private = &done;
1148 	bio->bi_end_io = submit_bio_wait_endio;
1149 	bio->bi_opf |= REQ_SYNC;
1150 	submit_bio(bio);
1151 
1152 	/* Prevent hang_check timer from firing at us during very long I/O */
1153 	hang_check = sysctl_hung_task_timeout_secs;
1154 	if (hang_check)
1155 		while (!wait_for_completion_io_timeout(&done,
1156 					hang_check * (HZ/2)))
1157 			;
1158 	else
1159 		wait_for_completion_io(&done);
1160 
1161 	return blk_status_to_errno(bio->bi_status);
1162 }
1163 EXPORT_SYMBOL(submit_bio_wait);
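
/*
 * Typical usage (an illustrative sketch; bdev is a placeholder): issue a
 * synchronous cache flush and propagate the result.  Note that the caller
 * still owns the bio afterwards and must drop it explicitly.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */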
1164 
1165 /**
1166  * bio_advance - increment/complete a bio by some number of bytes
1167  * @bio:	bio to advance
1168  * @bytes:	number of bytes to complete
1169  *
1170  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1171  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1172  * be updated on the last bvec as well.
1173  *
1174  * @bio will then represent the remaining, uncompleted portion of the io.
1175  */
1176 void bio_advance(struct bio *bio, unsigned bytes)
1177 {
1178 	if (bio_integrity(bio))
1179 		bio_integrity_advance(bio, bytes);
1180 
1181 	bio_crypt_advance(bio, bytes);
1182 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1183 }
1184 EXPORT_SYMBOL(bio_advance);
1185 
1186 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1187 			struct bio *src, struct bvec_iter *src_iter)
1188 {
1189 	while (src_iter->bi_size && dst_iter->bi_size) {
1190 		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1191 		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1192 		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1193 		void *src_buf;
1194 
1195 		src_buf = bvec_kmap_local(&src_bv);
1196 		memcpy_to_bvec(&dst_bv, src_buf);
1197 		kunmap_local(src_buf);
1198 
1199 		bio_advance_iter_single(src, src_iter, bytes);
1200 		bio_advance_iter_single(dst, dst_iter, bytes);
1201 	}
1202 }
1203 EXPORT_SYMBOL(bio_copy_data_iter);
1204 
1205 /**
1206  * bio_copy_data - copy contents of data buffers from one bio to another
1207  * @src: source bio
1208  * @dst: destination bio
1209  *
1210  * Stops when it reaches the end of either @src or @dst - that is, copies
1211  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1212  */
1213 void bio_copy_data(struct bio *dst, struct bio *src)
1214 {
1215 	struct bvec_iter src_iter = src->bi_iter;
1216 	struct bvec_iter dst_iter = dst->bi_iter;
1217 
1218 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1219 }
1220 EXPORT_SYMBOL(bio_copy_data);
1221 
1222 void bio_free_pages(struct bio *bio)
1223 {
1224 	struct bio_vec *bvec;
1225 	struct bvec_iter_all iter_all;
1226 
1227 	bio_for_each_segment_all(bvec, bio, iter_all)
1228 		__free_page(bvec->bv_page);
1229 }
1230 EXPORT_SYMBOL(bio_free_pages);
1231 
1232 /*
1233  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1234  * for performing direct-IO in BIOs.
1235  *
1236  * The problem is that we cannot run set_page_dirty() from interrupt context
1237  * because the required locks are not interrupt-safe.  So what we can do is to
1238  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1239  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1240  * in process context.
1241  *
1242  * We special-case compound pages here: normally this means reads into hugetlb
1243  * pages.  The logic in here doesn't really work right for compound pages
1244  * because the VM does not uniformly chase down the head page in all cases.
1245  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1246  * handle them at all.  So we skip compound pages here at an early stage.
1247  *
1248  * Note that this code is very hard to test under normal circumstances because
1249  * direct-io pins the pages with get_user_pages().  This makes
1250  * is_page_cache_freeable return false, and the VM will not clean the pages.
1251  * But other code (e.g., flusher threads) could clean the pages if they are mapped
1252  * pagecache.
1253  *
1254  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1255  * deferred bio dirtying paths.
1256  */
1257 
1258 /*
1259  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1260  */
1261 void bio_set_pages_dirty(struct bio *bio)
1262 {
1263 	struct bio_vec *bvec;
1264 	struct bvec_iter_all iter_all;
1265 
1266 	bio_for_each_segment_all(bvec, bio, iter_all) {
1267 		if (!PageCompound(bvec->bv_page))
1268 			set_page_dirty_lock(bvec->bv_page);
1269 	}
1270 }
1271 
1272 /*
1273  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1274  * If they are, then fine.  If, however, some pages are clean then they must
1275  * have been written out during the direct-IO read.  So we take another ref on
1276  * the BIO and re-dirty the pages in process context.
1277  *
1278  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1279  * here on.  It will run one put_page() against each page and will run one
1280  * bio_put() against the BIO.
1281  */
1282 
1283 static void bio_dirty_fn(struct work_struct *work);
1284 
1285 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1286 static DEFINE_SPINLOCK(bio_dirty_lock);
1287 static struct bio *bio_dirty_list;
1288 
1289 /*
1290  * This runs in process context
1291  */
1292 static void bio_dirty_fn(struct work_struct *work)
1293 {
1294 	struct bio *bio, *next;
1295 
1296 	spin_lock_irq(&bio_dirty_lock);
1297 	next = bio_dirty_list;
1298 	bio_dirty_list = NULL;
1299 	spin_unlock_irq(&bio_dirty_lock);
1300 
1301 	while ((bio = next) != NULL) {
1302 		next = bio->bi_private;
1303 
1304 		bio_release_pages(bio, true);
1305 		bio_put(bio);
1306 	}
1307 }
1308 
1309 void bio_check_pages_dirty(struct bio *bio)
1310 {
1311 	struct bio_vec *bvec;
1312 	unsigned long flags;
1313 	struct bvec_iter_all iter_all;
1314 
1315 	bio_for_each_segment_all(bvec, bio, iter_all) {
1316 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1317 			goto defer;
1318 	}
1319 
1320 	bio_release_pages(bio, false);
1321 	bio_put(bio);
1322 	return;
1323 defer:
1324 	spin_lock_irqsave(&bio_dirty_lock, flags);
1325 	bio->bi_private = bio_dirty_list;
1326 	bio_dirty_list = bio;
1327 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1328 	schedule_work(&bio_dirty_work);
1329 }
1330 
1331 static inline bool bio_remaining_done(struct bio *bio)
1332 {
1333 	/*
1334 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1335 	 * we always end io on the first invocation.
1336 	 */
1337 	if (!bio_flagged(bio, BIO_CHAIN))
1338 		return true;
1339 
1340 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1341 
1342 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1343 		bio_clear_flag(bio, BIO_CHAIN);
1344 		return true;
1345 	}
1346 
1347 	return false;
1348 }
1349 
1350 /**
1351  * bio_endio - end I/O on a bio
1352  * @bio:	bio
1353  *
1354  * Description:
1355  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1356  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1357  *   bio unless they own it and thus know that it has an end_io function.
1358  *
1359  *   bio_endio() can be called several times on a bio that has been chained
1360  *   using bio_chain().  The ->bi_end_io() function will only be called the
1361  *   last time.
1362  **/
1363 void bio_endio(struct bio *bio)
1364 {
1365 again:
1366 	if (!bio_remaining_done(bio))
1367 		return;
1368 	if (!bio_integrity_endio(bio))
1369 		return;
1370 
1371 	if (bio->bi_bdev)
1372 		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
1373 
1374 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1375 		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
1376 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1377 	}
1378 
1379 	/*
1380 	 * Need to have a real endio function for chained bios, otherwise
1381 	 * various corner cases will break (like stacking block devices that
1382 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1383 	 * recursion and blowing the stack. Tail call optimization would
1384 	 * handle this, but compiling with frame pointers also disables
1385 	 * gcc's sibling call optimization.
1386 	 */
1387 	if (bio->bi_end_io == bio_chain_endio) {
1388 		bio = __bio_chain_endio(bio);
1389 		goto again;
1390 	}
1391 
1392 	blk_throtl_bio_endio(bio);
1393 	/* release cgroup info */
1394 	bio_uninit(bio);
1395 	if (bio->bi_end_io)
1396 		bio->bi_end_io(bio);
1397 }
1398 EXPORT_SYMBOL(bio_endio);
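
/*
 * Typical ->bi_end_io callback (an illustrative sketch; struct my_request and
 * its fields are placeholders): translate bi_status into an errno, hand the
 * result back to the submitter, and drop the bio reference.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_request *rq = bio->bi_private;
 *
 *		rq->error = blk_status_to_errno(bio->bi_status);
 *		complete(&rq->done);
 *		bio_put(bio);
 *	}
 */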
1399 
1400 /**
1401  * bio_split - split a bio
1402  * @bio:	bio to split
1403  * @sectors:	number of sectors to split from the front of @bio
1404  * @gfp:	gfp mask
1405  * @bs:		bio set to allocate from
1406  *
1407  * Allocates and returns a new bio which represents @sectors from the start of
1408  * @bio, and updates @bio to represent the remaining sectors.
1409  *
1410  * Unless this is a discard request the newly allocated bio will point
1411  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1412  * neither @bio nor @bs are freed before the split bio.
1413  */
1414 struct bio *bio_split(struct bio *bio, int sectors,
1415 		      gfp_t gfp, struct bio_set *bs)
1416 {
1417 	struct bio *split;
1418 
1419 	BUG_ON(sectors <= 0);
1420 	BUG_ON(sectors >= bio_sectors(bio));
1421 
1422 	/* Zone append commands cannot be split */
1423 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1424 		return NULL;
1425 
1426 	split = bio_clone_fast(bio, gfp, bs);
1427 	if (!split)
1428 		return NULL;
1429 
1430 	split->bi_iter.bi_size = sectors << 9;
1431 
1432 	if (bio_integrity(split))
1433 		bio_integrity_trim(split);
1434 
1435 	bio_advance(bio, split->bi_iter.bi_size);
1436 
1437 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1438 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1439 
1440 	return split;
1441 }
1442 EXPORT_SYMBOL(bio_split);
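
/*
 * Typical usage (an illustrative sketch of the split-and-resubmit pattern;
 * max_sectors and my_bio_set are placeholders): carve off the front of an
 * oversized bio, chain the front piece to the remainder, resubmit the
 * remainder, and continue processing the front piece.
 *
 *	split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
 *	if (split) {
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 */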
1443 
1444 /**
1445  * bio_trim - trim a bio
1446  * @bio:	bio to trim
1447  * @offset:	number of sectors to trim from the front of @bio
1448  * @size:	size we want to trim @bio to, in sectors
1449  */
1450 void bio_trim(struct bio *bio, int offset, int size)
1451 {
1452 	/* 'bio' is a cloned bio which we need to trim to match
1453 	 * the given offset and size.
1454 	 */
1455 
1456 	size <<= 9;
1457 	if (offset == 0 && size == bio->bi_iter.bi_size)
1458 		return;
1459 
1460 	bio_advance(bio, offset << 9);
1461 	bio->bi_iter.bi_size = size;
1462 
1463 	if (bio_integrity(bio))
1464 		bio_integrity_trim(bio);
1465 
1466 }
1467 EXPORT_SYMBOL_GPL(bio_trim);
1468 
1469 /*
1470  * create memory pools for biovec's in a bio_set.
1471  * use the global biovec slabs created for general use.
1472  */
1473 int biovec_init_pool(mempool_t *pool, int pool_entries)
1474 {
1475 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1476 
1477 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1478 }
1479 
1480 /*
1481  * bioset_exit - exit a bioset initialized with bioset_init()
1482  *
1483  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1484  * kzalloc()).
1485  */
1486 void bioset_exit(struct bio_set *bs)
1487 {
1488 	if (bs->rescue_workqueue)
1489 		destroy_workqueue(bs->rescue_workqueue);
1490 	bs->rescue_workqueue = NULL;
1491 
1492 	mempool_exit(&bs->bio_pool);
1493 	mempool_exit(&bs->bvec_pool);
1494 
1495 	bioset_integrity_free(bs);
1496 	if (bs->bio_slab)
1497 		bio_put_slab(bs);
1498 	bs->bio_slab = NULL;
1499 }
1500 EXPORT_SYMBOL(bioset_exit);
1501 
1502 /**
1503  * bioset_init - Initialize a bio_set
1504  * @bs:		pool to initialize
1505  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1506  * @front_pad:	Number of bytes to allocate in front of the returned bio
1507  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1508  *              and %BIOSET_NEED_RESCUER
1509  *
1510  * Description:
1511  *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
1512  *    to ask for a number of bytes to be allocated in front of the bio.
1513  *    Front pad allocation is useful for embedding the bio inside
1514  *    another structure, to avoid allocating extra data to go with the bio.
1515  *    Note that the bio must be embedded at the END of that structure always,
1516  *    or things will break badly.
1517  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1518  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1519  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1520  *    dispatch queued requests when the mempool runs out of space.
1521  *
1522  */
1523 int bioset_init(struct bio_set *bs,
1524 		unsigned int pool_size,
1525 		unsigned int front_pad,
1526 		int flags)
1527 {
1528 	bs->front_pad = front_pad;
1529 	if (flags & BIOSET_NEED_BVECS)
1530 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1531 	else
1532 		bs->back_pad = 0;
1533 
1534 	spin_lock_init(&bs->rescue_lock);
1535 	bio_list_init(&bs->rescue_list);
1536 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1537 
1538 	bs->bio_slab = bio_find_or_create_slab(bs);
1539 	if (!bs->bio_slab)
1540 		return -ENOMEM;
1541 
1542 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1543 		goto bad;
1544 
1545 	if ((flags & BIOSET_NEED_BVECS) &&
1546 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1547 		goto bad;
1548 
1549 	if (!(flags & BIOSET_NEED_RESCUER))
1550 		return 0;
1551 
1552 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1553 	if (!bs->rescue_workqueue)
1554 		goto bad;
1555 
1556 	return 0;
1557 bad:
1558 	bioset_exit(bs);
1559 	return -ENOMEM;
1560 }
1561 EXPORT_SYMBOL(bioset_init);
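
/*
 * Typical usage (an illustrative sketch; struct my_io, my_bio_set and their
 * fields are placeholders): embed the bio at the end of a driver structure
 * and use front_pad so that a single mempool allocation covers both.
 *
 *	struct my_io {
 *		struct my_dev	*dev;
 *		struct bio	bio;	(must come last)
 *	};
 *
 *	bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *		    offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */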
1562 
1563 /*
1564  * Initialize and setup a new bio_set, based on the settings from
1565  * another bio_set.
1566  */
1567 int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1568 {
1569 	int flags;
1570 
1571 	flags = 0;
1572 	if (src->bvec_pool.min_nr)
1573 		flags |= BIOSET_NEED_BVECS;
1574 	if (src->rescue_workqueue)
1575 		flags |= BIOSET_NEED_RESCUER;
1576 
1577 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1578 }
1579 EXPORT_SYMBOL(bioset_init_from_src);
1580 
1581 static int __init init_bio(void)
1582 {
1583 	int i;
1584 
1585 	bio_integrity_init();
1586 
1587 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1588 		struct biovec_slab *bvs = bvec_slabs + i;
1589 
1590 		bvs->slab = kmem_cache_create(bvs->name,
1591 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1592 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1593 	}
1594 
1595 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1596 		panic("bio: can't allocate bios\n");
1597 
1598 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1599 		panic("bio: can't create integrity pool\n");
1600 
1601 	return 0;
1602 }
1603 subsys_initcall(init_bio);
1604