1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4   */
5  #include <linux/mm.h>
6  #include <linux/swap.h>
7  #include <linux/bio.h>
8  #include <linux/blkdev.h>
9  #include <linux/uio.h>
10  #include <linux/iocontext.h>
11  #include <linux/slab.h>
12  #include <linux/init.h>
13  #include <linux/kernel.h>
14  #include <linux/export.h>
15  #include <linux/mempool.h>
16  #include <linux/workqueue.h>
17  #include <linux/cgroup.h>
18  #include <linux/highmem.h>
19  #include <linux/sched/sysctl.h>
20  #include <linux/blk-crypto.h>
21  #include <linux/xarray.h>
22  
23  #include <trace/events/block.h>
24  #include "blk.h"
25  #include "blk-rq-qos.h"
26  #include "blk-cgroup.h"
27  
28  #define ALLOC_CACHE_THRESHOLD	16
29  #define ALLOC_CACHE_MAX		256
30  
31  struct bio_alloc_cache {
32  	struct bio		*free_list;
33  	struct bio		*free_list_irq;
34  	unsigned int		nr;
35  	unsigned int		nr_irq;
36  };
37  
38  static struct biovec_slab {
39  	int nr_vecs;
40  	char *name;
41  	struct kmem_cache *slab;
42  } bvec_slabs[] __read_mostly = {
43  	{ .nr_vecs = 16, .name = "biovec-16" },
44  	{ .nr_vecs = 64, .name = "biovec-64" },
45  	{ .nr_vecs = 128, .name = "biovec-128" },
46  	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
47  };
48  
49  static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
50  {
51  	switch (nr_vecs) {
52  	/* smaller bios use inline vecs */
53  	case 5 ... 16:
54  		return &bvec_slabs[0];
55  	case 17 ... 64:
56  		return &bvec_slabs[1];
57  	case 65 ... 128:
58  		return &bvec_slabs[2];
59  	case 129 ... BIO_MAX_VECS:
60  		return &bvec_slabs[3];
61  	default:
62  		BUG();
63  		return NULL;
64  	}
65  }
66  
67  /*
68   * fs_bio_set is the bio_set containing bio and iovec memory pools used by
69   * IO code that does not need private memory pools.
70   */
71  struct bio_set fs_bio_set;
72  EXPORT_SYMBOL(fs_bio_set);
73  
74  /*
75   * Our slab pool management
76   */
77  struct bio_slab {
78  	struct kmem_cache *slab;
79  	unsigned int slab_ref;
80  	unsigned int slab_size;
81  	char name[8];
82  };
83  static DEFINE_MUTEX(bio_slab_lock);
84  static DEFINE_XARRAY(bio_slabs);
85  
86  static struct bio_slab *create_bio_slab(unsigned int size)
87  {
88  	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
89  
90  	if (!bslab)
91  		return NULL;
92  
93  	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
94  	bslab->slab = kmem_cache_create(bslab->name, size,
95  			ARCH_KMALLOC_MINALIGN,
96  			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
97  	if (!bslab->slab)
98  		goto fail_alloc_slab;
99  
100  	bslab->slab_ref = 1;
101  	bslab->slab_size = size;
102  
103  	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
104  		return bslab;
105  
106  	kmem_cache_destroy(bslab->slab);
107  
108  fail_alloc_slab:
109  	kfree(bslab);
110  	return NULL;
111  }
112  
113  static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
114  {
115  	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
116  }
117  
118  static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
119  {
120  	unsigned int size = bs_bio_slab_size(bs);
121  	struct bio_slab *bslab;
122  
123  	mutex_lock(&bio_slab_lock);
124  	bslab = xa_load(&bio_slabs, size);
125  	if (bslab)
126  		bslab->slab_ref++;
127  	else
128  		bslab = create_bio_slab(size);
129  	mutex_unlock(&bio_slab_lock);
130  
131  	if (bslab)
132  		return bslab->slab;
133  	return NULL;
134  }
135  
136  static void bio_put_slab(struct bio_set *bs)
137  {
138  	struct bio_slab *bslab = NULL;
139  	unsigned int slab_size = bs_bio_slab_size(bs);
140  
141  	mutex_lock(&bio_slab_lock);
142  
143  	bslab = xa_load(&bio_slabs, slab_size);
144  	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
145  		goto out;
146  
147  	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
148  
149  	WARN_ON(!bslab->slab_ref);
150  
151  	if (--bslab->slab_ref)
152  		goto out;
153  
154  	xa_erase(&bio_slabs, slab_size);
155  
156  	kmem_cache_destroy(bslab->slab);
157  	kfree(bslab);
158  
159  out:
160  	mutex_unlock(&bio_slab_lock);
161  }
162  
163  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
164  {
165  	BUG_ON(nr_vecs > BIO_MAX_VECS);
166  
167  	if (nr_vecs == BIO_MAX_VECS)
168  		mempool_free(bv, pool);
169  	else if (nr_vecs > BIO_INLINE_VECS)
170  		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
171  }
172  
173  /*
174   * Make the first allocation restricted and don't dump info on allocation
175   * failures, since we'll fall back to the mempool in case of failure.
176   */
177  static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
178  {
179  	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
180  		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
181  }
182  
183  struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
184  		gfp_t gfp_mask)
185  {
186  	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
187  
188  	if (WARN_ON_ONCE(!bvs))
189  		return NULL;
190  
191  	/*
192  	 * Upgrade the nr_vecs request to take full advantage of the allocation.
193  	 * We also rely on this in the bvec_free path.
194  	 */
195  	*nr_vecs = bvs->nr_vecs;
196  
197  	/*
198  	 * Try a slab allocation first for all smaller allocations.  If that
199  	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
200  	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
201  	 */
202  	if (*nr_vecs < BIO_MAX_VECS) {
203  		struct bio_vec *bvl;
204  
205  		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
206  		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
207  			return bvl;
208  		*nr_vecs = BIO_MAX_VECS;
209  	}
210  
211  	return mempool_alloc(pool, gfp_mask);
212  }
213  
214  void bio_uninit(struct bio *bio)
215  {
216  #ifdef CONFIG_BLK_CGROUP
217  	if (bio->bi_blkg) {
218  		blkg_put(bio->bi_blkg);
219  		bio->bi_blkg = NULL;
220  	}
221  #endif
222  	if (bio_integrity(bio))
223  		bio_integrity_free(bio);
224  
225  	bio_crypt_free_ctx(bio);
226  }
227  EXPORT_SYMBOL(bio_uninit);
228  
229  static void bio_free(struct bio *bio)
230  {
231  	struct bio_set *bs = bio->bi_pool;
232  	void *p = bio;
233  
234  	WARN_ON_ONCE(!bs);
235  
236  	bio_uninit(bio);
237  	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
238  	mempool_free(p - bs->front_pad, &bs->bio_pool);
239  }
240  
241  /*
242   * Users of this function have their own bio allocation. Consequently,
243   * they must remember to pair any call to bio_init() with bio_uninit()
244   * when IO has completed, or when the bio is released.
245   */
246  void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
247  	      unsigned short max_vecs, blk_opf_t opf)
248  {
249  	bio->bi_next = NULL;
250  	bio->bi_bdev = bdev;
251  	bio->bi_opf = opf;
252  	bio->bi_flags = 0;
253  	bio->bi_ioprio = 0;
254  	bio->bi_status = 0;
255  	bio->bi_iter.bi_sector = 0;
256  	bio->bi_iter.bi_size = 0;
257  	bio->bi_iter.bi_idx = 0;
258  	bio->bi_iter.bi_bvec_done = 0;
259  	bio->bi_end_io = NULL;
260  	bio->bi_private = NULL;
261  #ifdef CONFIG_BLK_CGROUP
262  	bio->bi_blkg = NULL;
263  	bio->bi_issue.value = 0;
264  	if (bdev)
265  		bio_associate_blkg(bio);
266  #ifdef CONFIG_BLK_CGROUP_IOCOST
267  	bio->bi_iocost_cost = 0;
268  #endif
269  #endif
270  #ifdef CONFIG_BLK_INLINE_ENCRYPTION
271  	bio->bi_crypt_context = NULL;
272  #endif
273  #ifdef CONFIG_BLK_DEV_INTEGRITY
274  	bio->bi_integrity = NULL;
275  #endif
276  	bio->bi_vcnt = 0;
277  
278  	atomic_set(&bio->__bi_remaining, 1);
279  	atomic_set(&bio->__bi_cnt, 1);
280  	bio->bi_cookie = BLK_QC_T_NONE;
281  
282  	bio->bi_max_vecs = max_vecs;
283  	bio->bi_io_vec = table;
284  	bio->bi_pool = NULL;
285  }
286  EXPORT_SYMBOL(bio_init);
287  
288  /**
289   * bio_reset - reinitialize a bio
290   * @bio:	bio to reset
291   * @bdev:	block device to use the bio for
292   * @opf:	operation and flags for bio
293   *
294   * Description:
295   *   After calling bio_reset(), @bio will be in the same state as a freshly
296   *   allocated bio returned by bio_alloc_bioset() - the only fields that are
297   *   preserved are the ones that are initialized by bio_alloc_bioset(). See
298   *   comment in struct bio.
299   */
300  void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
301  {
302  	bio_uninit(bio);
303  	memset(bio, 0, BIO_RESET_BYTES);
304  	atomic_set(&bio->__bi_remaining, 1);
305  	bio->bi_bdev = bdev;
306  	if (bio->bi_bdev)
307  		bio_associate_blkg(bio);
308  	bio->bi_opf = opf;
309  }
310  EXPORT_SYMBOL(bio_reset);
311  
312  static struct bio *__bio_chain_endio(struct bio *bio)
313  {
314  	struct bio *parent = bio->bi_private;
315  
316  	if (bio->bi_status && !parent->bi_status)
317  		parent->bi_status = bio->bi_status;
318  	bio_put(bio);
319  	return parent;
320  }
321  
322  static void bio_chain_endio(struct bio *bio)
323  {
324  	bio_endio(__bio_chain_endio(bio));
325  }
326  
327  /**
328   * bio_chain - chain bio completions
329   * @bio: the target bio
330   * @parent: the parent bio of @bio
331   *
332   * The caller won't have a bi_end_io called when @bio completes - instead,
333   * @parent's bi_end_io won't be called until both @parent and @bio have
334   * completed; the chained bio will also be freed when it completes.
335   *
336   * The caller must not set bi_private or bi_end_io in @bio.
337   */
338  void bio_chain(struct bio *bio, struct bio *parent)
339  {
340  	BUG_ON(bio->bi_private || bio->bi_end_io);
341  
342  	bio->bi_private = parent;
343  	bio->bi_end_io	= bio_chain_endio;
344  	bio_inc_remaining(parent);
345  }
346  EXPORT_SYMBOL(bio_chain);
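
/*
 * Illustrative sketch, not part of bio.c: one way a submitter might use
 * bio_chain() so that the parent's ->bi_end_io() only runs once both bios
 * have completed.  The helper name, the single-page payload and the GFP
 * choice are assumptions for the example; blk_next_bio() below is the
 * in-tree helper built on the same pattern.
 */
static void __maybe_unused example_submit_chained(struct bio *parent,
		struct block_device *bdev, struct page *page,
		unsigned int len, sector_t sector)
{
	struct bio *child;

	child = bio_alloc(bdev, 1, parent->bi_opf, GFP_NOIO);
	child->bi_iter.bi_sector = sector;
	__bio_add_page(child, page, len, 0);

	/* parent->bi_end_io() now waits for the child as well */
	bio_chain(child, parent);
	submit_bio(child);
}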
347  
348  struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
349  		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
350  {
351  	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
352  
353  	if (bio) {
354  		bio_chain(bio, new);
355  		submit_bio(bio);
356  	}
357  
358  	return new;
359  }
360  EXPORT_SYMBOL_GPL(blk_next_bio);
361  
362  static void bio_alloc_rescue(struct work_struct *work)
363  {
364  	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
365  	struct bio *bio;
366  
367  	while (1) {
368  		spin_lock(&bs->rescue_lock);
369  		bio = bio_list_pop(&bs->rescue_list);
370  		spin_unlock(&bs->rescue_lock);
371  
372  		if (!bio)
373  			break;
374  
375  		submit_bio_noacct(bio);
376  	}
377  }
378  
379  static void punt_bios_to_rescuer(struct bio_set *bs)
380  {
381  	struct bio_list punt, nopunt;
382  	struct bio *bio;
383  
384  	if (WARN_ON_ONCE(!bs->rescue_workqueue))
385  		return;
386  	/*
387  	 * In order to guarantee forward progress we must punt only bios that
388  	 * were allocated from this bio_set; otherwise, if there was a bio on
389  	 * there for a stacking driver higher up in the stack, processing it
390  	 * could require allocating bios from this bio_set, and doing that from
391  	 * our own rescuer would be bad.
392  	 *
393  	 * Since bio lists are singly linked, pop them all instead of trying to
394  	 * remove from the middle of the list:
395  	 */
396  
397  	bio_list_init(&punt);
398  	bio_list_init(&nopunt);
399  
400  	while ((bio = bio_list_pop(&current->bio_list[0])))
401  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
402  	current->bio_list[0] = nopunt;
403  
404  	bio_list_init(&nopunt);
405  	while ((bio = bio_list_pop(&current->bio_list[1])))
406  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
407  	current->bio_list[1] = nopunt;
408  
409  	spin_lock(&bs->rescue_lock);
410  	bio_list_merge(&bs->rescue_list, &punt);
411  	spin_unlock(&bs->rescue_lock);
412  
413  	queue_work(bs->rescue_workqueue, &bs->rescue_work);
414  }
415  
416  static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
417  {
418  	unsigned long flags;
419  
420  	/* cache->free_list must be empty */
421  	if (WARN_ON_ONCE(cache->free_list))
422  		return;
423  
424  	local_irq_save(flags);
425  	cache->free_list = cache->free_list_irq;
426  	cache->free_list_irq = NULL;
427  	cache->nr += cache->nr_irq;
428  	cache->nr_irq = 0;
429  	local_irq_restore(flags);
430  }
431  
432  static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
433  		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
434  		struct bio_set *bs)
435  {
436  	struct bio_alloc_cache *cache;
437  	struct bio *bio;
438  
439  	cache = per_cpu_ptr(bs->cache, get_cpu());
440  	if (!cache->free_list) {
441  		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
442  			bio_alloc_irq_cache_splice(cache);
443  		if (!cache->free_list) {
444  			put_cpu();
445  			return NULL;
446  		}
447  	}
448  	bio = cache->free_list;
449  	cache->free_list = bio->bi_next;
450  	cache->nr--;
451  	put_cpu();
452  
453  	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
454  	bio->bi_pool = bs;
455  	return bio;
456  }
457  
458  /**
459   * bio_alloc_bioset - allocate a bio for I/O
460   * @bdev:	block device to allocate the bio for (can be %NULL)
461   * @nr_vecs:	number of bvecs to pre-allocate
462   * @opf:	operation and flags for bio
463   * @gfp_mask:   the GFP_* mask given to the slab allocator
464   * @bs:		the bio_set to allocate from.
465   *
466   * Allocate a bio from the mempools in @bs.
467   *
468   * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
469   * allocate a bio.  This is due to the mempool guarantees.  To make this work,
470   * callers must never allocate more than 1 bio at a time from the general pool.
471   * Callers that need to allocate more than 1 bio must always submit the
472   * previously allocated bio for IO before attempting to allocate a new one.
473   * Failure to do so can cause deadlocks under memory pressure.
474   *
475   * Note that when running under submit_bio_noacct() (i.e. any block driver),
476   * bios are not submitted until after you return - see the code in
477   * submit_bio_noacct() that converts recursion into iteration, to prevent
478   * stack overflows.
479   *
480   * This would normally mean allocating multiple bios under submit_bio_noacct()
481   * would be susceptible to deadlocks, but we have
482   * deadlock avoidance code that resubmits any blocked bios from a rescuer
483   * thread.
484   *
485   * However, we do not guarantee forward progress for allocations from other
486   * mempools. Doing multiple allocations from the same mempool under
487   * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
488   * for per bio allocations.
489   *
490   * Returns: Pointer to new bio on success, NULL on failure.
491   */
492  struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
493  			     blk_opf_t opf, gfp_t gfp_mask,
494  			     struct bio_set *bs)
495  {
496  	gfp_t saved_gfp = gfp_mask;
497  	struct bio *bio;
498  	void *p;
499  
500  	/* should not use nobvec bioset for nr_vecs > 0 */
501  	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
502  		return NULL;
503  
504  	if (opf & REQ_ALLOC_CACHE) {
505  		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
506  			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
507  						     gfp_mask, bs);
508  			if (bio)
509  				return bio;
510  			/*
511  			 * No cached bio available; the bio returned below is marked
512  			 * with REQ_ALLOC_CACHE to participate in the per-cpu alloc cache.
513  			 */
514  		} else {
515  			opf &= ~REQ_ALLOC_CACHE;
516  		}
517  	}
518  
519  	/*
520  	 * submit_bio_noacct() converts recursion to iteration; this means if
521  	 * we're running beneath it, any bios we allocate and submit will not be
522  	 * submitted (and thus freed) until after we return.
523  	 *
524  	 * This exposes us to a potential deadlock if we allocate multiple bios
525  	 * from the same bio_set while running underneath submit_bio_noacct().
526  	 * If we were to allocate multiple bios (say a stacking block driver
527  	 * that was splitting bios), we would deadlock if we exhausted the
528  	 * mempool's reserve.
529  	 *
530  	 * We solve this, and guarantee forward progress, with a rescuer
531  	 * workqueue per bio_set. If we go to allocate and there are bios on
532  	 * current->bio_list, we first try the allocation without
533  	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
534  	 * blocking to the rescuer workqueue before we retry with the original
535  	 * gfp_flags.
536  	 */
537  	if (current->bio_list &&
538  	    (!bio_list_empty(&current->bio_list[0]) ||
539  	     !bio_list_empty(&current->bio_list[1])) &&
540  	    bs->rescue_workqueue)
541  		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
542  
543  	p = mempool_alloc(&bs->bio_pool, gfp_mask);
544  	if (!p && gfp_mask != saved_gfp) {
545  		punt_bios_to_rescuer(bs);
546  		gfp_mask = saved_gfp;
547  		p = mempool_alloc(&bs->bio_pool, gfp_mask);
548  	}
549  	if (unlikely(!p))
550  		return NULL;
551  	if (!mempool_is_saturated(&bs->bio_pool))
552  		opf &= ~REQ_ALLOC_CACHE;
553  
554  	bio = p + bs->front_pad;
555  	if (nr_vecs > BIO_INLINE_VECS) {
556  		struct bio_vec *bvl = NULL;
557  
558  		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
559  		if (!bvl && gfp_mask != saved_gfp) {
560  			punt_bios_to_rescuer(bs);
561  			gfp_mask = saved_gfp;
562  			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
563  		}
564  		if (unlikely(!bvl))
565  			goto err_free;
566  
567  		bio_init(bio, bdev, bvl, nr_vecs, opf);
568  	} else if (nr_vecs) {
569  		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
570  	} else {
571  		bio_init(bio, bdev, NULL, 0, opf);
572  	}
573  
574  	bio->bi_pool = bs;
575  	return bio;
576  
577  err_free:
578  	mempool_free(p, &bs->bio_pool);
579  	return NULL;
580  }
581  EXPORT_SYMBOL(bio_alloc_bioset);
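
/*
 * Illustrative sketch, not part of bio.c: the allocate-then-submit pattern
 * the comment above requires when __GFP_DIRECT_RECLAIM is set.  Each bio is
 * submitted before the next one is allocated from the same bio_set, so the
 * mempool guarantee never has to cover two bios at once.  The helper name,
 * the per-page granularity and the completion callback are assumptions for
 * the example.
 */
static void __maybe_unused example_write_pages(struct bio_set *bs,
		struct block_device *bdev, struct page **pages, int nr,
		sector_t sector, bio_end_io_t *end_io)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio;

		/* may sleep, but cannot fail thanks to the mempools in @bs */
		bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE, GFP_NOIO, bs);
		bio->bi_iter.bi_sector = sector +
			((sector_t)i << (PAGE_SHIFT - SECTOR_SHIFT));
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		bio->bi_end_io = end_io;

		/* submit before allocating the next bio from the same pool */
		submit_bio(bio);
	}
}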
582  
583  /**
584   * bio_kmalloc - kmalloc a bio
585   * @nr_vecs:	number of bio_vecs to allocate
586   * @gfp_mask:   the GFP_* mask given to the slab allocator
587   *
588   * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
589   * using bio_init() before use.  To free a bio returned from this function use
590   * kfree() after calling bio_uninit().  A bio returned from this function can
591   * be reused by calling bio_uninit() before calling bio_init() again.
592   *
593   * Note that, unlike bio_alloc() or bio_alloc_bioset(), allocations from this
594   * function are not backed by a mempool and can fail.  Do not use this function
595   * for allocations in the file system I/O path.
596   *
597   * Returns: Pointer to new bio on success, NULL on failure.
598   */
599  struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
600  {
601  	struct bio *bio;
602  
603  	if (nr_vecs > UIO_MAXIOV)
604  		return NULL;
605  	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
606  }
607  EXPORT_SYMBOL(bio_kmalloc);
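
/*
 * Illustrative sketch, not part of bio.c: the lifecycle described above for a
 * kmalloc'ed bio - bio_kmalloc() + bio_init(), then bio_uninit() + kfree()
 * once the I/O is done.  The helper name and the synchronous single-page read
 * are assumptions for the example.
 */
static int __maybe_unused example_kmalloc_read(struct block_device *bdev,
		struct page *page, sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_kmalloc(1, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	bio_init(bio, bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);

	bio_uninit(bio);
	kfree(bio);
	return ret;
}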
608  
609  void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
610  {
611  	struct bio_vec bv;
612  	struct bvec_iter iter;
613  
614  	__bio_for_each_segment(bv, bio, iter, start)
615  		memzero_bvec(&bv);
616  }
617  EXPORT_SYMBOL(zero_fill_bio_iter);
618  
619  /**
620   * bio_truncate - truncate the bio down to @new_size
621   * @bio:	the bio to be truncated
622   * @new_size:	new size for truncating the bio
623   *
624   * Description:
625   *   Truncate the bio to the new size @new_size. If bio_op(bio) is
626   *   REQ_OP_READ, zero the truncated part. This function should only
627   *   be used for handling corner cases, such as bio eod.
628   */
629  static void bio_truncate(struct bio *bio, unsigned new_size)
630  {
631  	struct bio_vec bv;
632  	struct bvec_iter iter;
633  	unsigned int done = 0;
634  	bool truncated = false;
635  
636  	if (new_size >= bio->bi_iter.bi_size)
637  		return;
638  
639  	if (bio_op(bio) != REQ_OP_READ)
640  		goto exit;
641  
642  	bio_for_each_segment(bv, bio, iter) {
643  		if (done + bv.bv_len > new_size) {
644  			unsigned offset;
645  
646  			if (!truncated)
647  				offset = new_size - done;
648  			else
649  				offset = 0;
650  			zero_user(bv.bv_page, bv.bv_offset + offset,
651  				  bv.bv_len - offset);
652  			truncated = true;
653  		}
654  		done += bv.bv_len;
655  	}
656  
657   exit:
658  	/*
659  	 * Don't touch the bvec table here; keep it truly immutable, since the
660  	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
661  	 * in its ->bi_end_io() callback.
662  	 *
663  	 * It is enough to truncate the bio by updating .bi_size, since we can
664  	 * build a correct bvec from the updated .bi_size for drivers.
665  	 */
666  	bio->bi_iter.bi_size = new_size;
667  }
668  
669  /**
670   * guard_bio_eod - truncate a BIO to fit the block device
671   * @bio:	bio to truncate
672   *
673   * This allows us to do IO even on the odd last sectors of a device, even if the
674   * block size is some multiple of the physical sector size.
675   *
676   * We'll just truncate the bio to the size of the device, and clear the end of
677   * the buffer head manually.  Truly out-of-range accesses will turn into actual
678   * I/O errors, this only handles the "we need to be able to do I/O at the final
679   * sector" case.
680   */
681  void guard_bio_eod(struct bio *bio)
682  {
683  	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
684  
685  	if (!maxsector)
686  		return;
687  
688  	/*
689  	 * If the *whole* IO is past the end of the device,
690  	 * let it through, and the IO layer will turn it into
691  	 * an EIO.
692  	 */
693  	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
694  		return;
695  
696  	maxsector -= bio->bi_iter.bi_sector;
697  	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
698  		return;
699  
700  	bio_truncate(bio, maxsector << 9);
701  }
702  
703  static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
704  				   unsigned int nr)
705  {
706  	unsigned int i = 0;
707  	struct bio *bio;
708  
709  	while ((bio = cache->free_list) != NULL) {
710  		cache->free_list = bio->bi_next;
711  		cache->nr--;
712  		bio_free(bio);
713  		if (++i == nr)
714  			break;
715  	}
716  	return i;
717  }
718  
719  static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
720  				  unsigned int nr)
721  {
722  	nr -= __bio_alloc_cache_prune(cache, nr);
723  	if (!READ_ONCE(cache->free_list)) {
724  		bio_alloc_irq_cache_splice(cache);
725  		__bio_alloc_cache_prune(cache, nr);
726  	}
727  }
728  
729  static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
730  {
731  	struct bio_set *bs;
732  
733  	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
734  	if (bs->cache) {
735  		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
736  
737  		bio_alloc_cache_prune(cache, -1U);
738  	}
739  	return 0;
740  }
741  
742  static void bio_alloc_cache_destroy(struct bio_set *bs)
743  {
744  	int cpu;
745  
746  	if (!bs->cache)
747  		return;
748  
749  	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
750  	for_each_possible_cpu(cpu) {
751  		struct bio_alloc_cache *cache;
752  
753  		cache = per_cpu_ptr(bs->cache, cpu);
754  		bio_alloc_cache_prune(cache, -1U);
755  	}
756  	free_percpu(bs->cache);
757  	bs->cache = NULL;
758  }
759  
760  static inline void bio_put_percpu_cache(struct bio *bio)
761  {
762  	struct bio_alloc_cache *cache;
763  
764  	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
765  	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) {
766  		put_cpu();
767  		bio_free(bio);
768  		return;
769  	}
770  
771  	bio_uninit(bio);
772  
773  	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
774  		bio->bi_next = cache->free_list;
775  		bio->bi_bdev = NULL;
776  		cache->free_list = bio;
777  		cache->nr++;
778  	} else {
779  		unsigned long flags;
780  
781  		local_irq_save(flags);
782  		bio->bi_next = cache->free_list_irq;
783  		cache->free_list_irq = bio;
784  		cache->nr_irq++;
785  		local_irq_restore(flags);
786  	}
787  	put_cpu();
788  }
789  
790  /**
791   * bio_put - release a reference to a bio
792   * @bio:   bio to release reference to
793   *
794   * Description:
795   *   Put a reference to a &struct bio, either one you have gotten with
796   *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
797   **/
798  void bio_put(struct bio *bio)
799  {
800  	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
801  		BUG_ON(!atomic_read(&bio->__bi_cnt));
802  		if (!atomic_dec_and_test(&bio->__bi_cnt))
803  			return;
804  	}
805  	if (bio->bi_opf & REQ_ALLOC_CACHE)
806  		bio_put_percpu_cache(bio);
807  	else
808  		bio_free(bio);
809  }
810  EXPORT_SYMBOL(bio_put);
811  
812  static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
813  {
814  	bio_set_flag(bio, BIO_CLONED);
815  	bio->bi_ioprio = bio_src->bi_ioprio;
816  	bio->bi_iter = bio_src->bi_iter;
817  
818  	if (bio->bi_bdev) {
819  		if (bio->bi_bdev == bio_src->bi_bdev &&
820  		    bio_flagged(bio_src, BIO_REMAPPED))
821  			bio_set_flag(bio, BIO_REMAPPED);
822  		bio_clone_blkg_association(bio, bio_src);
823  	}
824  
825  	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
826  		return -ENOMEM;
827  	if (bio_integrity(bio_src) &&
828  	    bio_integrity_clone(bio, bio_src, gfp) < 0)
829  		return -ENOMEM;
830  	return 0;
831  }
832  
833  /**
834   * bio_alloc_clone - clone a bio that shares the original bio's biovec
835   * @bdev: block_device to clone onto
836   * @bio_src: bio to clone from
837   * @gfp: allocation priority
838   * @bs: bio_set to allocate from
839   *
840   * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
841   * bio, but not the actual data it points to.
842   *
843   * The caller must ensure that the returned bio is not freed before @bio_src.
844   */
845  struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
846  		gfp_t gfp, struct bio_set *bs)
847  {
848  	struct bio *bio;
849  
850  	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
851  	if (!bio)
852  		return NULL;
853  
854  	if (__bio_clone(bio, bio_src, gfp) < 0) {
855  		bio_put(bio);
856  		return NULL;
857  	}
858  	bio->bi_io_vec = bio_src->bi_io_vec;
859  
860  	return bio;
861  }
862  EXPORT_SYMBOL(bio_alloc_clone);
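
/*
 * Illustrative sketch, not part of bio.c: how a stacking driver might use
 * bio_alloc_clone() to redirect I/O to a lower device while sharing the
 * original biovec.  The helper name, the sector remapping and completing the
 * original through bio_chain() are assumptions for the example.
 */
static void __maybe_unused example_remap_to_lower(struct bio *bio,
		struct block_device *lower_bdev, sector_t offset,
		struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO, bs);
	if (!clone) {
		bio_io_error(bio);
		return;
	}

	clone->bi_iter.bi_sector += offset;

	/* the original completes (and can be freed) only after the clone */
	bio_chain(clone, bio);
	submit_bio_noacct(clone);
	bio_endio(bio);
}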
863  
864  /**
865   * bio_init_clone - clone a bio that shares the original bio's biovec
866   * @bdev: block_device to clone onto
867   * @bio: bio to clone into
868   * @bio_src: bio to clone from
869   * @gfp: allocation priority
870   *
871   * Initialize a new bio in caller provided memory that is a clone of @bio_src.
872   * The caller owns the returned bio, but not the actual data it points to.
873   *
874   * The caller must ensure that @bio_src is not freed before @bio.
875   */
876  int bio_init_clone(struct block_device *bdev, struct bio *bio,
877  		struct bio *bio_src, gfp_t gfp)
878  {
879  	int ret;
880  
881  	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
882  	ret = __bio_clone(bio, bio_src, gfp);
883  	if (ret)
884  		bio_uninit(bio);
885  	return ret;
886  }
887  EXPORT_SYMBOL(bio_init_clone);
888  
889  /**
890   * bio_full - check if the bio is full
891   * @bio:	bio to check
892   * @len:	length of one segment to be added
893   *
894   * Return true if @bio is full and one segment with @len bytes can't be
895   * added to the bio; otherwise return false.
896   */
897  static inline bool bio_full(struct bio *bio, unsigned len)
898  {
899  	if (bio->bi_vcnt >= bio->bi_max_vecs)
900  		return true;
901  	if (bio->bi_iter.bi_size > UINT_MAX - len)
902  		return true;
903  	return false;
904  }
905  
906  static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
907  		unsigned int len, unsigned int off, bool *same_page)
908  {
909  	size_t bv_end = bv->bv_offset + bv->bv_len;
910  	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
911  	phys_addr_t page_addr = page_to_phys(page);
912  
913  	if (vec_end_addr + 1 != page_addr + off)
914  		return false;
915  	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
916  		return false;
917  	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
918  		return false;
919  
920  	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
921  	if (!*same_page) {
922  		if (IS_ENABLED(CONFIG_KMSAN))
923  			return false;
924  		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
925  			return false;
926  	}
927  
928  	bv->bv_len += len;
929  	return true;
930  }
931  
932  /*
933   * Try to merge a page into a segment, while obeying the hardware segment
934   * size limit.  This is not for normal read/write bios, but for passthrough
935   * or Zone Append operations that we can't split.
936   */
937  bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
938  		struct page *page, unsigned len, unsigned offset,
939  		bool *same_page)
940  {
941  	unsigned long mask = queue_segment_boundary(q);
942  	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
943  	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
944  
945  	if ((addr1 | mask) != (addr2 | mask))
946  		return false;
947  	if (len > queue_max_segment_size(q) - bv->bv_len)
948  		return false;
949  	return bvec_try_merge_page(bv, page, len, offset, same_page);
950  }
951  
952  /**
953   * bio_add_hw_page - attempt to add a page to a bio with hw constraints
954   * @q: the target queue
955   * @bio: destination bio
956   * @page: page to add
957   * @len: vec entry length
958   * @offset: vec entry offset
959   * @max_sectors: maximum number of sectors that can be added
960   * @same_page: return if the segment has been merged inside the same page
961   *
962   * Add a page to a bio while respecting the hardware max_sectors, max_segment
963   * and gap limitations.
964   */
965  int bio_add_hw_page(struct request_queue *q, struct bio *bio,
966  		struct page *page, unsigned int len, unsigned int offset,
967  		unsigned int max_sectors, bool *same_page)
968  {
969  	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
970  		return 0;
971  
972  	if (((bio->bi_iter.bi_size + len) >> SECTOR_SHIFT) > max_sectors)
973  		return 0;
974  
975  	if (bio->bi_vcnt > 0) {
976  		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
977  
978  		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
979  				same_page)) {
980  			bio->bi_iter.bi_size += len;
981  			return len;
982  		}
983  
984  		if (bio->bi_vcnt >=
985  		    min(bio->bi_max_vecs, queue_max_segments(q)))
986  			return 0;
987  
988  		/*
989  		 * If the queue doesn't support SG gaps and adding this segment
990  		 * would create a gap, disallow it.
991  		 */
992  		if (bvec_gap_to_prev(&q->limits, bv, offset))
993  			return 0;
994  	}
995  
996  	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
997  	bio->bi_vcnt++;
998  	bio->bi_iter.bi_size += len;
999  	return len;
1000  }
1001  
1002  /**
1003   * bio_add_pc_page	- attempt to add page to passthrough bio
1004   * @q: the target queue
1005   * @bio: destination bio
1006   * @page: page to add
1007   * @len: vec entry length
1008   * @offset: vec entry offset
1009   *
1010   * Attempt to add a page to the bio_vec maplist. This can fail for a
1011   * number of reasons, such as the bio being full or target block device
1012   * limitations. The target block device must allow bios up to PAGE_SIZE,
1013   * so it is always possible to add a single page to an empty bio.
1014   *
1015   * This should only be used by passthrough bios.
1016   */
1017  int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1018  		struct page *page, unsigned int len, unsigned int offset)
1019  {
1020  	bool same_page = false;
1021  	return bio_add_hw_page(q, bio, page, len, offset,
1022  			queue_max_hw_sectors(q), &same_page);
1023  }
1024  EXPORT_SYMBOL(bio_add_pc_page);
1025  
1026  /**
1027   * bio_add_zone_append_page - attempt to add page to zone-append bio
1028   * @bio: destination bio
1029   * @page: page to add
1030   * @len: vec entry length
1031   * @offset: vec entry offset
1032   *
1033   * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
1034   * for a zone-append request. This can fail for a number of reasons, such as
1035   * the bio being full, the target block device not being a zoned block device,
1036   * or other limitations of the target block device. The target block device must
1037   * allow bios up to PAGE_SIZE, so it is always possible to add a single page
1038   * to an empty bio.
1039   *
1040   * Returns: number of bytes added to the bio, or 0 in case of a failure.
1041   */
1042  int bio_add_zone_append_page(struct bio *bio, struct page *page,
1043  			     unsigned int len, unsigned int offset)
1044  {
1045  	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1046  	bool same_page = false;
1047  
1048  	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
1049  		return 0;
1050  
1051  	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
1052  		return 0;
1053  
1054  	return bio_add_hw_page(q, bio, page, len, offset,
1055  			       queue_max_zone_append_sectors(q), &same_page);
1056  }
1057  EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
1058  
1059  /**
1060   * __bio_add_page - add page(s) to a bio in a new segment
1061   * @bio: destination bio
1062   * @page: start page to add
1063   * @len: length of the data to add, may cross pages
1064   * @off: offset of the data relative to @page, may cross pages
1065   *
1066   * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
1067   * that @bio has space for another bvec.
1068   */
1069  void __bio_add_page(struct bio *bio, struct page *page,
1070  		unsigned int len, unsigned int off)
1071  {
1072  	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1073  	WARN_ON_ONCE(bio_full(bio, len));
1074  
1075  	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1076  	bio->bi_iter.bi_size += len;
1077  	bio->bi_vcnt++;
1078  }
1079  EXPORT_SYMBOL_GPL(__bio_add_page);
1080  
1081  /**
1082   *	bio_add_page	-	attempt to add page(s) to bio
1083   *	@bio: destination bio
1084   *	@page: start page to add
1085   *	@len: vec entry length, may cross pages
1086   *	@offset: vec entry offset relative to @page, may cross pages
1087   *
1088   *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1089   *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1090   */
1091  int bio_add_page(struct bio *bio, struct page *page,
1092  		 unsigned int len, unsigned int offset)
1093  {
1094  	bool same_page = false;
1095  
1096  	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1097  		return 0;
1098  	if (bio->bi_iter.bi_size > UINT_MAX - len)
1099  		return 0;
1100  
1101  	if (bio->bi_vcnt > 0 &&
1102  	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1103  				page, len, offset, &same_page)) {
1104  		bio->bi_iter.bi_size += len;
1105  		return len;
1106  	}
1107  
1108  	if (bio->bi_vcnt >= bio->bi_max_vecs)
1109  		return 0;
1110  	__bio_add_page(bio, page, len, offset);
1111  	return len;
1112  }
1113  EXPORT_SYMBOL(bio_add_page);
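
/*
 * Illustrative sketch, not part of bio.c: filling a bio page by page with
 * bio_add_page() and honouring its return value, so the caller knows when the
 * bio is full and a new one is needed.  The helper name and the whole-page
 * granularity are assumptions for the example.
 */
static int __maybe_unused example_add_pages(struct bio *bio,
		struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* returns the number of bytes added, 0 if the bio is full */
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;
	}

	/* a caller would typically submit @bio here and retry pages[i..] */
	return i;
}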
1114  
1115  void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1116  			  size_t off)
1117  {
1118  	WARN_ON_ONCE(len > UINT_MAX);
1119  	WARN_ON_ONCE(off > UINT_MAX);
1120  	__bio_add_page(bio, &folio->page, len, off);
1121  }
1122  
1123  /**
1124   * bio_add_folio - Attempt to add part of a folio to a bio.
1125   * @bio: BIO to add to.
1126   * @folio: Folio to add.
1127   * @len: How many bytes from the folio to add.
1128   * @off: First byte in this folio to add.
1129   *
1130   * Filesystems that use folios can call this function instead of calling
1131   * bio_add_page() for each page in the folio.  If @off is bigger than
1132   * PAGE_SIZE, this function can create a bio_vec that starts in a page
1133   * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1134   *
1135   * Return: Whether the addition was successful.
1136   */
1137  bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1138  		   size_t off)
1139  {
1140  	if (len > UINT_MAX || off > UINT_MAX)
1141  		return false;
1142  	return bio_add_page(bio, &folio->page, len, off) > 0;
1143  }
1144  EXPORT_SYMBOL(bio_add_folio);
1145  
1146  void __bio_release_pages(struct bio *bio, bool mark_dirty)
1147  {
1148  	struct folio_iter fi;
1149  
1150  	bio_for_each_folio_all(fi, bio) {
1151  		struct page *page;
1152  		size_t nr_pages;
1153  
1154  		if (mark_dirty) {
1155  			folio_lock(fi.folio);
1156  			folio_mark_dirty(fi.folio);
1157  			folio_unlock(fi.folio);
1158  		}
1159  		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
1160  		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1161  			   fi.offset / PAGE_SIZE + 1;
1162  		do {
1163  			bio_release_page(bio, page++);
1164  		} while (--nr_pages != 0);
1165  	}
1166  }
1167  EXPORT_SYMBOL_GPL(__bio_release_pages);
1168  
1169  void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1170  {
1171  	size_t size = iov_iter_count(iter);
1172  
1173  	WARN_ON_ONCE(bio->bi_max_vecs);
1174  
1175  	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1176  		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1177  		size_t max_sectors = queue_max_zone_append_sectors(q);
1178  
1179  		size = min(size, max_sectors << SECTOR_SHIFT);
1180  	}
1181  
1182  	bio->bi_vcnt = iter->nr_segs;
1183  	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1184  	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1185  	bio->bi_iter.bi_size = size;
1186  	bio_set_flag(bio, BIO_CLONED);
1187  }
1188  
1189  static int bio_iov_add_page(struct bio *bio, struct page *page,
1190  		unsigned int len, unsigned int offset)
1191  {
1192  	bool same_page = false;
1193  
1194  	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
1195  		return -EIO;
1196  
1197  	if (bio->bi_vcnt > 0 &&
1198  	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1199  				page, len, offset, &same_page)) {
1200  		bio->bi_iter.bi_size += len;
1201  		if (same_page)
1202  			bio_release_page(bio, page);
1203  		return 0;
1204  	}
1205  	__bio_add_page(bio, page, len, offset);
1206  	return 0;
1207  }
1208  
1209  static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1210  		unsigned int len, unsigned int offset)
1211  {
1212  	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1213  	bool same_page = false;
1214  
1215  	if (bio_add_hw_page(q, bio, page, len, offset,
1216  			queue_max_zone_append_sectors(q), &same_page) != len)
1217  		return -EINVAL;
1218  	if (same_page)
1219  		bio_release_page(bio, page);
1220  	return 0;
1221  }
1222  
1223  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1224  
1225  /**
1226   * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1227   * @bio: bio to add pages to
1228   * @iter: iov iterator describing the region to be mapped
1229   *
1230   * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
1231   * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
1232   * For a multi-segment *iter, this function only adds pages from the next
1233   * non-empty segment of the iov iterator.
1234   */
1235  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1236  {
1237  	iov_iter_extraction_t extraction_flags = 0;
1238  	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1239  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1240  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1241  	struct page **pages = (struct page **)bv;
1242  	ssize_t size, left;
1243  	unsigned len, i = 0;
1244  	size_t offset;
1245  	int ret = 0;
1246  
1247  	/*
1248  	 * Move page array up in the allocated memory for the bio vecs as far as
1249  	 * possible so that we can start filling biovecs from the beginning
1250  	 * without overwriting the temporary page array.
1251  	 */
1252  	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1253  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1254  
1255  	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1256  		extraction_flags |= ITER_ALLOW_P2PDMA;
1257  
1258  	/*
1259  	 * Each segment in the iov is required to be a block size multiple.
1260  	 * However, we may not be able to get the entire segment if it spans
1261  	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
1262  	 * result to ensure the bio's total size is correct. The remainder of
1263  	 * the iov data will be picked up in the next bio iteration.
1264  	 */
1265  	size = iov_iter_extract_pages(iter, &pages,
1266  				      UINT_MAX - bio->bi_iter.bi_size,
1267  				      nr_pages, extraction_flags, &offset);
1268  	if (unlikely(size <= 0))
1269  		return size ? size : -EFAULT;
1270  
1271  	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
1272  
1273  	if (bio->bi_bdev) {
1274  		size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1275  		iov_iter_revert(iter, trim);
1276  		size -= trim;
1277  	}
1278  
1279  	if (unlikely(!size)) {
1280  		ret = -EFAULT;
1281  		goto out;
1282  	}
1283  
1284  	for (left = size, i = 0; left > 0; left -= len, i++) {
1285  		struct page *page = pages[i];
1286  
1287  		len = min_t(size_t, PAGE_SIZE - offset, left);
1288  		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1289  			ret = bio_iov_add_zone_append_page(bio, page, len,
1290  					offset);
1291  			if (ret)
1292  				break;
1293  		} else
1294  			bio_iov_add_page(bio, page, len, offset);
1295  
1296  		offset = 0;
1297  	}
1298  
1299  	iov_iter_revert(iter, left);
1300  out:
1301  	while (i < nr_pages)
1302  		bio_release_page(bio, pages[i++]);
1303  
1304  	return ret;
1305  }
1306  
1307  /**
1308   * bio_iov_iter_get_pages - add user or kernel pages to a bio
1309   * @bio: bio to add pages to
1310   * @iter: iov iterator describing the region to be added
1311   *
1312   * This takes either an iterator pointing to user memory, or one pointing to
1313   * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1314   * map them into the kernel. On IO completion, the caller should put those
1315   * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1316   * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1317   * to ensure the bvecs and pages stay referenced until the submitted I/O is
1318   * completed by a call to ->ki_complete() or returns with an error other than
1319   * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1320   * on IO completion. If it isn't, then pages should be released.
1321   *
1322   * The function tries, but does not guarantee, to pin as many pages as
1323   * fit into the bio, or are requested in @iter, whichever is smaller. If
1324   * MM encounters an error pinning the requested pages, it stops. An error
1325   * is returned only if no pages could be pinned.
1326   */
1327  int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1328  {
1329  	int ret = 0;
1330  
1331  	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1332  		return -EIO;
1333  
1334  	if (iov_iter_is_bvec(iter)) {
1335  		bio_iov_bvec_set(bio, iter);
1336  		iov_iter_advance(iter, bio->bi_iter.bi_size);
1337  		return 0;
1338  	}
1339  
1340  	if (iov_iter_extract_will_pin(iter))
1341  		bio_set_flag(bio, BIO_PAGE_PINNED);
1342  	do {
1343  		ret = __bio_iov_iter_get_pages(bio, iter);
1344  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1345  
1346  	return bio->bi_vcnt ? 0 : ret;
1347  }
1348  EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
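
/*
 * Illustrative sketch, not part of bio.c: the loop a simple direct-I/O path
 * might run around bio_iov_iter_get_pages() - fill a bio from the iterator,
 * submit it, release the pinned pages, and repeat until the iterator is
 * drained.  The helper name and the synchronous, one-bio-at-a-time policy are
 * assumptions for the example.
 */
static int __maybe_unused example_dio_write(struct block_device *bdev,
		struct iov_iter *iter, sector_t sector)
{
	int ret = 0;

	while (iov_iter_count(iter)) {
		struct bio *bio;

		bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				REQ_OP_WRITE, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret) {
			bio_put(bio);
			break;
		}

		sector += bio_sectors(bio);
		ret = submit_bio_wait(bio);
		bio_release_pages(bio, false);
		bio_put(bio);
		if (ret)
			break;
	}
	return ret;
}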
1349  
1350  static void submit_bio_wait_endio(struct bio *bio)
1351  {
1352  	complete(bio->bi_private);
1353  }
1354  
1355  /**
1356   * submit_bio_wait - submit a bio, and wait until it completes
1357   * @bio: The &struct bio which describes the I/O
1358   *
1359   * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1360   * bio_endio() on failure.
1361   *
1362   * WARNING: Unlike how submit_bio() is usually used, this function does not
1363   * consume a reference to the bio. The caller must drop the reference on
1364   * their own.
1365   */
1366  int submit_bio_wait(struct bio *bio)
1367  {
1368  	DECLARE_COMPLETION_ONSTACK_MAP(done,
1369  			bio->bi_bdev->bd_disk->lockdep_map);
1370  	unsigned long hang_check;
1371  
1372  	bio->bi_private = &done;
1373  	bio->bi_end_io = submit_bio_wait_endio;
1374  	bio->bi_opf |= REQ_SYNC;
1375  	submit_bio(bio);
1376  
1377  	/* Prevent hang_check timer from firing at us during very long I/O */
1378  	hang_check = sysctl_hung_task_timeout_secs;
1379  	if (hang_check)
1380  		while (!wait_for_completion_io_timeout(&done,
1381  					hang_check * (HZ/2)))
1382  			;
1383  	else
1384  		wait_for_completion_io(&done);
1385  
1386  	return blk_status_to_errno(bio->bi_status);
1387  }
1388  EXPORT_SYMBOL(submit_bio_wait);
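
/*
 * Illustrative sketch, not part of bio.c: a synchronous single-page read built
 * on submit_bio_wait().  Per the warning above, the caller still owns the bio
 * reference afterwards and must drop it with bio_put().  The helper name is an
 * assumption for the example.
 */
static int __maybe_unused example_sync_read(struct block_device *bdev,
		struct page *page, sector_t sector)
{
	/* cannot fail: fs_bio_set is mempool backed and GFP_KERNEL can reclaim */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	int ret;

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);	/* submit_bio_wait() did not consume the reference */
	return ret;
}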
1389  
1390  void __bio_advance(struct bio *bio, unsigned bytes)
1391  {
1392  	if (bio_integrity(bio))
1393  		bio_integrity_advance(bio, bytes);
1394  
1395  	bio_crypt_advance(bio, bytes);
1396  	bio_advance_iter(bio, &bio->bi_iter, bytes);
1397  }
1398  EXPORT_SYMBOL(__bio_advance);
1399  
1400  void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1401  			struct bio *src, struct bvec_iter *src_iter)
1402  {
1403  	while (src_iter->bi_size && dst_iter->bi_size) {
1404  		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1405  		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1406  		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1407  		void *src_buf = bvec_kmap_local(&src_bv);
1408  		void *dst_buf = bvec_kmap_local(&dst_bv);
1409  
1410  		memcpy(dst_buf, src_buf, bytes);
1411  
1412  		kunmap_local(dst_buf);
1413  		kunmap_local(src_buf);
1414  
1415  		bio_advance_iter_single(src, src_iter, bytes);
1416  		bio_advance_iter_single(dst, dst_iter, bytes);
1417  	}
1418  }
1419  EXPORT_SYMBOL(bio_copy_data_iter);
1420  
1421  /**
1422   * bio_copy_data - copy contents of data buffers from one bio to another
1423   * @src: source bio
1424   * @dst: destination bio
1425   *
1426   * Stops when it reaches the end of either @src or @dst - that is, copies
1427   * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1428   */
1429  void bio_copy_data(struct bio *dst, struct bio *src)
1430  {
1431  	struct bvec_iter src_iter = src->bi_iter;
1432  	struct bvec_iter dst_iter = dst->bi_iter;
1433  
1434  	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1435  }
1436  EXPORT_SYMBOL(bio_copy_data);
1437  
1438  void bio_free_pages(struct bio *bio)
1439  {
1440  	struct bio_vec *bvec;
1441  	struct bvec_iter_all iter_all;
1442  
1443  	bio_for_each_segment_all(bvec, bio, iter_all)
1444  		__free_page(bvec->bv_page);
1445  }
1446  EXPORT_SYMBOL(bio_free_pages);
1447  
1448  /*
1449   * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1450   * for performing direct-IO in BIOs.
1451   *
1452   * The problem is that we cannot run folio_mark_dirty() from interrupt context
1453   * because the required locks are not interrupt-safe.  So what we can do is to
1454   * mark the pages dirty _before_ performing IO.  And in interrupt context,
1455   * check that the pages are still dirty.   If so, fine.  If not, redirty them
1456   * in process context.
1457   *
1458   * Note that this code is very hard to test under normal circumstances because
1459   * direct-io pins the pages with get_user_pages().  This makes
1460   * is_page_cache_freeable return false, and the VM will not clean the pages.
1461   * But other code (e.g., flusher threads) could clean the pages if they are
1462   * mapped pagecache pages.
1463   *
1464   * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1465   * deferred bio dirtying paths.
1466   */
1467  
1468  /*
1469   * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1470   */
1471  void bio_set_pages_dirty(struct bio *bio)
1472  {
1473  	struct folio_iter fi;
1474  
1475  	bio_for_each_folio_all(fi, bio) {
1476  		folio_lock(fi.folio);
1477  		folio_mark_dirty(fi.folio);
1478  		folio_unlock(fi.folio);
1479  	}
1480  }
1481  EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1482  
1483  /*
1484   * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1485   * If they are, then fine.  If, however, some pages are clean then they must
1486   * have been written out during the direct-IO read.  So we take another ref on
1487   * the BIO and re-dirty the pages in process context.
1488   *
1489   * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1490   * here on.  It will unpin each page and will run one bio_put() against the
1491   * BIO.
1492   */
1493  
1494  static void bio_dirty_fn(struct work_struct *work);
1495  
1496  static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1497  static DEFINE_SPINLOCK(bio_dirty_lock);
1498  static struct bio *bio_dirty_list;
1499  
1500  /*
1501   * This runs in process context
1502   */
1503  static void bio_dirty_fn(struct work_struct *work)
1504  {
1505  	struct bio *bio, *next;
1506  
1507  	spin_lock_irq(&bio_dirty_lock);
1508  	next = bio_dirty_list;
1509  	bio_dirty_list = NULL;
1510  	spin_unlock_irq(&bio_dirty_lock);
1511  
1512  	while ((bio = next) != NULL) {
1513  		next = bio->bi_private;
1514  
1515  		bio_release_pages(bio, true);
1516  		bio_put(bio);
1517  	}
1518  }
1519  
1520  void bio_check_pages_dirty(struct bio *bio)
1521  {
1522  	struct folio_iter fi;
1523  	unsigned long flags;
1524  
1525  	bio_for_each_folio_all(fi, bio) {
1526  		if (!folio_test_dirty(fi.folio))
1527  			goto defer;
1528  	}
1529  
1530  	bio_release_pages(bio, false);
1531  	bio_put(bio);
1532  	return;
1533  defer:
1534  	spin_lock_irqsave(&bio_dirty_lock, flags);
1535  	bio->bi_private = bio_dirty_list;
1536  	bio_dirty_list = bio;
1537  	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1538  	schedule_work(&bio_dirty_work);
1539  }
1540  EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1541  
1542  static inline bool bio_remaining_done(struct bio *bio)
1543  {
1544  	/*
1545  	 * If we're not chaining, then ->__bi_remaining is always 1 and
1546  	 * we always end io on the first invocation.
1547  	 */
1548  	if (!bio_flagged(bio, BIO_CHAIN))
1549  		return true;
1550  
1551  	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1552  
1553  	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1554  		bio_clear_flag(bio, BIO_CHAIN);
1555  		return true;
1556  	}
1557  
1558  	return false;
1559  }
1560  
1561  /**
1562   * bio_endio - end I/O on a bio
1563   * @bio:	bio
1564   *
1565   * Description:
1566   *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1567   *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1568   *   bio unless they own it and thus know that it has an end_io function.
1569   *
1570   *   bio_endio() can be called several times on a bio that has been chained
1571   *   using bio_chain().  The ->bi_end_io() function will only be called the
1572   *   last time.
1573   **/
1574  void bio_endio(struct bio *bio)
1575  {
1576  again:
1577  	if (!bio_remaining_done(bio))
1578  		return;
1579  	if (!bio_integrity_endio(bio))
1580  		return;
1581  
1582  	rq_qos_done_bio(bio);
1583  
1584  	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1585  		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1586  		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1587  	}
1588  
1589  	/*
1590  	 * Need to have a real endio function for chained bios, otherwise
1591  	 * various corner cases will break (like stacking block devices that
1592  	 * save/restore bi_end_io) - however, we want to avoid unbounded
1593  	 * recursion and blowing the stack. Tail call optimization would
1594  	 * handle this, but compiling with frame pointers also disables
1595  	 * gcc's sibling call optimization.
1596  	 */
1597  	if (bio->bi_end_io == bio_chain_endio) {
1598  		bio = __bio_chain_endio(bio);
1599  		goto again;
1600  	}
1601  
1602  	blk_throtl_bio_endio(bio);
1603  	/* release cgroup info */
1604  	bio_uninit(bio);
1605  	if (bio->bi_end_io)
1606  		bio->bi_end_io(bio);
1607  }
1608  EXPORT_SYMBOL(bio_endio);
1609  
1610  /**
1611   * bio_split - split a bio
1612   * @bio:	bio to split
1613   * @sectors:	number of sectors to split from the front of @bio
1614   * @gfp:	gfp mask
1615   * @bs:		bio set to allocate from
1616   *
1617   * Allocates and returns a new bio which represents @sectors from the start of
1618   * @bio, and updates @bio to represent the remaining sectors.
1619   *
1620   * Unless this is a discard request the newly allocated bio will point
1621   * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1622   * neither @bio nor @bs are freed before the split bio.
1623   */
1624  struct bio *bio_split(struct bio *bio, int sectors,
1625  		      gfp_t gfp, struct bio_set *bs)
1626  {
1627  	struct bio *split;
1628  
1629  	BUG_ON(sectors <= 0);
1630  	BUG_ON(sectors >= bio_sectors(bio));
1631  
1632  	/* Zone append commands cannot be split */
1633  	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1634  		return NULL;
1635  
1636  	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1637  	if (!split)
1638  		return NULL;
1639  
1640  	split->bi_iter.bi_size = sectors << 9;
1641  
1642  	if (bio_integrity(split))
1643  		bio_integrity_trim(split);
1644  
1645  	bio_advance(bio, split->bi_iter.bi_size);
1646  
1647  	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1648  		bio_set_flag(split, BIO_TRACE_COMPLETION);
1649  
1650  	return split;
1651  }
1652  EXPORT_SYMBOL(bio_split);
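
/*
 * Illustrative sketch, not part of bio.c: splitting a large bio at a fixed
 * sector boundary and pushing the front pieces down the stack, while the
 * remainder stays in the original bio.  The helper name, the use of
 * fs_bio_set and the chunk size are assumptions for the example.
 */
static void __maybe_unused example_split_and_submit(struct bio *bio,
		unsigned int max_sectors)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split;

		split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
		if (!split)
			break;

		/* each split piece completes the original via the chain */
		bio_chain(split, bio);
		submit_bio_noacct(split);
	}
	submit_bio_noacct(bio);
}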
1653  
1654  /**
1655   * bio_trim - trim a bio
1656   * @bio:	bio to trim
1657   * @offset:	number of sectors to trim from the front of @bio
1658   * @size:	size we want to trim @bio to, in sectors
1659   *
1660   * This function is typically used for bios that are cloned and submitted
1661   * to the underlying device in parts.
1662   */
1663  void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1664  {
1665  	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1666  			 offset + size > bio_sectors(bio)))
1667  		return;
1668  
1669  	size <<= 9;
1670  	if (offset == 0 && size == bio->bi_iter.bi_size)
1671  		return;
1672  
1673  	bio_advance(bio, offset << 9);
1674  	bio->bi_iter.bi_size = size;
1675  
1676  	if (bio_integrity(bio))
1677  		bio_integrity_trim(bio);
1678  }
1679  EXPORT_SYMBOL_GPL(bio_trim);
1680  
1681  /*
1682   * Create memory pools for biovecs in a bio_set.
1683   * Use the global biovec slabs created for general use.
1684   */
1685  int biovec_init_pool(mempool_t *pool, int pool_entries)
1686  {
1687  	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1688  
1689  	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1690  }
1691  
1692  /*
1693   * bioset_exit - exit a bioset initialized with bioset_init()
1694   *
1695   * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1696   * kzalloc()).
1697   */
1698  void bioset_exit(struct bio_set *bs)
1699  {
1700  	bio_alloc_cache_destroy(bs);
1701  	if (bs->rescue_workqueue)
1702  		destroy_workqueue(bs->rescue_workqueue);
1703  	bs->rescue_workqueue = NULL;
1704  
1705  	mempool_exit(&bs->bio_pool);
1706  	mempool_exit(&bs->bvec_pool);
1707  
1708  	bioset_integrity_free(bs);
1709  	if (bs->bio_slab)
1710  		bio_put_slab(bs);
1711  	bs->bio_slab = NULL;
1712  }
1713  EXPORT_SYMBOL(bioset_exit);
1714  
1715  /**
1716   * bioset_init - Initialize a bio_set
1717   * @bs:		pool to initialize
1718   * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1719   * @front_pad:	Number of bytes to allocate in front of the returned bio
1720   * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1721   *              and %BIOSET_NEED_RESCUER
1722   *
1723   * Description:
1724   *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1725   *    to ask for a number of bytes to be allocated in front of the bio.
1726   *    Front pad allocation is useful for embedding the bio inside
1727   *    another structure, to avoid allocating extra data to go with the bio.
1728   *    Note that the bio must always be embedded at the END of that structure,
1729   *    or things will break badly.
1730   *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1731   *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1732   *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1733   *    to dispatch queued requests when the mempool runs out of space.
1734   *
1735   */
1736  int bioset_init(struct bio_set *bs,
1737  		unsigned int pool_size,
1738  		unsigned int front_pad,
1739  		int flags)
1740  {
1741  	bs->front_pad = front_pad;
1742  	if (flags & BIOSET_NEED_BVECS)
1743  		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1744  	else
1745  		bs->back_pad = 0;
1746  
1747  	spin_lock_init(&bs->rescue_lock);
1748  	bio_list_init(&bs->rescue_list);
1749  	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1750  
1751  	bs->bio_slab = bio_find_or_create_slab(bs);
1752  	if (!bs->bio_slab)
1753  		return -ENOMEM;
1754  
1755  	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1756  		goto bad;
1757  
1758  	if ((flags & BIOSET_NEED_BVECS) &&
1759  	    biovec_init_pool(&bs->bvec_pool, pool_size))
1760  		goto bad;
1761  
1762  	if (flags & BIOSET_NEED_RESCUER) {
1763  		bs->rescue_workqueue = alloc_workqueue("bioset",
1764  							WQ_MEM_RECLAIM, 0);
1765  		if (!bs->rescue_workqueue)
1766  			goto bad;
1767  	}
1768  	if (flags & BIOSET_PERCPU_CACHE) {
1769  		bs->cache = alloc_percpu(struct bio_alloc_cache);
1770  		if (!bs->cache)
1771  			goto bad;
1772  		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1773  	}
1774  
1775  	return 0;
1776  bad:
1777  	bioset_exit(bs);
1778  	return -ENOMEM;
1779  }
1780  EXPORT_SYMBOL(bioset_init);
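
/*
 * Illustrative sketch, not part of bio.c: a driver embedding its per-I/O state
 * in front of the bio via @front_pad, as the description above suggests.  The
 * structure, the bio_set instance, the pool size and the completion wiring are
 * all assumptions for the example.
 */
struct example_io {
	struct completion	*done;
	struct bio		bio;	/* must come last, see the note above */
};

static struct bio_set example_bio_set;

static int __maybe_unused example_bioset_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio), BIOSET_NEED_BVECS);
}

static __maybe_unused struct example_io *example_io_alloc(
		struct block_device *bdev, blk_opf_t opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, 1, opf, GFP_NOIO,
					   &example_bio_set);

	if (!bio)
		return NULL;
	return container_of(bio, struct example_io, bio);
}

static void __maybe_unused example_endio(struct bio *bio)
{
	struct example_io *io = container_of(bio, struct example_io, bio);

	complete(io->done);
	bio_put(bio);	/* frees the whole allocation, front pad included */
}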
1781  
1782  static int __init init_bio(void)
1783  {
1784  	int i;
1785  
1786  	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1787  
1788  	bio_integrity_init();
1789  
1790  	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1791  		struct biovec_slab *bvs = bvec_slabs + i;
1792  
1793  		bvs->slab = kmem_cache_create(bvs->name,
1794  				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1795  				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1796  	}
1797  
1798  	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1799  					bio_cpu_dead);
1800  
1801  	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1802  			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1803  		panic("bio: can't allocate bios\n");
1804  
1805  	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1806  		panic("bio: can't create integrity pool\n");
1807  
1808  	return 0;
1809  }
1810  subsys_initcall(init_bio);
1811