xref: /openbmc/linux/fs/xfs/xfs_buf.c (revision 552b8b36)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 
37 #include "xfs_format.h"
38 #include "xfs_log_format.h"
39 #include "xfs_trans_resv.h"
40 #include "xfs_sb.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43 #include "xfs_log.h"
44 
45 static kmem_zone_t *xfs_buf_zone;
46 
47 #ifdef XFS_BUF_LOCK_TRACKING
48 # define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
49 # define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
50 # define XB_GET_OWNER(bp)	((bp)->b_last_holder)
51 #else
52 # define XB_SET_OWNER(bp)	do { } while (0)
53 # define XB_CLEAR_OWNER(bp)	do { } while (0)
54 # define XB_GET_OWNER(bp)	do { } while (0)
55 #endif
56 
57 #define xb_to_gfp(flags) \
58 	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
59 
60 
61 static inline int
62 xfs_buf_is_vmapped(
63 	struct xfs_buf	*bp)
64 {
65 	/*
66 	 * Return true if the buffer is vmapped.
67 	 *
68 	 * b_addr is null if the buffer is not mapped, but the code is clever
69 	 * enough to know it doesn't have to map a single page, so the check
70 	 * must test both b_addr and bp->b_page_count > 1.
71 	 */
72 	return bp->b_addr && bp->b_page_count > 1;
73 }
74 
75 static inline int
76 xfs_buf_vmap_len(
77 	struct xfs_buf	*bp)
78 {
79 	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
80 }
81 
82 /*
83  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
84  * b_lru_ref count so that the buffer is freed immediately when the buffer
85  * reference count falls to zero. If the buffer is already on the LRU, we need
86  * to remove the reference that LRU holds on the buffer.
87  *
88  * This prevents build-up of stale buffers on the LRU.
89  */
90 void
91 xfs_buf_stale(
92 	struct xfs_buf	*bp)
93 {
94 	ASSERT(xfs_buf_islocked(bp));
95 
96 	bp->b_flags |= XBF_STALE;
97 
98 	/*
99 	 * Clear the delwri status so that a delwri queue walker will not
100 	 * flush this buffer to disk now that it is stale. The delwri queue has
101 	 * a reference to the buffer, so this is safe to do.
102 	 */
103 	bp->b_flags &= ~_XBF_DELWRI_Q;
104 
105 	spin_lock(&bp->b_lock);
106 	atomic_set(&bp->b_lru_ref, 0);
107 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
108 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
109 		atomic_dec(&bp->b_hold);
110 
111 	ASSERT(atomic_read(&bp->b_hold) >= 1);
112 	spin_unlock(&bp->b_lock);
113 }
114 
115 static int
116 xfs_buf_get_maps(
117 	struct xfs_buf		*bp,
118 	int			map_count)
119 {
120 	ASSERT(bp->b_maps == NULL);
121 	bp->b_map_count = map_count;
122 
123 	if (map_count == 1) {
124 		bp->b_maps = &bp->__b_map;
125 		return 0;
126 	}
127 
128 	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
129 				KM_NOFS);
130 	if (!bp->b_maps)
131 		return -ENOMEM;
132 	return 0;
133 }
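
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] xfs_buf_get_maps()
 * above avoids a heap allocation in the common single-segment case by
 * pointing b_maps at storage embedded in the buffer itself, and only calls
 * the allocator for multi-segment buffers. A minimal stand-alone userspace
 * analogue of that pattern; all names (demo_buf, demo_get_maps,
 * demo_free_maps) are hypothetical:
 */
#include <stdlib.h>

struct demo_map { long bn; int len; };

struct demo_buf {
	struct demo_map	*maps;		/* points at __map or a heap array */
	int		map_count;
	struct demo_map	__map;		/* embedded storage for the 1-map case */
};

static int demo_get_maps(struct demo_buf *bp, int map_count)
{
	bp->map_count = map_count;
	if (map_count == 1) {
		bp->maps = &bp->__map;	/* common case: no allocation */
		return 0;
	}
	bp->maps = calloc(map_count, sizeof(struct demo_map));
	return bp->maps ? 0 : -1;
}

static void demo_free_maps(struct demo_buf *bp)
{
	if (bp->maps != &bp->__map)	/* only free what calloc gave us */
		free(bp->maps);
	bp->maps = NULL;
}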
134 
135 /*
136  *	Frees b_maps if it was allocated.
137  */
138 static void
139 xfs_buf_free_maps(
140 	struct xfs_buf	*bp)
141 {
142 	if (bp->b_maps != &bp->__b_map) {
143 		kmem_free(bp->b_maps);
144 		bp->b_maps = NULL;
145 	}
146 }
147 
148 struct xfs_buf *
149 _xfs_buf_alloc(
150 	struct xfs_buftarg	*target,
151 	struct xfs_buf_map	*map,
152 	int			nmaps,
153 	xfs_buf_flags_t		flags)
154 {
155 	struct xfs_buf		*bp;
156 	int			error;
157 	int			i;
158 
159 	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
160 	if (unlikely(!bp))
161 		return NULL;
162 
163 	/*
164 	 * We don't want certain flags to appear in b_flags unless they are
165 	 * specifically set by later operations on the buffer.
166 	 */
167 	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
168 
169 	atomic_set(&bp->b_hold, 1);
170 	atomic_set(&bp->b_lru_ref, 1);
171 	init_completion(&bp->b_iowait);
172 	INIT_LIST_HEAD(&bp->b_lru);
173 	INIT_LIST_HEAD(&bp->b_list);
174 	RB_CLEAR_NODE(&bp->b_rbnode);
175 	sema_init(&bp->b_sema, 0); /* held, no waiters */
176 	spin_lock_init(&bp->b_lock);
177 	XB_SET_OWNER(bp);
178 	bp->b_target = target;
179 	bp->b_flags = flags;
180 
181 	/*
182 	 * Set length and io_length to the same value initially.
183 	 * I/O routines should use io_length, which will be the same in
184 	 * most cases but may be reset (e.g. XFS recovery).
185 	 */
186 	error = xfs_buf_get_maps(bp, nmaps);
187 	if (error)  {
188 		kmem_zone_free(xfs_buf_zone, bp);
189 		return NULL;
190 	}
191 
192 	bp->b_bn = map[0].bm_bn;
193 	bp->b_length = 0;
194 	for (i = 0; i < nmaps; i++) {
195 		bp->b_maps[i].bm_bn = map[i].bm_bn;
196 		bp->b_maps[i].bm_len = map[i].bm_len;
197 		bp->b_length += map[i].bm_len;
198 	}
199 	bp->b_io_length = bp->b_length;
200 
201 	atomic_set(&bp->b_pin_count, 0);
202 	init_waitqueue_head(&bp->b_waiters);
203 
204 	XFS_STATS_INC(xb_create);
205 	trace_xfs_buf_init(bp, _RET_IP_);
206 
207 	return bp;
208 }
209 
210 /*
211  *	Allocate a page array capable of holding a specified number
212  *	of pages, and point the page buf at it.
213  */
214 STATIC int
215 _xfs_buf_get_pages(
216 	xfs_buf_t		*bp,
217 	int			page_count)
218 {
219 	/* Make sure that we have a page list */
220 	if (bp->b_pages == NULL) {
221 		bp->b_page_count = page_count;
222 		if (page_count <= XB_PAGES) {
223 			bp->b_pages = bp->b_page_array;
224 		} else {
225 			bp->b_pages = kmem_alloc(sizeof(struct page *) *
226 						 page_count, KM_NOFS);
227 			if (bp->b_pages == NULL)
228 				return -ENOMEM;
229 		}
230 		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
231 	}
232 	return 0;
233 }
234 
235 /*
236  *	Frees b_pages if it was allocated.
237  */
238 STATIC void
239 _xfs_buf_free_pages(
240 	xfs_buf_t	*bp)
241 {
242 	if (bp->b_pages != bp->b_page_array) {
243 		kmem_free(bp->b_pages);
244 		bp->b_pages = NULL;
245 	}
246 }
247 
248 /*
249  *	Releases the specified buffer.
250  *
251  * 	The modification state of any associated pages is left unchanged.
252  * 	The buffer must not be on any hash - use xfs_buf_rele instead for
253  * 	hashed and refcounted buffers.
254  */
255 void
256 xfs_buf_free(
257 	xfs_buf_t		*bp)
258 {
259 	trace_xfs_buf_free(bp, _RET_IP_);
260 
261 	ASSERT(list_empty(&bp->b_lru));
262 
263 	if (bp->b_flags & _XBF_PAGES) {
264 		uint		i;
265 
266 		if (xfs_buf_is_vmapped(bp))
267 			vm_unmap_ram(bp->b_addr - bp->b_offset,
268 					bp->b_page_count);
269 
270 		for (i = 0; i < bp->b_page_count; i++) {
271 			struct page	*page = bp->b_pages[i];
272 
273 			__free_page(page);
274 		}
275 	} else if (bp->b_flags & _XBF_KMEM)
276 		kmem_free(bp->b_addr);
277 	_xfs_buf_free_pages(bp);
278 	xfs_buf_free_maps(bp);
279 	kmem_zone_free(xfs_buf_zone, bp);
280 }
281 
282 /*
283  * Allocates all the pages for the buffer in question and builds its page list.
284  */
285 STATIC int
286 xfs_buf_allocate_memory(
287 	xfs_buf_t		*bp,
288 	uint			flags)
289 {
290 	size_t			size;
291 	size_t			nbytes, offset;
292 	gfp_t			gfp_mask = xb_to_gfp(flags);
293 	unsigned short		page_count, i;
294 	xfs_off_t		start, end;
295 	int			error;
296 
297 	/*
298 	 * for buffers that are contained within a single page, just allocate
299 	 * the memory from the heap - there's no need for the complexity of
300 	 * page arrays to keep allocation down to order 0.
301 	 */
302 	size = BBTOB(bp->b_length);
303 	if (size < PAGE_SIZE) {
304 		bp->b_addr = kmem_alloc(size, KM_NOFS);
305 		if (!bp->b_addr) {
306 			/* low memory - use alloc_page loop instead */
307 			goto use_alloc_page;
308 		}
309 
310 		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
311 		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
312 			/* b_addr spans two pages - use alloc_page instead */
313 			kmem_free(bp->b_addr);
314 			bp->b_addr = NULL;
315 			goto use_alloc_page;
316 		}
317 		bp->b_offset = offset_in_page(bp->b_addr);
318 		bp->b_pages = bp->b_page_array;
319 		bp->b_pages[0] = virt_to_page(bp->b_addr);
320 		bp->b_page_count = 1;
321 		bp->b_flags |= _XBF_KMEM;
322 		return 0;
323 	}
324 
325 use_alloc_page:
326 	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
327 	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
328 								>> PAGE_SHIFT;
329 	page_count = end - start;
330 	error = _xfs_buf_get_pages(bp, page_count);
331 	if (unlikely(error))
332 		return error;
333 
334 	offset = bp->b_offset;
335 	bp->b_flags |= _XBF_PAGES;
336 
337 	for (i = 0; i < bp->b_page_count; i++) {
338 		struct page	*page;
339 		uint		retries = 0;
340 retry:
341 		page = alloc_page(gfp_mask);
342 		if (unlikely(page == NULL)) {
343 			if (flags & XBF_READ_AHEAD) {
344 				bp->b_page_count = i;
345 				error = -ENOMEM;
346 				goto out_free_pages;
347 			}
348 
349 			/*
350 			 * This could deadlock.
351 			 *
352 			 * But until all the XFS lowlevel code is revamped to
353 			 * handle buffer allocation failures we can't do much.
354 			 */
355 			if (!(++retries % 100))
356 				xfs_err(NULL,
357 		"possible memory allocation deadlock in %s (mode:0x%x)",
358 					__func__, gfp_mask);
359 
360 			XFS_STATS_INC(xb_page_retries);
361 			congestion_wait(BLK_RW_ASYNC, HZ/50);
362 			goto retry;
363 		}
364 
365 		XFS_STATS_INC(xb_page_found);
366 
367 		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
368 		size -= nbytes;
369 		bp->b_pages[i] = page;
370 		offset = 0;
371 	}
372 	return 0;
373 
374 out_free_pages:
375 	for (i = 0; i < bp->b_page_count; i++)
376 		__free_page(bp->b_pages[i]);
377 	return error;
378 }
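
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] the kmem path in
 * xfs_buf_allocate_memory() only keeps a heap allocation if it fits inside
 * one page, because the buffer is later described by a single struct page.
 * The test compares the page-aligned addresses of the first and last byte.
 * Stand-alone userspace analogue, assuming a 4096-byte page:
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

/* returns nonzero if [addr, addr + size) crosses a page boundary */
static int spans_two_pages(void *addr, size_t size)
{
	unsigned long first = (unsigned long)addr & DEMO_PAGE_MASK;
	unsigned long last = ((unsigned long)addr + size - 1) & DEMO_PAGE_MASK;

	return first != last;
}

int main(void)
{
	char *p = malloc(512);

	if (!p)
		return 1;
	printf("512 bytes at %p cross a page boundary: %d\n",
	       (void *)p, spans_two_pages(p, 512));
	free(p);
	return 0;
}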
379 
380 /*
381  *	Map buffer into kernel address-space if necessary.
382  */
383 STATIC int
384 _xfs_buf_map_pages(
385 	xfs_buf_t		*bp,
386 	uint			flags)
387 {
388 	ASSERT(bp->b_flags & _XBF_PAGES);
389 	if (bp->b_page_count == 1) {
390 		/* A single page buffer is always mappable */
391 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
392 	} else if (flags & XBF_UNMAPPED) {
393 		bp->b_addr = NULL;
394 	} else {
395 		int retried = 0;
396 		unsigned noio_flag;
397 
398 		/*
399 		 * vm_map_ram() will allocate auxiliary structures (e.g.
400 		 * pagetables) with GFP_KERNEL, yet we are likely to be under
401 		 * GFP_NOFS context here. Hence we need to tell memory reclaim
402 		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
403 		 * memory reclaim re-entering the filesystem here and
404 		 * potentially deadlocking.
405 		 */
406 		noio_flag = memalloc_noio_save();
407 		do {
408 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
409 						-1, PAGE_KERNEL);
410 			if (bp->b_addr)
411 				break;
412 			vm_unmap_aliases();
413 		} while (retried++ <= 1);
414 		memalloc_noio_restore(noio_flag);
415 
416 		if (!bp->b_addr)
417 			return -ENOMEM;
418 		bp->b_addr += bp->b_offset;
419 	}
420 
421 	return 0;
422 }
423 
424 /*
425  *	Finding and Reading Buffers
426  */
427 
428 /*
429  *	Looks up, and creates if absent, a lockable buffer for
430  *	a given range of an inode.  The buffer is returned
431  *	locked.	No I/O is implied by this call.
432  */
433 xfs_buf_t *
434 _xfs_buf_find(
435 	struct xfs_buftarg	*btp,
436 	struct xfs_buf_map	*map,
437 	int			nmaps,
438 	xfs_buf_flags_t		flags,
439 	xfs_buf_t		*new_bp)
440 {
441 	struct xfs_perag	*pag;
442 	struct rb_node		**rbp;
443 	struct rb_node		*parent;
444 	xfs_buf_t		*bp;
445 	xfs_daddr_t		blkno = map[0].bm_bn;
446 	xfs_daddr_t		eofs;
447 	int			numblks = 0;
448 	int			i;
449 
450 	for (i = 0; i < nmaps; i++)
451 		numblks += map[i].bm_len;
452 
453 	/* Check for IOs smaller than the sector size / not sector aligned */
454 	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
455 	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
456 
457 	/*
458 	 * Corrupted block numbers can get through to here, unfortunately, so we
459 	 * have to check that the buffer falls within the filesystem bounds.
460 	 */
461 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
462 	if (blkno < 0 || blkno >= eofs) {
463 		/*
464 		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
465 		 * but none of the higher level infrastructure supports
466 		 * returning a specific error on buffer lookup failures.
467 		 */
468 		xfs_alert(btp->bt_mount,
469 			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
470 			  __func__, blkno, eofs);
471 		WARN_ON(1);
472 		return NULL;
473 	}
474 
475 	/* get tree root */
476 	pag = xfs_perag_get(btp->bt_mount,
477 				xfs_daddr_to_agno(btp->bt_mount, blkno));
478 
479 	/* walk tree */
480 	spin_lock(&pag->pag_buf_lock);
481 	rbp = &pag->pag_buf_tree.rb_node;
482 	parent = NULL;
483 	bp = NULL;
484 	while (*rbp) {
485 		parent = *rbp;
486 		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
487 
488 		if (blkno < bp->b_bn)
489 			rbp = &(*rbp)->rb_left;
490 		else if (blkno > bp->b_bn)
491 			rbp = &(*rbp)->rb_right;
492 		else {
493 			/*
494 			 * found a block number match. If the range doesn't
495 			 * match, the only way this is allowed is if the buffer
496 			 * in the cache is stale and the transaction that made
497 			 * it stale has not yet committed. i.e. we are
498 			 * reallocating a busy extent. Skip this buffer and
499 			 * continue searching to the right for an exact match.
500 			 */
501 			if (bp->b_length != numblks) {
502 				ASSERT(bp->b_flags & XBF_STALE);
503 				rbp = &(*rbp)->rb_right;
504 				continue;
505 			}
506 			atomic_inc(&bp->b_hold);
507 			goto found;
508 		}
509 	}
510 
511 	/* No match found */
512 	if (new_bp) {
513 		rb_link_node(&new_bp->b_rbnode, parent, rbp);
514 		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
515 		/* the buffer keeps the perag reference until it is freed */
516 		new_bp->b_pag = pag;
517 		spin_unlock(&pag->pag_buf_lock);
518 	} else {
519 		XFS_STATS_INC(xb_miss_locked);
520 		spin_unlock(&pag->pag_buf_lock);
521 		xfs_perag_put(pag);
522 	}
523 	return new_bp;
524 
525 found:
526 	spin_unlock(&pag->pag_buf_lock);
527 	xfs_perag_put(pag);
528 
529 	if (!xfs_buf_trylock(bp)) {
530 		if (flags & XBF_TRYLOCK) {
531 			xfs_buf_rele(bp);
532 			XFS_STATS_INC(xb_busy_locked);
533 			return NULL;
534 		}
535 		xfs_buf_lock(bp);
536 		XFS_STATS_INC(xb_get_locked_waited);
537 	}
538 
539 	/*
540 	 * if the buffer is stale, clear all the external state associated with
541 	 * it. We need to keep flags such as how we allocated the buffer memory
542 	 * intact here.
543 	 */
544 	if (bp->b_flags & XBF_STALE) {
545 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
546 		ASSERT(bp->b_iodone == NULL);
547 		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
548 		bp->b_ops = NULL;
549 	}
550 
551 	trace_xfs_buf_find(bp, flags, _RET_IP_);
552 	XFS_STATS_INC(xb_get_locked);
553 	return bp;
554 }
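
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] _xfs_buf_find()
 * above descends an rbtree keyed on block number; a block-number match
 * with a length mismatch is only legal for a stale buffer, and the walk
 * continues to the right looking for an exact match. The sketch below uses
 * a plain unbalanced binary tree for brevity (the kernel uses a balanced
 * rbtree); struct demo_node and demo_find are hypothetical names:
 */
#include <stddef.h>

struct demo_node {
	struct demo_node	*left, *right;
	long			bn;	/* block number, the search key */
	int			len;	/* must match exactly */
};

static struct demo_node *demo_find(struct demo_node *root, long bn, int len)
{
	struct demo_node *n = root;

	while (n) {
		if (bn < n->bn)
			n = n->left;
		else if (bn > n->bn)
			n = n->right;
		else if (n->len != len)
			n = n->right;	/* stale duplicate: keep searching */
		else
			return n;	/* exact match */
	}
	return NULL;	/* miss: caller may insert under the last parent */
}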
555 
556 /*
557  * Assembles a buffer covering the specified range. The code is optimised for
558  * cache hits, as metadata intensive workloads will see 3 orders of magnitude
559  * more hits than misses.
560  */
561 struct xfs_buf *
562 xfs_buf_get_map(
563 	struct xfs_buftarg	*target,
564 	struct xfs_buf_map	*map,
565 	int			nmaps,
566 	xfs_buf_flags_t		flags)
567 {
568 	struct xfs_buf		*bp;
569 	struct xfs_buf		*new_bp;
570 	int			error = 0;
571 
572 	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
573 	if (likely(bp))
574 		goto found;
575 
576 	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
577 	if (unlikely(!new_bp))
578 		return NULL;
579 
580 	error = xfs_buf_allocate_memory(new_bp, flags);
581 	if (error) {
582 		xfs_buf_free(new_bp);
583 		return NULL;
584 	}
585 
586 	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
587 	if (!bp) {
588 		xfs_buf_free(new_bp);
589 		return NULL;
590 	}
591 
592 	if (bp != new_bp)
593 		xfs_buf_free(new_bp);
594 
595 found:
596 	if (!bp->b_addr) {
597 		error = _xfs_buf_map_pages(bp, flags);
598 		if (unlikely(error)) {
599 			xfs_warn(target->bt_mount,
600 				"%s: failed to map pages", __func__);
601 			xfs_buf_relse(bp);
602 			return NULL;
603 		}
604 	}
605 
606 	XFS_STATS_INC(xb_get);
607 	trace_xfs_buf_get(bp, flags, _RET_IP_);
608 	return bp;
609 }
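
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] xfs_buf_get_map()
 * above allocates a new buffer without holding the per-AG lock, then
 * repeats the lookup with the new buffer in hand and frees the losing copy
 * if another thread inserted first. Stand-alone userspace analogue with a
 * single-slot "cache"; all names are hypothetical:
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cache_slot;	/* stands in for the per-AG rbtree */

static void *demo_get(size_t size)
{
	void *found, *new_obj;

	pthread_mutex_lock(&cache_lock);
	found = cache_slot;			/* first lookup */
	pthread_mutex_unlock(&cache_lock);
	if (found)
		return found;			/* cache hit, fast path */

	new_obj = malloc(size);			/* allocate outside the lock */
	if (!new_obj)
		return NULL;

	pthread_mutex_lock(&cache_lock);
	if (cache_slot) {			/* somebody beat us to it */
		found = cache_slot;
		pthread_mutex_unlock(&cache_lock);
		free(new_obj);			/* drop the losing copy */
		return found;
	}
	cache_slot = new_obj;			/* we won: publish ours */
	pthread_mutex_unlock(&cache_lock);
	return new_obj;
}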
610 
611 STATIC int
612 _xfs_buf_read(
613 	xfs_buf_t		*bp,
614 	xfs_buf_flags_t		flags)
615 {
616 	ASSERT(!(flags & XBF_WRITE));
617 	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
618 
619 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
620 	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
621 
622 	if (flags & XBF_ASYNC) {
623 		xfs_buf_submit(bp);
624 		return 0;
625 	}
626 	return xfs_buf_submit_wait(bp);
627 }
628 
629 xfs_buf_t *
630 xfs_buf_read_map(
631 	struct xfs_buftarg	*target,
632 	struct xfs_buf_map	*map,
633 	int			nmaps,
634 	xfs_buf_flags_t		flags,
635 	const struct xfs_buf_ops *ops)
636 {
637 	struct xfs_buf		*bp;
638 
639 	flags |= XBF_READ;
640 
641 	bp = xfs_buf_get_map(target, map, nmaps, flags);
642 	if (bp) {
643 		trace_xfs_buf_read(bp, flags, _RET_IP_);
644 
645 		if (!XFS_BUF_ISDONE(bp)) {
646 			XFS_STATS_INC(xb_get_read);
647 			bp->b_ops = ops;
648 			_xfs_buf_read(bp, flags);
649 		} else if (flags & XBF_ASYNC) {
650 			/*
651 			 * Read ahead call which is already satisfied,
652 			 * drop the buffer
653 			 */
654 			xfs_buf_relse(bp);
655 			return NULL;
656 		} else {
657 			/* We do not want read in the flags */
658 			bp->b_flags &= ~XBF_READ;
659 		}
660 	}
661 
662 	return bp;
663 }
664 
665 /*
666  *	If we are not low on memory then do the readahead in a deadlock
667  *	safe manner.
668  */
669 void
670 xfs_buf_readahead_map(
671 	struct xfs_buftarg	*target,
672 	struct xfs_buf_map	*map,
673 	int			nmaps,
674 	const struct xfs_buf_ops *ops)
675 {
676 	if (bdi_read_congested(target->bt_bdi))
677 		return;
678 
679 	xfs_buf_read_map(target, map, nmaps,
680 		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
681 }
682 
683 /*
684  * Read an uncached buffer from disk. Allocates and returns a locked
685  * buffer containing the disk contents or nothing.
686  */
687 int
688 xfs_buf_read_uncached(
689 	struct xfs_buftarg	*target,
690 	xfs_daddr_t		daddr,
691 	size_t			numblks,
692 	int			flags,
693 	struct xfs_buf		**bpp,
694 	const struct xfs_buf_ops *ops)
695 {
696 	struct xfs_buf		*bp;
697 
698 	*bpp = NULL;
699 
700 	bp = xfs_buf_get_uncached(target, numblks, flags);
701 	if (!bp)
702 		return -ENOMEM;
703 
704 	/* set up the buffer for a read IO */
705 	ASSERT(bp->b_map_count == 1);
706 	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
707 	bp->b_maps[0].bm_bn = daddr;
708 	bp->b_flags |= XBF_READ;
709 	bp->b_ops = ops;
710 
711 	xfs_buf_submit_wait(bp);
712 	if (bp->b_error) {
713 		int	error = bp->b_error;
714 		xfs_buf_relse(bp);
715 		return error;
716 	}
717 
718 	*bpp = bp;
719 	return 0;
720 }
721 
722 /*
723  * Return a buffer allocated as an empty buffer and associated to external
724  * memory via xfs_buf_associate_memory() back to its empty state.
725  */
726 void
727 xfs_buf_set_empty(
728 	struct xfs_buf		*bp,
729 	size_t			numblks)
730 {
731 	if (bp->b_pages)
732 		_xfs_buf_free_pages(bp);
733 
734 	bp->b_pages = NULL;
735 	bp->b_page_count = 0;
736 	bp->b_addr = NULL;
737 	bp->b_length = numblks;
738 	bp->b_io_length = numblks;
739 
740 	ASSERT(bp->b_map_count == 1);
741 	bp->b_bn = XFS_BUF_DADDR_NULL;
742 	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
743 	bp->b_maps[0].bm_len = bp->b_length;
744 }
745 
746 static inline struct page *
747 mem_to_page(
748 	void			*addr)
749 {
750 	if ((!is_vmalloc_addr(addr))) {
751 		return virt_to_page(addr);
752 	} else {
753 		return vmalloc_to_page(addr);
754 	}
755 }
756 
757 int
758 xfs_buf_associate_memory(
759 	xfs_buf_t		*bp,
760 	void			*mem,
761 	size_t			len)
762 {
763 	int			rval;
764 	int			i = 0;
765 	unsigned long		pageaddr;
766 	unsigned long		offset;
767 	size_t			buflen;
768 	int			page_count;
769 
770 	pageaddr = (unsigned long)mem & PAGE_MASK;
771 	offset = (unsigned long)mem - pageaddr;
772 	buflen = PAGE_ALIGN(len + offset);
773 	page_count = buflen >> PAGE_SHIFT;
774 
775 	/* Free any previous set of page pointers */
776 	if (bp->b_pages)
777 		_xfs_buf_free_pages(bp);
778 
779 	bp->b_pages = NULL;
780 	bp->b_addr = mem;
781 
782 	rval = _xfs_buf_get_pages(bp, page_count);
783 	if (rval)
784 		return rval;
785 
786 	bp->b_offset = offset;
787 
788 	for (i = 0; i < bp->b_page_count; i++) {
789 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
790 		pageaddr += PAGE_SIZE;
791 	}
792 
793 	bp->b_io_length = BTOBB(len);
794 	bp->b_length = BTOBB(buflen);
795 
796 	return 0;
797 }
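
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] associating
 * external memory means deriving the page-aligned base address, the offset
 * into the first page, and the number of pages covering [mem, mem + len).
 * Stand-alone userspace analogue of the arithmetic above, assuming a
 * 4096-byte page:
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)

int main(void)
{
	char *base = aligned_alloc(DEMO_PAGE_SIZE, 3 * DEMO_PAGE_SIZE);
	void *mem;
	unsigned long pageaddr, offset, buflen, page_count;
	unsigned long len = 8192;

	if (!base)
		return 1;
	mem = base + 100;			/* deliberately unaligned */

	pageaddr = (unsigned long)mem & DEMO_PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = DEMO_PAGE_ALIGN(len + offset);
	page_count = buflen / DEMO_PAGE_SIZE;

	/* 8192 bytes starting 100 bytes into a page need 3 pages */
	printf("offset=%lu buflen=%lu pages=%lu\n", offset, buflen, page_count);
	free(base);
	return 0;
}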
798 
799 xfs_buf_t *
800 xfs_buf_get_uncached(
801 	struct xfs_buftarg	*target,
802 	size_t			numblks,
803 	int			flags)
804 {
805 	unsigned long		page_count;
806 	int			error, i;
807 	struct xfs_buf		*bp;
808 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
809 
810 	bp = _xfs_buf_alloc(target, &map, 1, 0);
811 	if (unlikely(bp == NULL))
812 		goto fail;
813 
814 	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
815 	error = _xfs_buf_get_pages(bp, page_count);
816 	if (error)
817 		goto fail_free_buf;
818 
819 	for (i = 0; i < page_count; i++) {
820 		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
821 		if (!bp->b_pages[i])
822 			goto fail_free_mem;
823 	}
824 	bp->b_flags |= _XBF_PAGES;
825 
826 	error = _xfs_buf_map_pages(bp, 0);
827 	if (unlikely(error)) {
828 		xfs_warn(target->bt_mount,
829 			"%s: failed to map pages", __func__);
830 		goto fail_free_mem;
831 	}
832 
833 	trace_xfs_buf_get_uncached(bp, _RET_IP_);
834 	return bp;
835 
836  fail_free_mem:
837 	while (--i >= 0)
838 		__free_page(bp->b_pages[i]);
839 	_xfs_buf_free_pages(bp);
840  fail_free_buf:
841 	xfs_buf_free_maps(bp);
842 	kmem_zone_free(xfs_buf_zone, bp);
843  fail:
844 	return NULL;
845 }
846 
847 /*
848  *	Increment reference count on buffer, to hold the buffer concurrently
849  *	with another thread which may release (free) the buffer asynchronously.
850  *	Must hold the buffer already to call this function.
851  */
852 void
853 xfs_buf_hold(
854 	xfs_buf_t		*bp)
855 {
856 	trace_xfs_buf_hold(bp, _RET_IP_);
857 	atomic_inc(&bp->b_hold);
858 }
859 
860 /*
861  *	Releases a hold on the specified buffer.  If the
862  *	hold count is 1, calls xfs_buf_free.
863  */
864 void
865 xfs_buf_rele(
866 	xfs_buf_t		*bp)
867 {
868 	struct xfs_perag	*pag = bp->b_pag;
869 
870 	trace_xfs_buf_rele(bp, _RET_IP_);
871 
872 	if (!pag) {
873 		ASSERT(list_empty(&bp->b_lru));
874 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
875 		if (atomic_dec_and_test(&bp->b_hold))
876 			xfs_buf_free(bp);
877 		return;
878 	}
879 
880 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
881 
882 	ASSERT(atomic_read(&bp->b_hold) > 0);
883 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
884 		spin_lock(&bp->b_lock);
885 		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
886 			/*
887 			 * If the buffer is added to the LRU take a new
888 			 * reference to the buffer for the LRU and clear the
889 			 * (now stale) dispose list state flag
890 			 */
891 			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
892 				bp->b_state &= ~XFS_BSTATE_DISPOSE;
893 				atomic_inc(&bp->b_hold);
894 			}
895 			spin_unlock(&bp->b_lock);
896 			spin_unlock(&pag->pag_buf_lock);
897 		} else {
898 			/*
899 			 * most of the time buffers will already be removed from
900 			 * the LRU, so optimise that case by checking for the
901 			 * XFS_BSTATE_DISPOSE flag indicating the last list the
902 			 * buffer was on was the disposal list
903 			 */
904 			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
905 				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
906 			} else {
907 				ASSERT(list_empty(&bp->b_lru));
908 			}
909 			spin_unlock(&bp->b_lock);
910 
911 			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
912 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
913 			spin_unlock(&pag->pag_buf_lock);
914 			xfs_perag_put(pag);
915 			xfs_buf_free(bp);
916 		}
917 	}
918 }
919 
920 
921 /*
922  *	Lock a buffer object, if it is not already locked.
923  *
924  *	If we come across a stale, pinned, locked buffer, we know that we are
925  *	being asked to lock a buffer that has been reallocated. Because it is
926  *	pinned, we know that the log has not been pushed to disk and hence it
927  *	will still be locked.  Rather than continuing to have trylock attempts
928  *	fail until someone else pushes the log, push it ourselves before
929  *	returning.  This means that the xfsaild will not get stuck trying
930  *	to push on stale inode buffers.
931  */
932 int
933 xfs_buf_trylock(
934 	struct xfs_buf		*bp)
935 {
936 	int			locked;
937 
938 	locked = down_trylock(&bp->b_sema) == 0;
939 	if (locked)
940 		XB_SET_OWNER(bp);
941 
942 	trace_xfs_buf_trylock(bp, _RET_IP_);
943 	return locked;
944 }
945 
946 /*
947  *	Lock a buffer object.
948  *
949  *	If we come across a stale, pinned, locked buffer, we know that we
950  *	are being asked to lock a buffer that has been reallocated. Because
951  *	it is pinned, we know that the log has not been pushed to disk and
952  *	hence it will still be locked. Rather than sleeping until someone
953  *	else pushes the log, push it ourselves before trying to get the lock.
954  */
955 void
956 xfs_buf_lock(
957 	struct xfs_buf		*bp)
958 {
959 	trace_xfs_buf_lock(bp, _RET_IP_);
960 
961 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
962 		xfs_log_force(bp->b_target->bt_mount, 0);
963 	down(&bp->b_sema);
964 	XB_SET_OWNER(bp);
965 
966 	trace_xfs_buf_lock_done(bp, _RET_IP_);
967 }
968 
969 void
970 xfs_buf_unlock(
971 	struct xfs_buf		*bp)
972 {
973 	XB_CLEAR_OWNER(bp);
974 	up(&bp->b_sema);
975 
976 	trace_xfs_buf_unlock(bp, _RET_IP_);
977 }
978 
979 STATIC void
980 xfs_buf_wait_unpin(
981 	xfs_buf_t		*bp)
982 {
983 	DECLARE_WAITQUEUE	(wait, current);
984 
985 	if (atomic_read(&bp->b_pin_count) == 0)
986 		return;
987 
988 	add_wait_queue(&bp->b_waiters, &wait);
989 	for (;;) {
990 		set_current_state(TASK_UNINTERRUPTIBLE);
991 		if (atomic_read(&bp->b_pin_count) == 0)
992 			break;
993 		io_schedule();
994 	}
995 	remove_wait_queue(&bp->b_waiters, &wait);
996 	set_current_state(TASK_RUNNING);
997 }
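
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] the open-coded
 * waitqueue loop in xfs_buf_wait_unpin() ("set task state, recheck the
 * condition, schedule") corresponds to a condition-variable wait in
 * userspace; in both worlds the recheck-in-a-loop guards against spurious
 * wakeups. All names below are hypothetical:
 */
#include <pthread.h>

static pthread_mutex_t pin_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pin_cv = PTHREAD_COND_INITIALIZER;
static int pin_count;

static void demo_wait_unpin(void)
{
	pthread_mutex_lock(&pin_lock);
	while (pin_count != 0)			/* recheck after every wakeup */
		pthread_cond_wait(&pin_cv, &pin_lock);
	pthread_mutex_unlock(&pin_lock);
}

static void demo_unpin(void)
{
	pthread_mutex_lock(&pin_lock);
	if (--pin_count == 0)
		pthread_cond_broadcast(&pin_cv);	/* wake all waiters */
	pthread_mutex_unlock(&pin_lock);
}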
998 
999 /*
1000  *	Buffer Utility Routines
1001  */
1002 
1003 void
1004 xfs_buf_ioend(
1005 	struct xfs_buf	*bp)
1006 {
1007 	bool		read = bp->b_flags & XBF_READ;
1008 
1009 	trace_xfs_buf_iodone(bp, _RET_IP_);
1010 
1011 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1012 
1013 	/*
1014 	 * Pull in IO completion errors now. We are guaranteed to be running
1015 	 * single threaded, so we don't need the lock to read b_io_error.
1016 	 */
1017 	if (!bp->b_error && bp->b_io_error)
1018 		xfs_buf_ioerror(bp, bp->b_io_error);
1019 
1020 	/* Only validate buffers that were read without errors */
1021 	if (read && !bp->b_error && bp->b_ops) {
1022 		ASSERT(!bp->b_iodone);
1023 		bp->b_ops->verify_read(bp);
1024 	}
1025 
1026 	if (!bp->b_error)
1027 		bp->b_flags |= XBF_DONE;
1028 
1029 	if (bp->b_iodone)
1030 		(*(bp->b_iodone))(bp);
1031 	else if (bp->b_flags & XBF_ASYNC)
1032 		xfs_buf_relse(bp);
1033 	else
1034 		complete(&bp->b_iowait);
1035 }
1036 
1037 static void
1038 xfs_buf_ioend_work(
1039 	struct work_struct	*work)
1040 {
1041 	struct xfs_buf		*bp =
1042 		container_of(work, xfs_buf_t, b_ioend_work);
1043 
1044 	xfs_buf_ioend(bp);
1045 }
1046 
1047 void
1048 xfs_buf_ioend_async(
1049 	struct xfs_buf	*bp)
1050 {
1051 	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1052 	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
1053 }
1054 
1055 void
1056 xfs_buf_ioerror(
1057 	xfs_buf_t		*bp,
1058 	int			error)
1059 {
1060 	ASSERT(error <= 0 && error >= -1000);
1061 	bp->b_error = error;
1062 	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1063 }
1064 
1065 void
1066 xfs_buf_ioerror_alert(
1067 	struct xfs_buf		*bp,
1068 	const char		*func)
1069 {
1070 	xfs_alert(bp->b_target->bt_mount,
1071 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1072 		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
1073 }
1074 
1075 int
1076 xfs_bwrite(
1077 	struct xfs_buf		*bp)
1078 {
1079 	int			error;
1080 
1081 	ASSERT(xfs_buf_islocked(bp));
1082 
1083 	bp->b_flags |= XBF_WRITE;
1084 	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1085 			 XBF_WRITE_FAIL | XBF_DONE);
1086 
1087 	error = xfs_buf_submit_wait(bp);
1088 	if (error) {
1089 		xfs_force_shutdown(bp->b_target->bt_mount,
1090 				   SHUTDOWN_META_IO_ERROR);
1091 	}
1092 	return error;
1093 }
1094 
1095 STATIC void
1096 xfs_buf_bio_end_io(
1097 	struct bio		*bio)
1098 {
1099 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
1100 
1101 	/*
1102 	 * don't overwrite existing errors - otherwise we can lose errors on
1103 	 * buffers that require multiple bios to complete.
1104 	 */
1105 	if (bio->bi_error) {
1106 		spin_lock(&bp->b_lock);
1107 		if (!bp->b_io_error)
1108 			bp->b_io_error = bio->bi_error;
1109 		spin_unlock(&bp->b_lock);
1110 	}
1111 
1112 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1113 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1114 
1115 	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1116 		xfs_buf_ioend_async(bp);
1117 	bio_put(bio);
1118 }
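
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] when one logical
 * IO is split across several bios, only the first error is kept, which is
 * what the b_io_error handling above does under b_lock. A lock-free
 * analogue using a C11 compare-and-swap (names are hypothetical):
 */
#include <stdatomic.h>

static _Atomic int first_error;		/* 0 means "no error yet" */

static void demo_record_error(int err)
{
	int expected = 0;

	/* only the first non-zero error is stored; later ones are dropped */
	atomic_compare_exchange_strong(&first_error, &expected, err);
}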
1119 
1120 static void
1121 xfs_buf_ioapply_map(
1122 	struct xfs_buf	*bp,
1123 	int		map,
1124 	int		*buf_offset,
1125 	int		*count,
1126 	int		rw)
1127 {
1128 	int		page_index;
1129 	int		total_nr_pages = bp->b_page_count;
1130 	int		nr_pages;
1131 	struct bio	*bio;
1132 	sector_t	sector =  bp->b_maps[map].bm_bn;
1133 	int		size;
1134 	int		offset;
1135 
1136 	total_nr_pages = bp->b_page_count;
1137 
1138 	/* skip the pages in the buffer before the start offset */
1139 	page_index = 0;
1140 	offset = *buf_offset;
1141 	while (offset >= PAGE_SIZE) {
1142 		page_index++;
1143 		offset -= PAGE_SIZE;
1144 	}
1145 
1146 	/*
1147 	 * Limit the IO size to the length of the current vector, and update the
1148 	 * remaining IO count for the next time around.
1149 	 */
1150 	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1151 	*count -= size;
1152 	*buf_offset += size;
1153 
1154 next_chunk:
1155 	atomic_inc(&bp->b_io_remaining);
1156 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1157 	if (nr_pages > total_nr_pages)
1158 		nr_pages = total_nr_pages;
1159 
1160 	bio = bio_alloc(GFP_NOIO, nr_pages);
1161 	bio->bi_bdev = bp->b_target->bt_bdev;
1162 	bio->bi_iter.bi_sector = sector;
1163 	bio->bi_end_io = xfs_buf_bio_end_io;
1164 	bio->bi_private = bp;
1165 
1166 
1167 	for (; size && nr_pages; nr_pages--, page_index++) {
1168 		int	rbytes, nbytes = PAGE_SIZE - offset;
1169 
1170 		if (nbytes > size)
1171 			nbytes = size;
1172 
1173 		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1174 				      offset);
1175 		if (rbytes < nbytes)
1176 			break;
1177 
1178 		offset = 0;
1179 		sector += BTOBB(nbytes);
1180 		size -= nbytes;
1181 		total_nr_pages--;
1182 	}
1183 
1184 	if (likely(bio->bi_iter.bi_size)) {
1185 		if (xfs_buf_is_vmapped(bp)) {
1186 			flush_kernel_vmap_range(bp->b_addr,
1187 						xfs_buf_vmap_len(bp));
1188 		}
1189 		submit_bio(rw, bio);
1190 		if (size)
1191 			goto next_chunk;
1192 	} else {
1193 		/*
1194 		 * This is guaranteed not to be the last io reference count
1195 		 * because the caller (xfs_buf_submit) holds a count itself.
1196 		 */
1197 		atomic_dec(&bp->b_io_remaining);
1198 		xfs_buf_ioerror(bp, -EIO);
1199 		bio_put(bio);
1200 	}
1201 
1202 }
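
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] the map function
 * above first converts a byte offset into a (page index, in-page offset)
 * pair, then walks the pages, consuming at most a page minus the current
 * offset per step. Stand-alone userspace analogue of that walk, assuming
 * 4096-byte pages:
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096

static void demo_walk(int buf_offset, int size)
{
	int page_index = buf_offset / DEMO_PAGE_SIZE;
	int offset = buf_offset % DEMO_PAGE_SIZE;

	while (size > 0) {
		int nbytes = DEMO_PAGE_SIZE - offset;	/* room left in page */

		if (nbytes > size)
			nbytes = size;
		printf("page %d: offset %d, %d bytes\n",
		       page_index, offset, nbytes);
		size -= nbytes;
		offset = 0;	/* later pages are used from the start */
		page_index++;
	}
}

int main(void)
{
	demo_walk(5000, 10000);	/* starts mid-page 1, ends mid-page 3 */
	return 0;
}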
1203 
1204 STATIC void
1205 _xfs_buf_ioapply(
1206 	struct xfs_buf	*bp)
1207 {
1208 	struct blk_plug	plug;
1209 	int		rw;
1210 	int		offset;
1211 	int		size;
1212 	int		i;
1213 
1214 	/*
1215 	 * Make sure we capture only current IO errors rather than stale errors
1216 	 * left over from previous use of the buffer (e.g. failed readahead).
1217 	 */
1218 	bp->b_error = 0;
1219 
1220 	/*
1221 	 * Initialize the I/O completion workqueue if we haven't yet or the
1222 	 * submitter has not opted to specify a custom one.
1223 	 */
1224 	if (!bp->b_ioend_wq)
1225 		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1226 
1227 	if (bp->b_flags & XBF_WRITE) {
1228 		if (bp->b_flags & XBF_SYNCIO)
1229 			rw = WRITE_SYNC;
1230 		else
1231 			rw = WRITE;
1232 		if (bp->b_flags & XBF_FUA)
1233 			rw |= REQ_FUA;
1234 		if (bp->b_flags & XBF_FLUSH)
1235 			rw |= REQ_FLUSH;
1236 
1237 		/*
1238 		 * Run the write verifier callback function if it exists. If
1239 		 * this function fails it will mark the buffer with an error and
1240 		 * the IO should not be dispatched.
1241 		 */
1242 		if (bp->b_ops) {
1243 			bp->b_ops->verify_write(bp);
1244 			if (bp->b_error) {
1245 				xfs_force_shutdown(bp->b_target->bt_mount,
1246 						   SHUTDOWN_CORRUPT_INCORE);
1247 				return;
1248 			}
1249 		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1250 			struct xfs_mount *mp = bp->b_target->bt_mount;
1251 
1252 			/*
1253 			 * non-crc filesystems don't attach verifiers during
1254 			 * log recovery, so don't warn for such filesystems.
1255 			 */
1256 			if (xfs_sb_version_hascrc(&mp->m_sb)) {
1257 				xfs_warn(mp,
1258 					"%s: no ops on block 0x%llx/0x%x",
1259 					__func__, bp->b_bn, bp->b_length);
1260 				xfs_hex_dump(bp->b_addr, 64);
1261 				dump_stack();
1262 			}
1263 		}
1264 	} else if (bp->b_flags & XBF_READ_AHEAD) {
1265 		rw = READA;
1266 	} else {
1267 		rw = READ;
1268 	}
1269 
1270 	/* we only use the buffer cache for meta-data */
1271 	rw |= REQ_META;
1272 
1273 	/*
1274 	 * Walk all the vectors issuing IO on them. Set up the initial offset
1275 	 * into the buffer and the desired IO size before we start -
1276 	 * xfs_buf_ioapply_map() will modify them appropriately for each
1277 	 * subsequent call.
1278 	 */
1279 	offset = bp->b_offset;
1280 	size = BBTOB(bp->b_io_length);
1281 	blk_start_plug(&plug);
1282 	for (i = 0; i < bp->b_map_count; i++) {
1283 		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1284 		if (bp->b_error)
1285 			break;
1286 		if (size <= 0)
1287 			break;	/* all done */
1288 	}
1289 	blk_finish_plug(&plug);
1290 }
1291 
1292 /*
1293  * Asynchronous IO submission path. This transfers the buffer lock ownership and
1294  * the current reference to the IO. It is not safe to reference the buffer after
1295  * a call to this function unless the caller holds an additional reference
1296  * itself.
1297  */
1298 void
1299 xfs_buf_submit(
1300 	struct xfs_buf	*bp)
1301 {
1302 	trace_xfs_buf_submit(bp, _RET_IP_);
1303 
1304 	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1305 	ASSERT(bp->b_flags & XBF_ASYNC);
1306 
1307 	/* on shutdown we stale and complete the buffer immediately */
1308 	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1309 		xfs_buf_ioerror(bp, -EIO);
1310 		bp->b_flags &= ~XBF_DONE;
1311 		xfs_buf_stale(bp);
1312 		xfs_buf_ioend(bp);
1313 		return;
1314 	}
1315 
1316 	if (bp->b_flags & XBF_WRITE)
1317 		xfs_buf_wait_unpin(bp);
1318 
1319 	/* clear the internal error state to avoid spurious errors */
1320 	bp->b_io_error = 0;
1321 
1322 	/*
1323 	 * The caller's reference is released during I/O completion.
1324 	 * This occurs some time after the last b_io_remaining reference is
1325 	 * released, so after we drop our IO reference we have to have some
1326 	 * other reference to ensure the buffer doesn't go away from underneath
1327 	 * us. Take a direct reference to ensure we have safe access to the
1328 	 * buffer until we are finished with it.
1329 	 */
1330 	xfs_buf_hold(bp);
1331 
1332 	/*
1333 	 * Set the count to 1 initially, this will stop an I/O completion
1334 	 * callout which happens before we have started all the I/O from calling
1335 	 * xfs_buf_ioend too early.
1336 	 */
1337 	atomic_set(&bp->b_io_remaining, 1);
1338 	_xfs_buf_ioapply(bp);
1339 
1340 	/*
1341 	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1342 	 * reference we took above. If we drop it to zero, run completion so
1343 	 * that we don't return to the caller with completion still pending.
1344 	 */
1345 	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1346 		if (bp->b_error)
1347 			xfs_buf_ioend(bp);
1348 		else
1349 			xfs_buf_ioend_async(bp);
1350 	}
1351 
1352 	xfs_buf_rele(bp);
1353 	/* Note: it is not safe to reference bp now we've dropped our ref */
1354 }
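
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] b_io_remaining is
 * primed to 1 before any sub-IO is issued so that an early completion
 * cannot run the final callback while submission is still in progress;
 * the submitter drops that priming reference at the end and runs completion
 * itself only if it held the last count. C11-atomics analogue of the
 * counting scheme (names are hypothetical):
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int io_remaining;

static void demo_complete_one(void)
{
	if (atomic_fetch_sub(&io_remaining, 1) == 1)
		printf("last count dropped: run completion once\n");
}

static void demo_submit(int nr_sub_ios)
{
	atomic_store(&io_remaining, 1);		/* priming reference */
	for (int i = 0; i < nr_sub_ios; i++) {
		atomic_fetch_add(&io_remaining, 1);
		demo_complete_one();	/* sub-IO may complete immediately */
	}
	demo_complete_one();			/* drop the priming reference */
}

int main(void)
{
	demo_submit(3);		/* completion runs exactly once, at the end */
	return 0;
}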
1355 
1356 /*
1357  * Synchronous buffer IO submission path, read or write.
1358  */
1359 int
1360 xfs_buf_submit_wait(
1361 	struct xfs_buf	*bp)
1362 {
1363 	int		error;
1364 
1365 	trace_xfs_buf_submit_wait(bp, _RET_IP_);
1366 
1367 	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1368 
1369 	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1370 		xfs_buf_ioerror(bp, -EIO);
1371 		xfs_buf_stale(bp);
1372 		bp->b_flags &= ~XBF_DONE;
1373 		return -EIO;
1374 	}
1375 
1376 	if (bp->b_flags & XBF_WRITE)
1377 		xfs_buf_wait_unpin(bp);
1378 
1379 	/* clear the internal error state to avoid spurious errors */
1380 	bp->b_io_error = 0;
1381 
1382 	/*
1383 	 * For synchronous IO, the IO does not inherit the submitter's reference
1384 	 * count, nor the buffer lock. Hence we cannot release the reference we
1385 	 * are about to take until we've waited for all IO completion to occur,
1386 	 * including any xfs_buf_ioend_async() work that may be pending.
1387 	 */
1388 	xfs_buf_hold(bp);
1389 
1390 	/*
1391 	 * Set the count to 1 initially, this will stop an I/O completion
1392 	 * callout which happens before we have started all the I/O from calling
1393 	 * xfs_buf_ioend too early.
1394 	 */
1395 	atomic_set(&bp->b_io_remaining, 1);
1396 	_xfs_buf_ioapply(bp);
1397 
1398 	/*
1399 	 * make sure we run completion synchronously if it raced with us and is
1400 	 * already complete.
1401 	 */
1402 	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1403 		xfs_buf_ioend(bp);
1404 
1405 	/* wait for completion before gathering the error from the buffer */
1406 	trace_xfs_buf_iowait(bp, _RET_IP_);
1407 	wait_for_completion(&bp->b_iowait);
1408 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1409 	error = bp->b_error;
1410 
1411 	/*
1412 	 * all done now, we can release the hold that keeps the buffer
1413 	 * referenced for the entire IO.
1414 	 */
1415 	xfs_buf_rele(bp);
1416 	return error;
1417 }
1418 
1419 void *
1420 xfs_buf_offset(
1421 	struct xfs_buf		*bp,
1422 	size_t			offset)
1423 {
1424 	struct page		*page;
1425 
1426 	if (bp->b_addr)
1427 		return bp->b_addr + offset;
1428 
1429 	offset += bp->b_offset;
1430 	page = bp->b_pages[offset >> PAGE_SHIFT];
1431 	return page_address(page) + (offset & (PAGE_SIZE-1));
1432 }
1433 
1434 /*
1435  *	Move data into or out of a buffer.
1436  */
1437 void
1438 xfs_buf_iomove(
1439 	xfs_buf_t		*bp,	/* buffer to process		*/
1440 	size_t			boff,	/* starting buffer offset	*/
1441 	size_t			bsize,	/* length to copy		*/
1442 	void			*data,	/* data address			*/
1443 	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
1444 {
1445 	size_t			bend;
1446 
1447 	bend = boff + bsize;
1448 	while (boff < bend) {
1449 		struct page	*page;
1450 		int		page_index, page_offset, csize;
1451 
1452 		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1453 		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1454 		page = bp->b_pages[page_index];
1455 		csize = min_t(size_t, PAGE_SIZE - page_offset,
1456 				      BBTOB(bp->b_io_length) - boff);
1457 
1458 		ASSERT((csize + page_offset) <= PAGE_SIZE);
1459 
1460 		switch (mode) {
1461 		case XBRW_ZERO:
1462 			memset(page_address(page) + page_offset, 0, csize);
1463 			break;
1464 		case XBRW_READ:
1465 			memcpy(data, page_address(page) + page_offset, csize);
1466 			break;
1467 		case XBRW_WRITE:
1468 			memcpy(page_address(page) + page_offset, data, csize);
1469 		}
1470 
1471 		boff += csize;
1472 		data += csize;
1473 	}
1474 }
1475 
1476 /*
1477  *	Handling of buffer targets (buftargs).
1478  */
1479 
1480 /*
1481  * Wait for any bufs with callbacks that have been submitted but have not yet
1482  * returned. These buffers will have an elevated hold count, so wait on those
1483  * while freeing all the buffers only held by the LRU.
1484  */
1485 static enum lru_status
1486 xfs_buftarg_wait_rele(
1487 	struct list_head	*item,
1488 	struct list_lru_one	*lru,
1489 	spinlock_t		*lru_lock,
1490 	void			*arg)
1491 
1492 {
1493 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1494 	struct list_head	*dispose = arg;
1495 
1496 	if (atomic_read(&bp->b_hold) > 1) {
1497 		/* need to wait, so skip it this pass */
1498 		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1499 		return LRU_SKIP;
1500 	}
1501 	if (!spin_trylock(&bp->b_lock))
1502 		return LRU_SKIP;
1503 
1504 	/*
1505 	 * clear the LRU reference count so the buffer doesn't get
1506 	 * ignored in xfs_buf_rele().
1507 	 */
1508 	atomic_set(&bp->b_lru_ref, 0);
1509 	bp->b_state |= XFS_BSTATE_DISPOSE;
1510 	list_lru_isolate_move(lru, item, dispose);
1511 	spin_unlock(&bp->b_lock);
1512 	return LRU_REMOVED;
1513 }
1514 
1515 void
1516 xfs_wait_buftarg(
1517 	struct xfs_buftarg	*btp)
1518 {
1519 	LIST_HEAD(dispose);
1520 	int loop = 0;
1521 
1522 	/* loop until there is nothing left on the lru list. */
1523 	while (list_lru_count(&btp->bt_lru)) {
1524 		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1525 			      &dispose, LONG_MAX);
1526 
1527 		while (!list_empty(&dispose)) {
1528 			struct xfs_buf *bp;
1529 			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1530 			list_del_init(&bp->b_lru);
1531 			if (bp->b_flags & XBF_WRITE_FAIL) {
1532 				xfs_alert(btp->bt_mount,
1533 "Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
1534 					(long long)bp->b_bn);
1535 				xfs_alert(btp->bt_mount,
1536 "Please run xfs_repair to determine the extent of the problem.");
1537 			}
1538 			xfs_buf_rele(bp);
1539 		}
1540 		if (loop++ != 0)
1541 			delay(100);
1542 	}
1543 }
1544 
1545 static enum lru_status
1546 xfs_buftarg_isolate(
1547 	struct list_head	*item,
1548 	struct list_lru_one	*lru,
1549 	spinlock_t		*lru_lock,
1550 	void			*arg)
1551 {
1552 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1553 	struct list_head	*dispose = arg;
1554 
1555 	/*
1556 	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1557 	 * If we fail to get the lock, just skip it.
1558 	 */
1559 	if (!spin_trylock(&bp->b_lock))
1560 		return LRU_SKIP;
1561 	/*
1562 	 * Decrement the b_lru_ref count unless the value is already
1563 	 * zero. If the value is already zero, we need to reclaim the
1564 	 * buffer, otherwise it gets another trip through the LRU.
1565 	 */
1566 	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1567 		spin_unlock(&bp->b_lock);
1568 		return LRU_ROTATE;
1569 	}
1570 
1571 	bp->b_state |= XFS_BSTATE_DISPOSE;
1572 	list_lru_isolate_move(lru, item, dispose);
1573 	spin_unlock(&bp->b_lock);
1574 	return LRU_REMOVED;
1575 }
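
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] atomic_add_unless
 * above decrements b_lru_ref only while it is non-zero, so a buffer that
 * has aged down to zero is reclaimed instead of rotated. A C11
 * compare-and-swap loop with the same semantics (the name is hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

/* returns true if the counter was decremented, false if it was already 0 */
static bool demo_dec_unless_zero(_Atomic int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
		/* CAS failure reloaded 'old'; loop and retry */
	}
	return false;
}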
1576 
1577 static unsigned long
1578 xfs_buftarg_shrink_scan(
1579 	struct shrinker		*shrink,
1580 	struct shrink_control	*sc)
1581 {
1582 	struct xfs_buftarg	*btp = container_of(shrink,
1583 					struct xfs_buftarg, bt_shrinker);
1584 	LIST_HEAD(dispose);
1585 	unsigned long		freed;
1586 
1587 	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1588 				     xfs_buftarg_isolate, &dispose);
1589 
1590 	while (!list_empty(&dispose)) {
1591 		struct xfs_buf *bp;
1592 		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1593 		list_del_init(&bp->b_lru);
1594 		xfs_buf_rele(bp);
1595 	}
1596 
1597 	return freed;
1598 }
1599 
1600 static unsigned long
1601 xfs_buftarg_shrink_count(
1602 	struct shrinker		*shrink,
1603 	struct shrink_control	*sc)
1604 {
1605 	struct xfs_buftarg	*btp = container_of(shrink,
1606 					struct xfs_buftarg, bt_shrinker);
1607 	return list_lru_shrink_count(&btp->bt_lru, sc);
1608 }
1609 
1610 void
1611 xfs_free_buftarg(
1612 	struct xfs_mount	*mp,
1613 	struct xfs_buftarg	*btp)
1614 {
1615 	unregister_shrinker(&btp->bt_shrinker);
1616 	list_lru_destroy(&btp->bt_lru);
1617 
1618 	if (mp->m_flags & XFS_MOUNT_BARRIER)
1619 		xfs_blkdev_issue_flush(btp);
1620 
1621 	kmem_free(btp);
1622 }
1623 
1624 int
1625 xfs_setsize_buftarg(
1626 	xfs_buftarg_t		*btp,
1627 	unsigned int		sectorsize)
1628 {
1629 	/* Set up metadata sector size info */
1630 	btp->bt_meta_sectorsize = sectorsize;
1631 	btp->bt_meta_sectormask = sectorsize - 1;
1632 
1633 	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1634 		char name[BDEVNAME_SIZE];
1635 
1636 		bdevname(btp->bt_bdev, name);
1637 
1638 		xfs_warn(btp->bt_mount,
1639 			"Cannot set_blocksize to %u on device %s",
1640 			sectorsize, name);
1641 		return -EINVAL;
1642 	}
1643 
1644 	/* Set up device logical sector size mask */
1645 	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1646 	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1647 
1648 	return 0;
1649 }
1650 
1651 /*
1652  * When allocating the initial buffer target we have not yet
1653  * read in the superblock, so we don't know what size sectors
1654  * are being used at this early stage.  Play safe.
1655  */
1656 STATIC int
1657 xfs_setsize_buftarg_early(
1658 	xfs_buftarg_t		*btp,
1659 	struct block_device	*bdev)
1660 {
1661 	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1662 }
1663 
1664 xfs_buftarg_t *
1665 xfs_alloc_buftarg(
1666 	struct xfs_mount	*mp,
1667 	struct block_device	*bdev)
1668 {
1669 	xfs_buftarg_t		*btp;
1670 
1671 	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1672 
1673 	btp->bt_mount = mp;
1674 	btp->bt_dev =  bdev->bd_dev;
1675 	btp->bt_bdev = bdev;
1676 	btp->bt_bdi = blk_get_backing_dev_info(bdev);
1677 
1678 	if (xfs_setsize_buftarg_early(btp, bdev))
1679 		goto error;
1680 
1681 	if (list_lru_init(&btp->bt_lru))
1682 		goto error;
1683 
1684 	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1685 	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1686 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1687 	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1688 	register_shrinker(&btp->bt_shrinker);
1689 	return btp;
1690 
1691 error:
1692 	kmem_free(btp);
1693 	return NULL;
1694 }
1695 
1696 /*
1697  * Add a buffer to the delayed write list.
1698  *
1699  * This queues a buffer for writeout if it hasn't already been queued.  Note that
1700  * neither this routine nor the buffer list submission functions perform
1701  * any internal synchronization.  It is expected that the lists are thread-local
1702  * to the callers.
1703  *
1704  * Returns true if we queued up the buffer, or false if it already had
1705  * been on the buffer list.
1706  */
1707 bool
1708 xfs_buf_delwri_queue(
1709 	struct xfs_buf		*bp,
1710 	struct list_head	*list)
1711 {
1712 	ASSERT(xfs_buf_islocked(bp));
1713 	ASSERT(!(bp->b_flags & XBF_READ));
1714 
1715 	/*
1716 	 * If the buffer is already marked delwri it already is queued up
1717 	 * by someone else for immediate writeout.  Just ignore it in that
1718 	 * case.
1719 	 */
1720 	if (bp->b_flags & _XBF_DELWRI_Q) {
1721 		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1722 		return false;
1723 	}
1724 
1725 	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1726 
1727 	/*
1728 	 * If a buffer gets written out synchronously or marked stale while it
1729 	 * is on a delwri list we lazily remove it. To do this, the other party
1730 	 * clears the  _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1731 	 * It remains referenced and on the list.  In a rare corner case it
1732 	 * might get re-added to a delwri list after the synchronous writeout, in
1733 	 * which case we just need to re-add the flag here.
1734 	 */
1735 	bp->b_flags |= _XBF_DELWRI_Q;
1736 	if (list_empty(&bp->b_list)) {
1737 		atomic_inc(&bp->b_hold);
1738 		list_add_tail(&bp->b_list, list);
1739 	}
1740 
1741 	return true;
1742 }
1743 
1744 /*
1745  * Compare function is more complex than it needs to be because
1746  * the return value is only 32 bits and we are doing comparisons
1747  * on 64 bit values
1748  */
1749 static int
1750 xfs_buf_cmp(
1751 	void		*priv,
1752 	struct list_head *a,
1753 	struct list_head *b)
1754 {
1755 	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
1756 	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
1757 	xfs_daddr_t		diff;
1758 
1759 	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1760 	if (diff < 0)
1761 		return -1;
1762 	if (diff > 0)
1763 		return 1;
1764 	return 0;
1765 }
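
/*
 * [Editor's illustrative sketch, not part of xfs_buf.c] the comment above
 * points at a classic trap: returning the 64-bit difference truncated to
 * int can flip or lose the sign for far-apart keys, so xfs_buf_cmp()
 * compares the difference against zero instead. Stand-alone demonstration
 * of the broken variant versus the safe one:
 */
#include <stdint.h>
#include <stdio.h>

static int cmp_broken(int64_t a, int64_t b)
{
	return (int)(a - b);		/* truncation can discard the sign */
}

static int cmp_safe(int64_t a, int64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	int64_t a = 0, b = INT64_C(1) << 32;	/* differ only in high bits */

	/* broken: (a - b) truncates to 0, claiming the keys are equal */
	printf("broken: %d, safe: %d\n", cmp_broken(a, b), cmp_safe(a, b));
	return 0;
}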
1766 
1767 static int
1768 __xfs_buf_delwri_submit(
1769 	struct list_head	*buffer_list,
1770 	struct list_head	*io_list,
1771 	bool			wait)
1772 {
1773 	struct blk_plug		plug;
1774 	struct xfs_buf		*bp, *n;
1775 	int			pinned = 0;
1776 
1777 	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1778 		if (!wait) {
1779 			if (xfs_buf_ispinned(bp)) {
1780 				pinned++;
1781 				continue;
1782 			}
1783 			if (!xfs_buf_trylock(bp))
1784 				continue;
1785 		} else {
1786 			xfs_buf_lock(bp);
1787 		}
1788 
1789 		/*
1790 		 * Someone else might have written the buffer synchronously or
1791 		 * marked it stale in the meantime.  In that case only the
1792 		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1793 		 * reference and remove it from the list here.
1794 		 */
1795 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1796 			list_del_init(&bp->b_list);
1797 			xfs_buf_relse(bp);
1798 			continue;
1799 		}
1800 
1801 		list_move_tail(&bp->b_list, io_list);
1802 		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1803 	}
1804 
1805 	list_sort(NULL, io_list, xfs_buf_cmp);
1806 
1807 	blk_start_plug(&plug);
1808 	list_for_each_entry_safe(bp, n, io_list, b_list) {
1809 		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1810 		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1811 
1812 		/*
1813 		 * we do all IO submission async. This means if we need to wait
1814 		 * for IO completion we need to take an extra reference so the
1815 		 * buffer is still valid on the other side.
1816 		 */
1817 		if (wait)
1818 			xfs_buf_hold(bp);
1819 		else
1820 			list_del_init(&bp->b_list);
1821 
1822 		xfs_buf_submit(bp);
1823 	}
1824 	blk_finish_plug(&plug);
1825 
1826 	return pinned;
1827 }
1828 
1829 /*
1830  * Write out a buffer list asynchronously.
1831  *
1832  * This will take the @buffer_list, write all non-locked and non-pinned buffers
1833  * out and not wait for I/O completion on any of the buffers.  This interface
1834  * is only safely usable for callers that can track I/O completion by higher
1835  * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1836  * function.
1837  */
1838 int
1839 xfs_buf_delwri_submit_nowait(
1840 	struct list_head	*buffer_list)
1841 {
1842 	LIST_HEAD		(io_list);
1843 	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1844 }
1845 
1846 /*
1847  * Write out a buffer list synchronously.
1848  *
1849  * This will take the @buffer_list, write all buffers out and wait for I/O
1850  * completion on all of the buffers. @buffer_list is consumed by the function,
1851  * so callers must have some other way of tracking buffers if they require such
1852  * functionality.
1853  */
1854 int
1855 xfs_buf_delwri_submit(
1856 	struct list_head	*buffer_list)
1857 {
1858 	LIST_HEAD		(io_list);
1859 	int			error = 0, error2;
1860 	struct xfs_buf		*bp;
1861 
1862 	__xfs_buf_delwri_submit(buffer_list, &io_list, true);
1863 
1864 	/* Wait for IO to complete. */
1865 	while (!list_empty(&io_list)) {
1866 		bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1867 
1868 		list_del_init(&bp->b_list);
1869 
1870 		/* locking the buffer will wait for async IO completion. */
1871 		xfs_buf_lock(bp);
1872 		error2 = bp->b_error;
1873 		xfs_buf_relse(bp);
1874 		if (!error)
1875 			error = error2;
1876 	}
1877 
1878 	return error;
1879 }
1880 
1881 int __init
1882 xfs_buf_init(void)
1883 {
1884 	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1885 						KM_ZONE_HWALIGN, NULL);
1886 	if (!xfs_buf_zone)
1887 		goto out;
1888 
1889 	return 0;
1890 
1891  out:
1892 	return -ENOMEM;
1893 }
1894 
1895 void
1896 xfs_buf_terminate(void)
1897 {
1898 	kmem_zone_destroy(xfs_buf_zone);
1899 }
1900