xref: /openbmc/linux/fs/xfs/xfs_buf.c (revision 840ef8b7)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 
37 #include "xfs_sb.h"
38 #include "xfs_log.h"
39 #include "xfs_ag.h"
40 #include "xfs_mount.h"
41 #include "xfs_trace.h"
42 
43 static kmem_zone_t *xfs_buf_zone;
44 
45 static struct workqueue_struct *xfslogd_workqueue;
46 
47 #ifdef XFS_BUF_LOCK_TRACKING
48 # define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
49 # define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
50 # define XB_GET_OWNER(bp)	((bp)->b_last_holder)
51 #else
52 # define XB_SET_OWNER(bp)	do { } while (0)
53 # define XB_CLEAR_OWNER(bp)	do { } while (0)
54 # define XB_GET_OWNER(bp)	do { } while (0)
55 #endif
56 
57 #define xb_to_gfp(flags) \
58 	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
59 
60 
61 static inline int
62 xfs_buf_is_vmapped(
63 	struct xfs_buf	*bp)
64 {
65 	/*
66 	 * Return true if the buffer is vmapped.
67 	 *
68 	 * b_addr is null if the buffer is not mapped, but single-page buffers
69 	 * are mapped through the page's own kernel mapping rather than vmap, so
70 	 * the check has to test both b_addr and bp->b_page_count > 1.
71 	 */
72 	return bp->b_addr && bp->b_page_count > 1;
73 }
74 
75 static inline int
76 xfs_buf_vmap_len(
77 	struct xfs_buf	*bp)
78 {
79 	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
80 }
81 
82 /*
83  * xfs_buf_lru_add - add a buffer to the LRU.
84  *
85  * The LRU takes a new reference to the buffer so that it will only be freed
86  * once the shrinker takes the buffer off the LRU.
87  */
88 STATIC void
89 xfs_buf_lru_add(
90 	struct xfs_buf	*bp)
91 {
92 	struct xfs_buftarg *btp = bp->b_target;
93 
94 	spin_lock(&btp->bt_lru_lock);
95 	if (list_empty(&bp->b_lru)) {
96 		atomic_inc(&bp->b_hold);
97 		list_add_tail(&bp->b_lru, &btp->bt_lru);
98 		btp->bt_lru_nr++;
99 		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
100 	}
101 	spin_unlock(&btp->bt_lru_lock);
102 }
103 
104 /*
105  * xfs_buf_lru_del - remove a buffer from the LRU
106  *
107  * The unlocked check is safe here because it only occurs when there are no
108  * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
109  * there to optimise the shrinker removing the buffer from the LRU and
110  * calling xfs_buf_free(), i.e. it avoids an unnecessary round trip on the
111  * bt_lru_lock.
112  */
113 STATIC void
114 xfs_buf_lru_del(
115 	struct xfs_buf	*bp)
116 {
117 	struct xfs_buftarg *btp = bp->b_target;
118 
119 	if (list_empty(&bp->b_lru))
120 		return;
121 
122 	spin_lock(&btp->bt_lru_lock);
123 	if (!list_empty(&bp->b_lru)) {
124 		list_del_init(&bp->b_lru);
125 		btp->bt_lru_nr--;
126 	}
127 	spin_unlock(&btp->bt_lru_lock);
128 }
129 
130 /*
131  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
132  * b_lru_ref count so that the buffer is freed immediately when the buffer
133  * reference count falls to zero. If the buffer is already on the LRU, we need
134  * to remove the reference that LRU holds on the buffer.
135  *
136  * This prevents build-up of stale buffers on the LRU.
137  */
138 void
139 xfs_buf_stale(
140 	struct xfs_buf	*bp)
141 {
142 	ASSERT(xfs_buf_islocked(bp));
143 
144 	bp->b_flags |= XBF_STALE;
145 
146 	/*
147 	 * Clear the delwri status so that a delwri queue walker will not
148 	 * flush this buffer to disk now that it is stale. The delwri queue has
149 	 * a reference to the buffer, so this is safe to do.
150 	 */
151 	bp->b_flags &= ~_XBF_DELWRI_Q;
152 
153 	atomic_set(&(bp)->b_lru_ref, 0);
154 	if (!list_empty(&bp->b_lru)) {
155 		struct xfs_buftarg *btp = bp->b_target;
156 
157 		spin_lock(&btp->bt_lru_lock);
158 		if (!list_empty(&bp->b_lru) &&
159 		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
160 			list_del_init(&bp->b_lru);
161 			btp->bt_lru_nr--;
162 			atomic_dec(&bp->b_hold);
163 		}
164 		spin_unlock(&btp->bt_lru_lock);
165 	}
166 	ASSERT(atomic_read(&bp->b_hold) >= 1);
167 }
168 
169 static int
170 xfs_buf_get_maps(
171 	struct xfs_buf		*bp,
172 	int			map_count)
173 {
174 	ASSERT(bp->b_maps == NULL);
175 	bp->b_map_count = map_count;
176 
177 	if (map_count == 1) {
178 		bp->b_maps = &bp->__b_map;
179 		return 0;
180 	}
181 
182 	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
183 				KM_NOFS);
184 	if (!bp->b_maps)
185 		return ENOMEM;
186 	return 0;
187 }
188 
189 /*
190  *	Frees b_maps if it was allocated.
191  */
192 static void
193 xfs_buf_free_maps(
194 	struct xfs_buf	*bp)
195 {
196 	if (bp->b_maps != &bp->__b_map) {
197 		kmem_free(bp->b_maps);
198 		bp->b_maps = NULL;
199 	}
200 }
201 
202 struct xfs_buf *
203 _xfs_buf_alloc(
204 	struct xfs_buftarg	*target,
205 	struct xfs_buf_map	*map,
206 	int			nmaps,
207 	xfs_buf_flags_t		flags)
208 {
209 	struct xfs_buf		*bp;
210 	int			error;
211 	int			i;
212 
213 	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
214 	if (unlikely(!bp))
215 		return NULL;
216 
217 	/*
218 	 * We don't want certain flags to appear in b_flags unless they are
219 	 * specifically set by later operations on the buffer.
220 	 */
221 	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
222 
223 	atomic_set(&bp->b_hold, 1);
224 	atomic_set(&bp->b_lru_ref, 1);
225 	init_completion(&bp->b_iowait);
226 	INIT_LIST_HEAD(&bp->b_lru);
227 	INIT_LIST_HEAD(&bp->b_list);
228 	RB_CLEAR_NODE(&bp->b_rbnode);
229 	sema_init(&bp->b_sema, 0); /* held, no waiters */
230 	XB_SET_OWNER(bp);
231 	bp->b_target = target;
232 	bp->b_flags = flags;
233 
234 	/*
235 	 * Set length and io_length to the same value initially.
236 	 * I/O routines should use io_length, which will be the same in
237 	 * most cases but may be reset (e.g. XFS recovery).
238 	 */
239 	error = xfs_buf_get_maps(bp, nmaps);
240 	if (error)  {
241 		kmem_zone_free(xfs_buf_zone, bp);
242 		return NULL;
243 	}
244 
245 	bp->b_bn = map[0].bm_bn;
246 	bp->b_length = 0;
247 	for (i = 0; i < nmaps; i++) {
248 		bp->b_maps[i].bm_bn = map[i].bm_bn;
249 		bp->b_maps[i].bm_len = map[i].bm_len;
250 		bp->b_length += map[i].bm_len;
251 	}
252 	bp->b_io_length = bp->b_length;
253 
254 	atomic_set(&bp->b_pin_count, 0);
255 	init_waitqueue_head(&bp->b_waiters);
256 
257 	XFS_STATS_INC(xb_create);
258 	trace_xfs_buf_init(bp, _RET_IP_);
259 
260 	return bp;
261 }
262 
263 /*
264  *	Allocate a page array capable of holding a specified number
265  *	of pages, and point the page buf at it.
266  */
267 STATIC int
268 _xfs_buf_get_pages(
269 	xfs_buf_t		*bp,
270 	int			page_count,
271 	xfs_buf_flags_t		flags)
272 {
273 	/* Make sure that we have a page list */
274 	if (bp->b_pages == NULL) {
275 		bp->b_page_count = page_count;
276 		if (page_count <= XB_PAGES) {
277 			bp->b_pages = bp->b_page_array;
278 		} else {
279 			bp->b_pages = kmem_alloc(sizeof(struct page *) *
280 						 page_count, KM_NOFS);
281 			if (bp->b_pages == NULL)
282 				return -ENOMEM;
283 		}
284 		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
285 	}
286 	return 0;
287 }
288 
289 /*
290  *	Frees b_pages if it was allocated.
291  */
292 STATIC void
293 _xfs_buf_free_pages(
294 	xfs_buf_t	*bp)
295 {
296 	if (bp->b_pages != bp->b_page_array) {
297 		kmem_free(bp->b_pages);
298 		bp->b_pages = NULL;
299 	}
300 }
301 
302 /*
303  *	Releases the specified buffer.
304  *
305  * 	The modification state of any associated pages is left unchanged.
306  * 	The buffer most not be on any hash - use xfs_buf_rele instead for
307  * 	The buffer must not be on any hash - use xfs_buf_rele instead for
308  * 	hashed and refcounted buffers.
309 void
310 xfs_buf_free(
311 	xfs_buf_t		*bp)
312 {
313 	trace_xfs_buf_free(bp, _RET_IP_);
314 
315 	ASSERT(list_empty(&bp->b_lru));
316 
317 	if (bp->b_flags & _XBF_PAGES) {
318 		uint		i;
319 
320 		if (xfs_buf_is_vmapped(bp))
321 			vm_unmap_ram(bp->b_addr - bp->b_offset,
322 					bp->b_page_count);
323 
324 		for (i = 0; i < bp->b_page_count; i++) {
325 			struct page	*page = bp->b_pages[i];
326 
327 			__free_page(page);
328 		}
329 	} else if (bp->b_flags & _XBF_KMEM)
330 		kmem_free(bp->b_addr);
331 	_xfs_buf_free_pages(bp);
332 	xfs_buf_free_maps(bp);
333 	kmem_zone_free(xfs_buf_zone, bp);
334 }
335 
336 /*
337  * Allocates all the pages for the buffer in question and builds its page list.
338  */
339 STATIC int
340 xfs_buf_allocate_memory(
341 	xfs_buf_t		*bp,
342 	uint			flags)
343 {
344 	size_t			size;
345 	size_t			nbytes, offset;
346 	gfp_t			gfp_mask = xb_to_gfp(flags);
347 	unsigned short		page_count, i;
348 	xfs_off_t		start, end;
349 	int			error;
350 
351 	/*
352 	 * for buffers that are contained within a single page, just allocate
353 	 * the memory from the heap - there's no need for the complexity of
354 	 * page arrays to keep allocation down to order 0.
355 	 */
356 	size = BBTOB(bp->b_length);
357 	if (size < PAGE_SIZE) {
358 		bp->b_addr = kmem_alloc(size, KM_NOFS);
359 		if (!bp->b_addr) {
360 			/* low memory - use alloc_page loop instead */
361 			goto use_alloc_page;
362 		}
363 
364 		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
365 		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
366 			/* b_addr spans two pages - use alloc_page instead */
367 			kmem_free(bp->b_addr);
368 			bp->b_addr = NULL;
369 			goto use_alloc_page;
370 		}
371 		bp->b_offset = offset_in_page(bp->b_addr);
372 		bp->b_pages = bp->b_page_array;
373 		bp->b_pages[0] = virt_to_page(bp->b_addr);
374 		bp->b_page_count = 1;
375 		bp->b_flags |= _XBF_KMEM;
376 		return 0;
377 	}
378 
379 use_alloc_page:
380 	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
381 	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
382 								>> PAGE_SHIFT;
383 	page_count = end - start;
384 	error = _xfs_buf_get_pages(bp, page_count, flags);
385 	if (unlikely(error))
386 		return error;
387 
388 	offset = bp->b_offset;
389 	bp->b_flags |= _XBF_PAGES;
390 
391 	for (i = 0; i < bp->b_page_count; i++) {
392 		struct page	*page;
393 		uint		retries = 0;
394 retry:
395 		page = alloc_page(gfp_mask);
396 		if (unlikely(page == NULL)) {
397 			if (flags & XBF_READ_AHEAD) {
398 				bp->b_page_count = i;
399 				error = ENOMEM;
400 				goto out_free_pages;
401 			}
402 
403 			/*
404 			 * This could deadlock.
405 			 *
406 			 * But until all the XFS lowlevel code is revamped to
407 			 * handle buffer allocation failures we can't do much.
408 			 */
409 			if (!(++retries % 100))
410 				xfs_err(NULL,
411 		"possible memory allocation deadlock in %s (mode:0x%x)",
412 					__func__, gfp_mask);
413 
414 			XFS_STATS_INC(xb_page_retries);
415 			congestion_wait(BLK_RW_ASYNC, HZ/50);
416 			goto retry;
417 		}
418 
419 		XFS_STATS_INC(xb_page_found);
420 
421 		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
422 		size -= nbytes;
423 		bp->b_pages[i] = page;
424 		offset = 0;
425 	}
426 	return 0;
427 
428 out_free_pages:
429 	for (i = 0; i < bp->b_page_count; i++)
430 		__free_page(bp->b_pages[i]);
431 	return error;
432 }
433 
434 /*
435  *	Map buffer into kernel address-space if necessary.
436  */
437 STATIC int
438 _xfs_buf_map_pages(
439 	xfs_buf_t		*bp,
440 	uint			flags)
441 {
442 	ASSERT(bp->b_flags & _XBF_PAGES);
443 	if (bp->b_page_count == 1) {
444 		/* A single page buffer is always mappable */
445 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
446 	} else if (flags & XBF_UNMAPPED) {
447 		bp->b_addr = NULL;
448 	} else {
449 		int retried = 0;
450 
451 		do {
452 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
453 						-1, PAGE_KERNEL);
454 			if (bp->b_addr)
455 				break;
456 			vm_unmap_aliases();
457 		} while (retried++ <= 1);
458 
459 		if (!bp->b_addr)
460 			return -ENOMEM;
461 		bp->b_addr += bp->b_offset;
462 	}
463 
464 	return 0;
465 }
466 
467 /*
468  *	Finding and Reading Buffers
469  */
470 
471 /*
472  *	Look up, and create if absent, a lockable buffer for
473  *	a given range of the block device.  The buffer is returned
474  *	locked.  No I/O is implied by this call.
475  */
476 xfs_buf_t *
477 _xfs_buf_find(
478 	struct xfs_buftarg	*btp,
479 	struct xfs_buf_map	*map,
480 	int			nmaps,
481 	xfs_buf_flags_t		flags,
482 	xfs_buf_t		*new_bp)
483 {
484 	size_t			numbytes;
485 	struct xfs_perag	*pag;
486 	struct rb_node		**rbp;
487 	struct rb_node		*parent;
488 	xfs_buf_t		*bp;
489 	xfs_daddr_t		blkno = map[0].bm_bn;
490 	xfs_daddr_t		eofs;
491 	int			numblks = 0;
492 	int			i;
493 
494 	for (i = 0; i < nmaps; i++)
495 		numblks += map[i].bm_len;
496 	numbytes = BBTOB(numblks);
497 
498 	/* Check for IOs smaller than the sector size / not sector aligned */
499 	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
500 	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
501 
502 	/*
503 	 * Corrupted block numbers can get through to here, unfortunately, so we
504 	 * have to check that the buffer falls within the filesystem bounds.
505 	 */
506 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
507 	if (blkno >= eofs) {
508 		/*
509 		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
510 		 * but none of the higher level infrastructure supports
511 		 * returning a specific error on buffer lookup failures.
512 		 */
513 		xfs_alert(btp->bt_mount,
514 			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
515 			  __func__, blkno, eofs);
516 		return NULL;
517 	}
518 
519 	/* get tree root */
520 	pag = xfs_perag_get(btp->bt_mount,
521 				xfs_daddr_to_agno(btp->bt_mount, blkno));
522 
523 	/* walk tree */
524 	spin_lock(&pag->pag_buf_lock);
525 	rbp = &pag->pag_buf_tree.rb_node;
526 	parent = NULL;
527 	bp = NULL;
528 	while (*rbp) {
529 		parent = *rbp;
530 		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
531 
532 		if (blkno < bp->b_bn)
533 			rbp = &(*rbp)->rb_left;
534 		else if (blkno > bp->b_bn)
535 			rbp = &(*rbp)->rb_right;
536 		else {
537 			/*
538 			 * found a block number match. If the range doesn't
539 			 * match, the only way this is allowed is if the buffer
540 			 * in the cache is stale and the transaction that made
541 			 * it stale has not yet committed. i.e. we are
542 			 * reallocating a busy extent. Skip this buffer and
543 			 * continue searching to the right for an exact match.
544 			 */
545 			if (bp->b_length != numblks) {
546 				ASSERT(bp->b_flags & XBF_STALE);
547 				rbp = &(*rbp)->rb_right;
548 				continue;
549 			}
550 			atomic_inc(&bp->b_hold);
551 			goto found;
552 		}
553 	}
554 
555 	/* No match found */
556 	if (new_bp) {
557 		rb_link_node(&new_bp->b_rbnode, parent, rbp);
558 		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
559 		/* the buffer keeps the perag reference until it is freed */
560 		new_bp->b_pag = pag;
561 		spin_unlock(&pag->pag_buf_lock);
562 	} else {
563 		XFS_STATS_INC(xb_miss_locked);
564 		spin_unlock(&pag->pag_buf_lock);
565 		xfs_perag_put(pag);
566 	}
567 	return new_bp;
568 
569 found:
570 	spin_unlock(&pag->pag_buf_lock);
571 	xfs_perag_put(pag);
572 
573 	if (!xfs_buf_trylock(bp)) {
574 		if (flags & XBF_TRYLOCK) {
575 			xfs_buf_rele(bp);
576 			XFS_STATS_INC(xb_busy_locked);
577 			return NULL;
578 		}
579 		xfs_buf_lock(bp);
580 		XFS_STATS_INC(xb_get_locked_waited);
581 	}
582 
583 	/*
584 	 * if the buffer is stale, clear all the external state associated with
585 	 * it. We need to keep flags such as how we allocated the buffer memory
586 	 * intact here.
587 	 */
588 	if (bp->b_flags & XBF_STALE) {
589 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
590 		ASSERT(bp->b_iodone == NULL);
591 		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
592 		bp->b_ops = NULL;
593 	}
594 
595 	trace_xfs_buf_find(bp, flags, _RET_IP_);
596 	XFS_STATS_INC(xb_get_locked);
597 	return bp;
598 }
599 
600 /*
601  * Assembles a buffer covering the specified range. The code is optimised for
602  * cache hits, as metadata intensive workloads will see 3 orders of magnitude
603  * more hits than misses.
604  */
605 struct xfs_buf *
606 xfs_buf_get_map(
607 	struct xfs_buftarg	*target,
608 	struct xfs_buf_map	*map,
609 	int			nmaps,
610 	xfs_buf_flags_t		flags)
611 {
612 	struct xfs_buf		*bp;
613 	struct xfs_buf		*new_bp;
614 	int			error = 0;
615 
616 	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
617 	if (likely(bp))
618 		goto found;
619 
620 	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
621 	if (unlikely(!new_bp))
622 		return NULL;
623 
624 	error = xfs_buf_allocate_memory(new_bp, flags);
625 	if (error) {
626 		xfs_buf_free(new_bp);
627 		return NULL;
628 	}
629 
630 	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
631 	if (!bp) {
632 		xfs_buf_free(new_bp);
633 		return NULL;
634 	}
635 
636 	if (bp != new_bp)
637 		xfs_buf_free(new_bp);
638 
639 found:
640 	if (!bp->b_addr) {
641 		error = _xfs_buf_map_pages(bp, flags);
642 		if (unlikely(error)) {
643 			xfs_warn(target->bt_mount,
644 				"%s: failed to map pages\n", __func__);
645 			xfs_buf_relse(bp);
646 			return NULL;
647 		}
648 	}
649 
650 	XFS_STATS_INC(xb_get);
651 	trace_xfs_buf_get(bp, flags, _RET_IP_);
652 	return bp;
653 }
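
/*
 * Usage sketch (hypothetical caller, not part of the original file): a
 * single contiguous range is described with DEFINE_SINGLE_BUF_MAP() and
 * handed to xfs_buf_get_map().  The helper name and the XBF_TRYLOCK flag
 * choice are illustrative only; the calls match the interfaces above.
 */
static inline struct xfs_buf *
example_buf_get_one_extent(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	/* returns a locked, held buffer, or NULL on failure */
	return xfs_buf_get_map(target, &map, 1, XBF_TRYLOCK);
}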
654 
655 STATIC int
656 _xfs_buf_read(
657 	xfs_buf_t		*bp,
658 	xfs_buf_flags_t		flags)
659 {
660 	ASSERT(!(flags & XBF_WRITE));
661 	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
662 
663 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
664 	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
665 
666 	xfs_buf_iorequest(bp);
667 	if (flags & XBF_ASYNC)
668 		return 0;
669 	return xfs_buf_iowait(bp);
670 }
671 
672 xfs_buf_t *
673 xfs_buf_read_map(
674 	struct xfs_buftarg	*target,
675 	struct xfs_buf_map	*map,
676 	int			nmaps,
677 	xfs_buf_flags_t		flags,
678 	const struct xfs_buf_ops *ops)
679 {
680 	struct xfs_buf		*bp;
681 
682 	flags |= XBF_READ;
683 
684 	bp = xfs_buf_get_map(target, map, nmaps, flags);
685 	if (bp) {
686 		trace_xfs_buf_read(bp, flags, _RET_IP_);
687 
688 		if (!XFS_BUF_ISDONE(bp)) {
689 			XFS_STATS_INC(xb_get_read);
690 			bp->b_ops = ops;
691 			_xfs_buf_read(bp, flags);
692 		} else if (flags & XBF_ASYNC) {
693 			/*
694 			 * Read ahead call which is already satisfied,
695 			 * drop the buffer
696 			 */
697 			xfs_buf_relse(bp);
698 			return NULL;
699 		} else {
700 			/* We do not want read in the flags */
701 			bp->b_flags &= ~XBF_READ;
702 		}
703 	}
704 
705 	return bp;
706 }
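
/*
 * Usage sketch (hypothetical): read a range through the cache with a
 * verifier attached, check for I/O or verifier errors, then release the
 * buffer.  The helper name is an assumption; the calls match the
 * interfaces defined above.
 */
static inline int
example_buf_read_one_extent(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	struct xfs_buf		*bp;
	int			error;

	bp = xfs_buf_read_map(target, &map, 1, 0, ops);
	if (!bp)
		return ENOMEM;

	error = bp->b_error;		/* set by the read or the verifier */
	if (!error) {
		/* ... consume bp->b_addr while the buffer is locked ... */
	}
	xfs_buf_relse(bp);		/* unlock and drop our reference */
	return error;
}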
707 
708 /*
709  *	If we are not low on memory then do the readahead in a deadlock
710  *	safe manner.
711  */
712 void
713 xfs_buf_readahead_map(
714 	struct xfs_buftarg	*target,
715 	struct xfs_buf_map	*map,
716 	int			nmaps,
717 	const struct xfs_buf_ops *ops)
718 {
719 	if (bdi_read_congested(target->bt_bdi))
720 		return;
721 
722 	xfs_buf_read_map(target, map, nmaps,
723 		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
724 }
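
/*
 * Usage sketch (hypothetical): issue readahead for a range we expect to
 * need soon, then perform the blocking read later.  If the readahead has
 * completed, the second call finds a done buffer and issues no new I/O;
 * if it is still in flight, xfs_buf_read_map() waits on the buffer lock
 * held across the asynchronous read.  The helper is illustrative only.
 */
static inline struct xfs_buf *
example_readahead_then_read(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	xfs_buf_readahead_map(target, map, nmaps, ops);

	/* ... do other work while the readahead is (possibly) in flight ... */

	return xfs_buf_read_map(target, map, nmaps, 0, ops);
}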
725 
726 /*
727  * Read an uncached buffer from disk. Allocates and returns a locked
728  * buffer containing the disk contents or nothing.
729  */
730 struct xfs_buf *
731 xfs_buf_read_uncached(
732 	struct xfs_buftarg	*target,
733 	xfs_daddr_t		daddr,
734 	size_t			numblks,
735 	int			flags,
736 	const struct xfs_buf_ops *ops)
737 {
738 	struct xfs_buf		*bp;
739 
740 	bp = xfs_buf_get_uncached(target, numblks, flags);
741 	if (!bp)
742 		return NULL;
743 
744 	/* set up the buffer for a read IO */
745 	ASSERT(bp->b_map_count == 1);
746 	bp->b_bn = daddr;
747 	bp->b_maps[0].bm_bn = daddr;
748 	bp->b_flags |= XBF_READ;
749 	bp->b_ops = ops;
750 
751 	xfsbdstrat(target->bt_mount, bp);
752 	xfs_buf_iowait(bp);
753 	return bp;
754 }
755 
756 /*
757  * Return a buffer allocated as an empty buffer and associated with external
758  * memory via xfs_buf_associate_memory() back to its empty state.
759  */
760 void
761 xfs_buf_set_empty(
762 	struct xfs_buf		*bp,
763 	size_t			numblks)
764 {
765 	if (bp->b_pages)
766 		_xfs_buf_free_pages(bp);
767 
768 	bp->b_pages = NULL;
769 	bp->b_page_count = 0;
770 	bp->b_addr = NULL;
771 	bp->b_length = numblks;
772 	bp->b_io_length = numblks;
773 
774 	ASSERT(bp->b_map_count == 1);
775 	bp->b_bn = XFS_BUF_DADDR_NULL;
776 	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
777 	bp->b_maps[0].bm_len = bp->b_length;
778 }
779 
780 static inline struct page *
781 mem_to_page(
782 	void			*addr)
783 {
784 	if ((!is_vmalloc_addr(addr))) {
785 		return virt_to_page(addr);
786 	} else {
787 		return vmalloc_to_page(addr);
788 	}
789 }
790 
791 int
792 xfs_buf_associate_memory(
793 	xfs_buf_t		*bp,
794 	void			*mem,
795 	size_t			len)
796 {
797 	int			rval;
798 	int			i = 0;
799 	unsigned long		pageaddr;
800 	unsigned long		offset;
801 	size_t			buflen;
802 	int			page_count;
803 
804 	pageaddr = (unsigned long)mem & PAGE_MASK;
805 	offset = (unsigned long)mem - pageaddr;
806 	buflen = PAGE_ALIGN(len + offset);
807 	page_count = buflen >> PAGE_SHIFT;
808 
809 	/* Free any previous set of page pointers */
810 	if (bp->b_pages)
811 		_xfs_buf_free_pages(bp);
812 
813 	bp->b_pages = NULL;
814 	bp->b_addr = mem;
815 
816 	rval = _xfs_buf_get_pages(bp, page_count, 0);
817 	if (rval)
818 		return rval;
819 
820 	bp->b_offset = offset;
821 
822 	for (i = 0; i < bp->b_page_count; i++) {
823 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
824 		pageaddr += PAGE_SIZE;
825 	}
826 
827 	bp->b_io_length = BTOBB(len);
828 	bp->b_length = BTOBB(buflen);
829 
830 	return 0;
831 }
832 
833 xfs_buf_t *
834 xfs_buf_get_uncached(
835 	struct xfs_buftarg	*target,
836 	size_t			numblks,
837 	int			flags)
838 {
839 	unsigned long		page_count;
840 	int			error, i;
841 	struct xfs_buf		*bp;
842 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
843 
844 	bp = _xfs_buf_alloc(target, &map, 1, 0);
845 	if (unlikely(bp == NULL))
846 		goto fail;
847 
848 	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
849 	error = _xfs_buf_get_pages(bp, page_count, 0);
850 	if (error)
851 		goto fail_free_buf;
852 
853 	for (i = 0; i < page_count; i++) {
854 		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
855 		if (!bp->b_pages[i])
856 			goto fail_free_mem;
857 	}
858 	bp->b_flags |= _XBF_PAGES;
859 
860 	error = _xfs_buf_map_pages(bp, 0);
861 	if (unlikely(error)) {
862 		xfs_warn(target->bt_mount,
863 			"%s: failed to map pages\n", __func__);
864 		goto fail_free_mem;
865 	}
866 
867 	trace_xfs_buf_get_uncached(bp, _RET_IP_);
868 	return bp;
869 
870  fail_free_mem:
871 	while (--i >= 0)
872 		__free_page(bp->b_pages[i]);
873 	_xfs_buf_free_pages(bp);
874  fail_free_buf:
875 	xfs_buf_free_maps(bp);
876 	kmem_zone_free(xfs_buf_zone, bp);
877  fail:
878 	return NULL;
879 }
880 
881 /*
882  *	Increment reference count on buffer, to hold the buffer concurrently
883  *	with another thread which may release (free) the buffer asynchronously.
884  *	Must hold the buffer already to call this function.
885  */
886 void
887 xfs_buf_hold(
888 	xfs_buf_t		*bp)
889 {
890 	trace_xfs_buf_hold(bp, _RET_IP_);
891 	atomic_inc(&bp->b_hold);
892 }
893 
894 /*
895  *	Releases a hold on the specified buffer.  If the
896  *	hold count is 1, calls xfs_buf_free.
897  */
898 void
899 xfs_buf_rele(
900 	xfs_buf_t		*bp)
901 {
902 	struct xfs_perag	*pag = bp->b_pag;
903 
904 	trace_xfs_buf_rele(bp, _RET_IP_);
905 
906 	if (!pag) {
907 		ASSERT(list_empty(&bp->b_lru));
908 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
909 		if (atomic_dec_and_test(&bp->b_hold))
910 			xfs_buf_free(bp);
911 		return;
912 	}
913 
914 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
915 
916 	ASSERT(atomic_read(&bp->b_hold) > 0);
917 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
918 		if (!(bp->b_flags & XBF_STALE) &&
919 			   atomic_read(&bp->b_lru_ref)) {
920 			xfs_buf_lru_add(bp);
921 			spin_unlock(&pag->pag_buf_lock);
922 		} else {
923 			xfs_buf_lru_del(bp);
924 			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
925 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
926 			spin_unlock(&pag->pag_buf_lock);
927 			xfs_perag_put(pag);
928 			xfs_buf_free(bp);
929 		}
930 	}
931 }
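
/*
 * Usage sketch (hypothetical): take an extra hold on a buffer that is
 * about to be handed to code that may complete asynchronously, and drop
 * that hold afterwards.  xfs_buf_iorequest() below uses the same pattern
 * around I/O submission.  Every xfs_buf_hold() must be balanced by an
 * xfs_buf_rele(); the final release frees the buffer or parks it on the
 * LRU as described above.
 */
static inline void
example_hold_across_async_work(
	struct xfs_buf		*bp)
{
	xfs_buf_hold(bp);		/* keep bp alive across the window */
	/* ... hand bp to code that may drop its own reference ... */
	xfs_buf_rele(bp);		/* drop the extra reference */
}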
932 
933 
934 /*
935  *	Lock a buffer object, if it is not already locked.
936  *
937  *	If we come across a stale, pinned, locked buffer, we know that we are
938  *	being asked to lock a buffer that has been reallocated. Because it is
939  *	pinned, we know that the log has not been pushed to disk and hence it
940  *	will still be locked.  Rather than continuing to have trylock attempts
941  *	fail until someone else pushes the log, push it ourselves before
942  *	returning.  This means that the xfsaild will not get stuck trying
943  *	to push on stale inode buffers.
944  */
945 int
946 xfs_buf_trylock(
947 	struct xfs_buf		*bp)
948 {
949 	int			locked;
950 
951 	locked = down_trylock(&bp->b_sema) == 0;
952 	if (locked)
953 		XB_SET_OWNER(bp);
954 
955 	trace_xfs_buf_trylock(bp, _RET_IP_);
956 	return locked;
957 }
958 
959 /*
960  *	Lock a buffer object.
961  *
962  *	If we come across a stale, pinned, locked buffer, we know that we
963  *	are being asked to lock a buffer that has been reallocated. Because
964  *	it is pinned, we know that the log has not been pushed to disk and
965  *	hence it will still be locked. Rather than sleeping until someone
966  *	else pushes the log, push it ourselves before trying to get the lock.
967  */
968 void
969 xfs_buf_lock(
970 	struct xfs_buf		*bp)
971 {
972 	trace_xfs_buf_lock(bp, _RET_IP_);
973 
974 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
975 		xfs_log_force(bp->b_target->bt_mount, 0);
976 	down(&bp->b_sema);
977 	XB_SET_OWNER(bp);
978 
979 	trace_xfs_buf_lock_done(bp, _RET_IP_);
980 }
981 
982 void
983 xfs_buf_unlock(
984 	struct xfs_buf		*bp)
985 {
986 	XB_CLEAR_OWNER(bp);
987 	up(&bp->b_sema);
988 
989 	trace_xfs_buf_unlock(bp, _RET_IP_);
990 }
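
/*
 * Usage sketch (hypothetical): the trylock-or-block pattern for callers
 * that may or may not be allowed to sleep on a busy buffer.  The
 * "can_block" parameter is an illustrative stand-in for an XBF_TRYLOCK
 * style flag; compare the trylock handling in _xfs_buf_find() above.
 */
static inline bool
example_lock_buffer(
	struct xfs_buf		*bp,
	bool			can_block)
{
	if (xfs_buf_trylock(bp))
		return true;
	if (!can_block)
		return false;
	xfs_buf_lock(bp);	/* may push the log for stale, pinned buffers */
	return true;
}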
991 
992 STATIC void
993 xfs_buf_wait_unpin(
994 	xfs_buf_t		*bp)
995 {
996 	DECLARE_WAITQUEUE	(wait, current);
997 
998 	if (atomic_read(&bp->b_pin_count) == 0)
999 		return;
1000 
1001 	add_wait_queue(&bp->b_waiters, &wait);
1002 	for (;;) {
1003 		set_current_state(TASK_UNINTERRUPTIBLE);
1004 		if (atomic_read(&bp->b_pin_count) == 0)
1005 			break;
1006 		io_schedule();
1007 	}
1008 	remove_wait_queue(&bp->b_waiters, &wait);
1009 	set_current_state(TASK_RUNNING);
1010 }
1011 
1012 /*
1013  *	Buffer Utility Routines
1014  */
1015 
1016 STATIC void
1017 xfs_buf_iodone_work(
1018 	struct work_struct	*work)
1019 {
1020 	struct xfs_buf		*bp =
1021 		container_of(work, xfs_buf_t, b_iodone_work);
1022 	bool			read = !!(bp->b_flags & XBF_READ);
1023 
1024 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1025 	if (read && bp->b_ops)
1026 		bp->b_ops->verify_read(bp);
1027 
1028 	if (bp->b_iodone)
1029 		(*(bp->b_iodone))(bp);
1030 	else if (bp->b_flags & XBF_ASYNC)
1031 		xfs_buf_relse(bp);
1032 	else {
1033 		ASSERT(read && bp->b_ops);
1034 		complete(&bp->b_iowait);
1035 	}
1036 }
1037 
1038 void
1039 xfs_buf_ioend(
1040 	struct xfs_buf	*bp,
1041 	int		schedule)
1042 {
1043 	bool		read = !!(bp->b_flags & XBF_READ);
1044 
1045 	trace_xfs_buf_iodone(bp, _RET_IP_);
1046 
1047 	if (bp->b_error == 0)
1048 		bp->b_flags |= XBF_DONE;
1049 
1050 	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
1051 		if (schedule) {
1052 			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1053 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1054 		} else {
1055 			xfs_buf_iodone_work(&bp->b_iodone_work);
1056 		}
1057 	} else {
1058 		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1059 		complete(&bp->b_iowait);
1060 	}
1061 }
1062 
1063 void
1064 xfs_buf_ioerror(
1065 	xfs_buf_t		*bp,
1066 	int			error)
1067 {
1068 	ASSERT(error >= 0 && error <= 0xffff);
1069 	bp->b_error = (unsigned short)error;
1070 	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1071 }
1072 
1073 void
1074 xfs_buf_ioerror_alert(
1075 	struct xfs_buf		*bp,
1076 	const char		*func)
1077 {
1078 	xfs_alert(bp->b_target->bt_mount,
1079 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1080 		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
1081 }
1082 
1083 /*
1084  * Called when we want to stop a buffer from getting written or read.
1085  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1086  * so that the proper iodone callbacks get called.
1087  */
1088 STATIC int
1089 xfs_bioerror(
1090 	xfs_buf_t *bp)
1091 {
1092 #ifdef XFSERRORDEBUG
1093 	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1094 #endif
1095 
1096 	/*
1097 	 * No need to wait until the buffer is unpinned, we aren't flushing it.
1098 	 */
1099 	xfs_buf_ioerror(bp, EIO);
1100 
1101 	/*
1102 	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1103 	 */
1104 	XFS_BUF_UNREAD(bp);
1105 	XFS_BUF_UNDONE(bp);
1106 	xfs_buf_stale(bp);
1107 
1108 	xfs_buf_ioend(bp, 0);
1109 
1110 	return EIO;
1111 }
1112 
1113 /*
1114  * Same as xfs_bioerror, except that we are releasing the buffer
1115  * here ourselves, and avoiding the xfs_buf_ioend call.
1116  * This is meant for userdata errors; metadata bufs come with
1117  * iodone functions attached, so that we can track down errors.
1118  */
1119 STATIC int
1120 xfs_bioerror_relse(
1121 	struct xfs_buf	*bp)
1122 {
1123 	int64_t		fl = bp->b_flags;
1124 	/*
1125 	 * No need to wait until the buffer is unpinned.
1126 	 * We aren't flushing it.
1127 	 *
1128 	 * chunkhold expects B_DONE to be set, whether
1129 	 * we actually finish the I/O or not. We don't want to
1130 	 * change that interface.
1131 	 */
1132 	XFS_BUF_UNREAD(bp);
1133 	XFS_BUF_DONE(bp);
1134 	xfs_buf_stale(bp);
1135 	bp->b_iodone = NULL;
1136 	if (!(fl & XBF_ASYNC)) {
1137 		/*
1138 		 * Mark b_error and B_ERROR _both_.
1139 		 * Lots of chunkcache code assumes that.
1140 		 * There's no reason to mark error for
1141 		 * ASYNC buffers.
1142 		 */
1143 		xfs_buf_ioerror(bp, EIO);
1144 		complete(&bp->b_iowait);
1145 	} else {
1146 		xfs_buf_relse(bp);
1147 	}
1148 
1149 	return EIO;
1150 }
1151 
1152 STATIC int
1153 xfs_bdstrat_cb(
1154 	struct xfs_buf	*bp)
1155 {
1156 	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1157 		trace_xfs_bdstrat_shut(bp, _RET_IP_);
1158 		/*
1159 		 * Metadata write that didn't get logged but
1160 		 * written delayed anyway. These aren't associated
1161 		 * with a transaction, and can be ignored.
1162 		 */
1163 		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1164 			return xfs_bioerror_relse(bp);
1165 		else
1166 			return xfs_bioerror(bp);
1167 	}
1168 
1169 	xfs_buf_iorequest(bp);
1170 	return 0;
1171 }
1172 
1173 int
1174 xfs_bwrite(
1175 	struct xfs_buf		*bp)
1176 {
1177 	int			error;
1178 
1179 	ASSERT(xfs_buf_islocked(bp));
1180 
1181 	bp->b_flags |= XBF_WRITE;
1182 	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
1183 
1184 	xfs_bdstrat_cb(bp);
1185 
1186 	error = xfs_buf_iowait(bp);
1187 	if (error) {
1188 		xfs_force_shutdown(bp->b_target->bt_mount,
1189 				   SHUTDOWN_META_IO_ERROR);
1190 	}
1191 	return error;
1192 }
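
/*
 * Usage sketch (hypothetical): write a locked, modified buffer back to
 * disk synchronously and release it.  xfs_bwrite() requires the buffer
 * lock, waits for the I/O and returns any error, shutting the filesystem
 * down on failure as above.  The helper name is an assumption.
 */
static inline int
example_write_and_release(
	struct xfs_buf		*bp)
{
	int	error;

	/* the caller has already modified the buffer contents */
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);		/* unlock and drop our reference */
	return error;
}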
1193 
1194 /*
1195  * Wrapper around bdstrat so that we can stop data from going to disk in case
1196  * we are shutting down the filesystem.  Typically user data goes through this
1197  * path; one of the exceptions is the superblock.
1198  */
1199 void
1200 xfsbdstrat(
1201 	struct xfs_mount	*mp,
1202 	struct xfs_buf		*bp)
1203 {
1204 	if (XFS_FORCED_SHUTDOWN(mp)) {
1205 		trace_xfs_bdstrat_shut(bp, _RET_IP_);
1206 		xfs_bioerror_relse(bp);
1207 		return;
1208 	}
1209 
1210 	xfs_buf_iorequest(bp);
1211 }
1212 
1213 STATIC void
1214 _xfs_buf_ioend(
1215 	xfs_buf_t		*bp,
1216 	int			schedule)
1217 {
1218 	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1219 		xfs_buf_ioend(bp, schedule);
1220 }
1221 
1222 STATIC void
1223 xfs_buf_bio_end_io(
1224 	struct bio		*bio,
1225 	int			error)
1226 {
1227 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
1228 
1229 	/*
1230 	 * don't overwrite existing errors - otherwise we can lose errors on
1231 	 * buffers that require multiple bios to complete.
1232 	 */
1233 	if (!bp->b_error)
1234 		xfs_buf_ioerror(bp, -error);
1235 
1236 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1237 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1238 
1239 	_xfs_buf_ioend(bp, 1);
1240 	bio_put(bio);
1241 }
1242 
1243 static void
1244 xfs_buf_ioapply_map(
1245 	struct xfs_buf	*bp,
1246 	int		map,
1247 	int		*buf_offset,
1248 	int		*count,
1249 	int		rw)
1250 {
1251 	int		page_index;
1252 	int		total_nr_pages = bp->b_page_count;
1253 	int		nr_pages;
1254 	struct bio	*bio;
1255 	sector_t	sector =  bp->b_maps[map].bm_bn;
1256 	int		size;
1257 	int		offset;
1258 
1259 	total_nr_pages = bp->b_page_count;
1260 
1261 	/* skip the pages in the buffer before the start offset */
1262 	page_index = 0;
1263 	offset = *buf_offset;
1264 	while (offset >= PAGE_SIZE) {
1265 		page_index++;
1266 		offset -= PAGE_SIZE;
1267 	}
1268 
1269 	/*
1270 	 * Limit the IO size to the length of the current vector, and update the
1271 	 * remaining IO count for the next time around.
1272 	 */
1273 	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1274 	*count -= size;
1275 	*buf_offset += size;
1276 
1277 next_chunk:
1278 	atomic_inc(&bp->b_io_remaining);
1279 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1280 	if (nr_pages > total_nr_pages)
1281 		nr_pages = total_nr_pages;
1282 
1283 	bio = bio_alloc(GFP_NOIO, nr_pages);
1284 	bio->bi_bdev = bp->b_target->bt_bdev;
1285 	bio->bi_sector = sector;
1286 	bio->bi_end_io = xfs_buf_bio_end_io;
1287 	bio->bi_private = bp;
1288 
1289 
1290 	for (; size && nr_pages; nr_pages--, page_index++) {
1291 		int	rbytes, nbytes = PAGE_SIZE - offset;
1292 
1293 		if (nbytes > size)
1294 			nbytes = size;
1295 
1296 		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1297 				      offset);
1298 		if (rbytes < nbytes)
1299 			break;
1300 
1301 		offset = 0;
1302 		sector += BTOBB(nbytes);
1303 		size -= nbytes;
1304 		total_nr_pages--;
1305 	}
1306 
1307 	if (likely(bio->bi_size)) {
1308 		if (xfs_buf_is_vmapped(bp)) {
1309 			flush_kernel_vmap_range(bp->b_addr,
1310 						xfs_buf_vmap_len(bp));
1311 		}
1312 		submit_bio(rw, bio);
1313 		if (size)
1314 			goto next_chunk;
1315 	} else {
1316 		/*
1317 		 * This is guaranteed not to be the last io reference count
1318 		 * because the caller (xfs_buf_iorequest) holds a count itself.
1319 		 */
1320 		atomic_dec(&bp->b_io_remaining);
1321 		xfs_buf_ioerror(bp, EIO);
1322 		bio_put(bio);
1323 	}
1324 
1325 }
1326 
1327 STATIC void
1328 _xfs_buf_ioapply(
1329 	struct xfs_buf	*bp)
1330 {
1331 	struct blk_plug	plug;
1332 	int		rw;
1333 	int		offset;
1334 	int		size;
1335 	int		i;
1336 
1337 	if (bp->b_flags & XBF_WRITE) {
1338 		if (bp->b_flags & XBF_SYNCIO)
1339 			rw = WRITE_SYNC;
1340 		else
1341 			rw = WRITE;
1342 		if (bp->b_flags & XBF_FUA)
1343 			rw |= REQ_FUA;
1344 		if (bp->b_flags & XBF_FLUSH)
1345 			rw |= REQ_FLUSH;
1346 
1347 		/*
1348 		 * Run the write verifier callback function if it exists. If
1349 		 * this function fails it will mark the buffer with an error and
1350 		 * the IO should not be dispatched.
1351 		 */
1352 		if (bp->b_ops) {
1353 			bp->b_ops->verify_write(bp);
1354 			if (bp->b_error) {
1355 				xfs_force_shutdown(bp->b_target->bt_mount,
1356 						   SHUTDOWN_CORRUPT_INCORE);
1357 				return;
1358 			}
1359 		}
1360 	} else if (bp->b_flags & XBF_READ_AHEAD) {
1361 		rw = READA;
1362 	} else {
1363 		rw = READ;
1364 	}
1365 
1366 	/* we only use the buffer cache for meta-data */
1367 	rw |= REQ_META;
1368 
1369 	/*
1370 	 * Walk all the vectors issuing IO on them. Set up the initial offset
1371 	 * into the buffer and the desired IO size before we start -
1372 	 * xfs_buf_ioapply_map() will modify them appropriately for each
1373 	 * subsequent call.
1374 	 */
1375 	offset = bp->b_offset;
1376 	size = BBTOB(bp->b_io_length);
1377 	blk_start_plug(&plug);
1378 	for (i = 0; i < bp->b_map_count; i++) {
1379 		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1380 		if (bp->b_error)
1381 			break;
1382 		if (size <= 0)
1383 			break;	/* all done */
1384 	}
1385 	blk_finish_plug(&plug);
1386 }
1387 
1388 void
1389 xfs_buf_iorequest(
1390 	xfs_buf_t		*bp)
1391 {
1392 	trace_xfs_buf_iorequest(bp, _RET_IP_);
1393 
1394 	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1395 
1396 	if (bp->b_flags & XBF_WRITE)
1397 		xfs_buf_wait_unpin(bp);
1398 	xfs_buf_hold(bp);
1399 
1400 	/* Set the count to 1 initially: this will stop an I/O
1401 	 * completion callout which happens before we have started
1402 	 * all the I/O from calling xfs_buf_ioend too early.
1403 	 */
1404 	atomic_set(&bp->b_io_remaining, 1);
1405 	_xfs_buf_ioapply(bp);
1406 	_xfs_buf_ioend(bp, 1);
1407 
1408 	xfs_buf_rele(bp);
1409 }
1410 
1411 /*
1412  * Waits for I/O to complete on the buffer supplied.  It returns immediately if
1413  * no I/O is pending or there is already a pending error on the buffer.  It
1414  * returns the I/O error code, if any, or 0 if there was no error.
1415  */
1416 int
1417 xfs_buf_iowait(
1418 	xfs_buf_t		*bp)
1419 {
1420 	trace_xfs_buf_iowait(bp, _RET_IP_);
1421 
1422 	if (!bp->b_error)
1423 		wait_for_completion(&bp->b_iowait);
1424 
1425 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1426 	return bp->b_error;
1427 }
1428 
1429 xfs_caddr_t
1430 xfs_buf_offset(
1431 	xfs_buf_t		*bp,
1432 	size_t			offset)
1433 {
1434 	struct page		*page;
1435 
1436 	if (bp->b_addr)
1437 		return bp->b_addr + offset;
1438 
1439 	offset += bp->b_offset;
1440 	page = bp->b_pages[offset >> PAGE_SHIFT];
1441 	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1442 }
1443 
1444 /*
1445  *	Move data into or out of a buffer.
1446  */
1447 void
1448 xfs_buf_iomove(
1449 	xfs_buf_t		*bp,	/* buffer to process		*/
1450 	size_t			boff,	/* starting buffer offset	*/
1451 	size_t			bsize,	/* length to copy		*/
1452 	void			*data,	/* data address			*/
1453 	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
1454 {
1455 	size_t			bend;
1456 
1457 	bend = boff + bsize;
1458 	while (boff < bend) {
1459 		struct page	*page;
1460 		int		page_index, page_offset, csize;
1461 
1462 		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1463 		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1464 		page = bp->b_pages[page_index];
1465 		csize = min_t(size_t, PAGE_SIZE - page_offset,
1466 				      BBTOB(bp->b_io_length) - boff);
1467 
1468 		ASSERT((csize + page_offset) <= PAGE_SIZE);
1469 
1470 		switch (mode) {
1471 		case XBRW_ZERO:
1472 			memset(page_address(page) + page_offset, 0, csize);
1473 			break;
1474 		case XBRW_READ:
1475 			memcpy(data, page_address(page) + page_offset, csize);
1476 			break;
1477 		case XBRW_WRITE:
1478 			memcpy(page_address(page) + page_offset, data, csize);
1479 		}
1480 
1481 		boff += csize;
1482 		data += csize;
1483 	}
1484 }
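
/*
 * Usage sketch (hypothetical): zero a byte range of a buffer without
 * touching bp->b_addr (useful for XBF_UNMAPPED buffers) by letting
 * xfs_buf_iomove() walk the page array.  Offsets and lengths are in
 * bytes from the start of the buffer; the helper name is an assumption.
 */
static inline void
example_zero_buf_range(
	struct xfs_buf		*bp,
	size_t			boff,
	size_t			len)
{
	/* the data pointer is unused for XBRW_ZERO */
	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
}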
1485 
1486 /*
1487  *	Handling of buffer targets (buftargs).
1488  */
1489 
1490 /*
1491  * Wait for any bufs with callbacks that have been submitted but have not yet
1492  * returned. These buffers will have an elevated hold count, so wait on those
1493  * while freeing all the buffers only held by the LRU.
1494  */
1495 void
1496 xfs_wait_buftarg(
1497 	struct xfs_buftarg	*btp)
1498 {
1499 	struct xfs_buf		*bp;
1500 
1501 restart:
1502 	spin_lock(&btp->bt_lru_lock);
1503 	while (!list_empty(&btp->bt_lru)) {
1504 		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1505 		if (atomic_read(&bp->b_hold) > 1) {
1506 			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1507 			list_move_tail(&bp->b_lru, &btp->bt_lru);
1508 			spin_unlock(&btp->bt_lru_lock);
1509 			delay(100);
1510 			goto restart;
1511 		}
1512 		/*
1513 		 * clear the LRU reference count so the buffer doesn't get
1514 		 * ignored in xfs_buf_rele().
1515 		 */
1516 		atomic_set(&bp->b_lru_ref, 0);
1517 		spin_unlock(&btp->bt_lru_lock);
1518 		xfs_buf_rele(bp);
1519 		spin_lock(&btp->bt_lru_lock);
1520 	}
1521 	spin_unlock(&btp->bt_lru_lock);
1522 }
1523 
1524 int
1525 xfs_buftarg_shrink(
1526 	struct shrinker		*shrink,
1527 	struct shrink_control	*sc)
1528 {
1529 	struct xfs_buftarg	*btp = container_of(shrink,
1530 					struct xfs_buftarg, bt_shrinker);
1531 	struct xfs_buf		*bp;
1532 	int nr_to_scan = sc->nr_to_scan;
1533 	LIST_HEAD(dispose);
1534 
1535 	if (!nr_to_scan)
1536 		return btp->bt_lru_nr;
1537 
1538 	spin_lock(&btp->bt_lru_lock);
1539 	while (!list_empty(&btp->bt_lru)) {
1540 		if (nr_to_scan-- <= 0)
1541 			break;
1542 
1543 		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1544 
1545 		/*
1546 		 * Decrement the b_lru_ref count unless the value is already
1547 		 * zero. If the value is already zero, we need to reclaim the
1548 		 * buffer, otherwise it gets another trip through the LRU.
1549 		 */
1550 		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1551 			list_move_tail(&bp->b_lru, &btp->bt_lru);
1552 			continue;
1553 		}
1554 
1555 		/*
1556 		 * remove the buffer from the LRU now to avoid needing another
1557 		 * lock round trip inside xfs_buf_rele().
1558 		 */
1559 		list_move(&bp->b_lru, &dispose);
1560 		btp->bt_lru_nr--;
1561 		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
1562 	}
1563 	spin_unlock(&btp->bt_lru_lock);
1564 
1565 	while (!list_empty(&dispose)) {
1566 		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1567 		list_del_init(&bp->b_lru);
1568 		xfs_buf_rele(bp);
1569 	}
1570 
1571 	return btp->bt_lru_nr;
1572 }
1573 
1574 void
1575 xfs_free_buftarg(
1576 	struct xfs_mount	*mp,
1577 	struct xfs_buftarg	*btp)
1578 {
1579 	unregister_shrinker(&btp->bt_shrinker);
1580 
1581 	if (mp->m_flags & XFS_MOUNT_BARRIER)
1582 		xfs_blkdev_issue_flush(btp);
1583 
1584 	kmem_free(btp);
1585 }
1586 
1587 STATIC int
1588 xfs_setsize_buftarg_flags(
1589 	xfs_buftarg_t		*btp,
1590 	unsigned int		blocksize,
1591 	unsigned int		sectorsize,
1592 	int			verbose)
1593 {
1594 	btp->bt_bsize = blocksize;
1595 	btp->bt_sshift = ffs(sectorsize) - 1;
1596 	btp->bt_smask = sectorsize - 1;
1597 
1598 	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1599 		char name[BDEVNAME_SIZE];
1600 
1601 		bdevname(btp->bt_bdev, name);
1602 
1603 		xfs_warn(btp->bt_mount,
1604 			"Cannot set_blocksize to %u on device %s\n",
1605 			sectorsize, name);
1606 		return EINVAL;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 /*
1613  *	When allocating the initial buffer target we have not yet
1614  *	read in the superblock, so we don't know what size sectors
1615  *	are being used at this early stage.  Play safe.
1616  */
1617 STATIC int
1618 xfs_setsize_buftarg_early(
1619 	xfs_buftarg_t		*btp,
1620 	struct block_device	*bdev)
1621 {
1622 	return xfs_setsize_buftarg_flags(btp,
1623 			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1624 }
1625 
1626 int
1627 xfs_setsize_buftarg(
1628 	xfs_buftarg_t		*btp,
1629 	unsigned int		blocksize,
1630 	unsigned int		sectorsize)
1631 {
1632 	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1633 }
1634 
1635 xfs_buftarg_t *
1636 xfs_alloc_buftarg(
1637 	struct xfs_mount	*mp,
1638 	struct block_device	*bdev,
1639 	int			external,
1640 	const char		*fsname)
1641 {
1642 	xfs_buftarg_t		*btp;
1643 
1644 	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1645 
1646 	btp->bt_mount = mp;
1647 	btp->bt_dev =  bdev->bd_dev;
1648 	btp->bt_bdev = bdev;
1649 	btp->bt_bdi = blk_get_backing_dev_info(bdev);
1650 	if (!btp->bt_bdi)
1651 		goto error;
1652 
1653 	INIT_LIST_HEAD(&btp->bt_lru);
1654 	spin_lock_init(&btp->bt_lru_lock);
1655 	if (xfs_setsize_buftarg_early(btp, bdev))
1656 		goto error;
1657 	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1658 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1659 	register_shrinker(&btp->bt_shrinker);
1660 	return btp;
1661 
1662 error:
1663 	kmem_free(btp);
1664 	return NULL;
1665 }
1666 
1667 /*
1668  * Add a buffer to the delayed write list.
1669  *
1670  * This queues a buffer for writeout if it hasn't already been.  Note that
1671  * neither this routine nor the buffer list submission functions perform
1672  * any internal synchronization.  It is expected that the lists are thread-local
1673  * to the callers.
1674  *
1675  * Returns true if we queued up the buffer, or false if it already had
1676  * been on the buffer list.
1677  */
1678 bool
1679 xfs_buf_delwri_queue(
1680 	struct xfs_buf		*bp,
1681 	struct list_head	*list)
1682 {
1683 	ASSERT(xfs_buf_islocked(bp));
1684 	ASSERT(!(bp->b_flags & XBF_READ));
1685 
1686 	/*
1687 	 * If the buffer is already marked delwri it already is queued up
1688 	 * by someone else for immediate writeout.  Just ignore it in that
1689 	 * case.
1690 	 */
1691 	if (bp->b_flags & _XBF_DELWRI_Q) {
1692 		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1693 		return false;
1694 	}
1695 
1696 	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1697 
1698 	/*
1699 	 * If a buffer gets written out synchronously or marked stale while it
1700 	 * is on a delwri list we lazily remove it. To do this, the other party
1701 	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1702 	 * It remains referenced and on the list.  In a rare corner case it
1703 	 * might get re-added to a delwri list after the synchronous writeout, in
1704 	 * which case we just need to re-add the flag here.
1705 	 */
1706 	bp->b_flags |= _XBF_DELWRI_Q;
1707 	if (list_empty(&bp->b_list)) {
1708 		atomic_inc(&bp->b_hold);
1709 		list_add_tail(&bp->b_list, list);
1710 	}
1711 
1712 	return true;
1713 }
1714 
1715 /*
1716  * Compare function is more complex than it needs to be because
1717  * the return value is only 32 bits and we are doing comparisons
1718  * on 64 bit values
1719  */
1720 static int
1721 xfs_buf_cmp(
1722 	void		*priv,
1723 	struct list_head *a,
1724 	struct list_head *b)
1725 {
1726 	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
1727 	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
1728 	xfs_daddr_t		diff;
1729 
1730 	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1731 	if (diff < 0)
1732 		return -1;
1733 	if (diff > 0)
1734 		return 1;
1735 	return 0;
1736 }
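
/*
 * Illustration (hypothetical, never used): why xfs_buf_cmp() cannot simply
 * return the block number difference.  Truncating the 64-bit xfs_daddr_t
 * difference to the 32-bit int return value can zero or flip the sign for
 * buffers far apart on disk, giving list_sort() an inconsistent ordering.
 */
static inline int
example_broken_buf_cmp(
	struct xfs_buf	*ap,
	struct xfs_buf	*bp)
{
	/* WRONG: a difference of 2^32 blocks truncates to 0 ("equal") */
	return (int)(ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn);
}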
1737 
1738 static int
1739 __xfs_buf_delwri_submit(
1740 	struct list_head	*buffer_list,
1741 	struct list_head	*io_list,
1742 	bool			wait)
1743 {
1744 	struct blk_plug		plug;
1745 	struct xfs_buf		*bp, *n;
1746 	int			pinned = 0;
1747 
1748 	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1749 		if (!wait) {
1750 			if (xfs_buf_ispinned(bp)) {
1751 				pinned++;
1752 				continue;
1753 			}
1754 			if (!xfs_buf_trylock(bp))
1755 				continue;
1756 		} else {
1757 			xfs_buf_lock(bp);
1758 		}
1759 
1760 		/*
1761 		 * Someone else might have written the buffer synchronously or
1762 		 * marked it stale in the meantime.  In that case only the
1763 		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1764 		 * reference and remove it from the list here.
1765 		 */
1766 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1767 			list_del_init(&bp->b_list);
1768 			xfs_buf_relse(bp);
1769 			continue;
1770 		}
1771 
1772 		list_move_tail(&bp->b_list, io_list);
1773 		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1774 	}
1775 
1776 	list_sort(NULL, io_list, xfs_buf_cmp);
1777 
1778 	blk_start_plug(&plug);
1779 	list_for_each_entry_safe(bp, n, io_list, b_list) {
1780 		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1781 		bp->b_flags |= XBF_WRITE;
1782 
1783 		if (!wait) {
1784 			bp->b_flags |= XBF_ASYNC;
1785 			list_del_init(&bp->b_list);
1786 		}
1787 		xfs_bdstrat_cb(bp);
1788 	}
1789 	blk_finish_plug(&plug);
1790 
1791 	return pinned;
1792 }
1793 
1794 /*
1795  * Write out a buffer list asynchronously.
1796  *
1797  * This will take the @buffer_list, write all non-locked and non-pinned buffers
1798  * out and not wait for I/O completion on any of the buffers.  This interface
1799  * is only safely usable for callers that can track I/O completion by higher
1800  * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1801  * function.
1802  */
1803 int
1804 xfs_buf_delwri_submit_nowait(
1805 	struct list_head	*buffer_list)
1806 {
1807 	LIST_HEAD		(io_list);
1808 	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1809 }
1810 
1811 /*
1812  * Write out a buffer list synchronously.
1813  *
1814  * This will take the @buffer_list, write all buffers out and wait for I/O
1815  * completion on all of the buffers. @buffer_list is consumed by the function,
1816  * so callers must have some other way of tracking buffers if they require such
1817  * functionality.
1818  */
1819 int
1820 xfs_buf_delwri_submit(
1821 	struct list_head	*buffer_list)
1822 {
1823 	LIST_HEAD		(io_list);
1824 	int			error = 0, error2;
1825 	struct xfs_buf		*bp;
1826 
1827 	__xfs_buf_delwri_submit(buffer_list, &io_list, true);
1828 
1829 	/* Wait for IO to complete. */
1830 	while (!list_empty(&io_list)) {
1831 		bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1832 
1833 		list_del_init(&bp->b_list);
1834 		error2 = xfs_buf_iowait(bp);
1835 		xfs_buf_relse(bp);
1836 		if (!error)
1837 			error = error2;
1838 	}
1839 
1840 	return error;
1841 }
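
/*
 * Usage sketch (hypothetical): batch dirty buffers onto a caller-owned
 * delwri list and write them out in one sorted pass.  Each buffer must be
 * locked when queued and unlocked before submission (the submit path
 * re-locks them); the list itself needs no locking because it is local to
 * the caller, as noted above.  The helper is illustrative only.
 */
static inline int
example_flush_buffer_pair(
	struct xfs_buf		*bp1,
	struct xfs_buf		*bp2)
{
	LIST_HEAD		(buffer_list);

	xfs_buf_delwri_queue(bp1, &buffer_list);	/* takes its own hold */
	xfs_buf_unlock(bp1);
	xfs_buf_delwri_queue(bp2, &buffer_list);
	xfs_buf_unlock(bp2);

	return xfs_buf_delwri_submit(&buffer_list);	/* consumes the list */
}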
1842 
1843 int __init
1844 xfs_buf_init(void)
1845 {
1846 	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1847 						KM_ZONE_HWALIGN, NULL);
1848 	if (!xfs_buf_zone)
1849 		goto out;
1850 
1851 	xfslogd_workqueue = alloc_workqueue("xfslogd",
1852 					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1853 	if (!xfslogd_workqueue)
1854 		goto out_free_buf_zone;
1855 
1856 	return 0;
1857 
1858  out_free_buf_zone:
1859 	kmem_zone_destroy(xfs_buf_zone);
1860  out:
1861 	return -ENOMEM;
1862 }
1863 
1864 void
1865 xfs_buf_terminate(void)
1866 {
1867 	destroy_workqueue(xfslogd_workqueue);
1868 	kmem_zone_destroy(xfs_buf_zone);
1869 }
1870