xref: /openbmc/linux/fs/xfs/xfs_trans_buf.c (revision ecba1060)
1 /*
2  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_buf_item.h"
38 #include "xfs_trans_priv.h"
39 #include "xfs_error.h"
40 #include "xfs_rw.h"
41 
42 
43 STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
44 		xfs_daddr_t, int);
45 STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
46 		xfs_daddr_t, int);
47 
48 
49 /*
50  * Get and lock the buffer for the caller if it is not already
51  * locked within the given transaction.  If it is already locked
52  * within the transaction, just increment its lock recursion count
53  * and return a pointer to it.
54  *
55  * Use the fast path function xfs_trans_buf_item_match() or the buffer
56  * cache routine incore_match() to find the buffer
57  * if it is already owned by this transaction.
58  *
59  * If we don't already own the buffer, use get_buf() to get it.
60  * If it doesn't yet have an associated xfs_buf_log_item structure,
61  * then allocate one and add the item to this transaction.
62  *
63  * If the transaction pointer is NULL, make this just a normal
64  * get_buf() call.
65  */
66 xfs_buf_t *
67 xfs_trans_get_buf(xfs_trans_t	*tp,
68 		  xfs_buftarg_t	*target_dev,
69 		  xfs_daddr_t	blkno,
70 		  int		len,
71 		  uint		flags)
72 {
73 	xfs_buf_t		*bp;
74 	xfs_buf_log_item_t	*bip;
75 
76 	if (flags == 0)
77 		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
78 
79 	/*
80 	 * Default to a normal get_buf() call if the tp is NULL.
81 	 */
82 	if (tp == NULL) {
83 		bp = xfs_buf_get_flags(target_dev, blkno, len,
84 							flags | BUF_BUSY);
85 		return(bp);
86 	}
87 
88 	/*
89 	 * If we find the buffer in the cache with this transaction
90 	 * pointer in its b_fsprivate2 field, then we know we already
91 	 * have it locked.  In this case we just increment the lock
92 	 * recursion count and return the buffer to the caller.
93 	 */
94 	if (tp->t_items.lic_next == NULL) {
95 		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
96 	} else {
97 		bp  = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
98 	}
99 	if (bp != NULL) {
100 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
101 		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
102 			xfs_buftrace("TRANS GET RECUR SHUT", bp);
103 			XFS_BUF_SUPER_STALE(bp);
104 		}
105 		/*
106 		 * If the buffer is stale then it was binval'ed
107 		 * since last read.  This doesn't matter since the
108 		 * caller isn't allowed to use the data anyway.
109 		 */
110 		else if (XFS_BUF_ISSTALE(bp)) {
111 			xfs_buftrace("TRANS GET RECUR STALE", bp);
112 			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
113 		}
114 		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
115 		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
116 		ASSERT(bip != NULL);
117 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
118 		bip->bli_recur++;
119 		xfs_buftrace("TRANS GET RECUR", bp);
120 		xfs_buf_item_trace("GET RECUR", bip);
121 		return (bp);
122 	}
123 
124 	/*
125 	 * We always specify the BUF_BUSY flag within a transaction so
126 	 * that get_buf does not try to push out a delayed write buffer
127 	 * which might cause another transaction to take place (if the
128 	 * buffer was delayed alloc).  Such recursive transactions can
129 	 * easily deadlock with our current transaction as well as cause
130 	 * us to run out of stack space.
131 	 */
132 	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
133 	if (bp == NULL) {
134 		return NULL;
135 	}
136 
137 	ASSERT(!XFS_BUF_GETERROR(bp));
138 
139 	/*
140 	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
141 	 * it doesn't have one yet, then allocate one and initialize it.
142 	 * The checks to see if one is there are in xfs_buf_item_init().
143 	 */
144 	xfs_buf_item_init(bp, tp->t_mountp);
145 
146 	/*
147 	 * Set the recursion count for the buffer within this transaction
148 	 * to 0.
149 	 */
150 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
151 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
152 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
153 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
154 	bip->bli_recur = 0;
155 
156 	/*
157 	 * Take a reference for this transaction on the buf item.
158 	 */
159 	atomic_inc(&bip->bli_refcount);
160 
161 	/*
162 	 * Get a log_item_desc to point at the new item.
163 	 */
164 	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
165 
166 	/*
167 	 * Initialize b_fsprivate2 so we can find it with incore_match()
168 	 * above.
169 	 */
170 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
171 
172 	xfs_buftrace("TRANS GET", bp);
173 	xfs_buf_item_trace("GET", bip);
174 	return (bp);
175 }
176 
177 /*
178  * Get and lock the superblock buffer of this file system for the
179  * given transaction.
180  *
181  * We don't need to use incore_match() here, because the superblock
182  * buffer is a private buffer which we keep a pointer to in the
183  * mount structure.
184  */
185 xfs_buf_t *
186 xfs_trans_getsb(xfs_trans_t	*tp,
187 		struct xfs_mount *mp,
188 		int		flags)
189 {
190 	xfs_buf_t		*bp;
191 	xfs_buf_log_item_t	*bip;
192 
193 	/*
194 	 * Default to just trying to lock the superblock buffer
195 	 * if tp is NULL.
196 	 */
197 	if (tp == NULL) {
198 		return (xfs_getsb(mp, flags));
199 	}
200 
201 	/*
202 	 * If the superblock buffer already has this transaction
203 	 * pointer in its b_fsprivate2 field, then we know we already
204 	 * have it locked.  In this case we just increment the lock
205 	 * recursion count and return the buffer to the caller.
206 	 */
207 	bp = mp->m_sb_bp;
208 	if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
209 		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
210 		ASSERT(bip != NULL);
211 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
212 		bip->bli_recur++;
213 		xfs_buf_item_trace("GETSB RECUR", bip);
214 		return (bp);
215 	}
216 
217 	bp = xfs_getsb(mp, flags);
218 	if (bp == NULL) {
219 		return NULL;
220 	}
221 
222 	/*
223 	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
224 	 * it doesn't have one yet, then allocate one and initialize it.
225 	 * The checks to see if one is there are in xfs_buf_item_init().
226 	 */
227 	xfs_buf_item_init(bp, mp);
228 
229 	/*
230 	 * Set the recursion count for the buffer within this transaction
231 	 * to 0.
232 	 */
233 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
234 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
235 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
236 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
237 	bip->bli_recur = 0;
238 
239 	/*
240 	 * Take a reference for this transaction on the buf item.
241 	 */
242 	atomic_inc(&bip->bli_refcount);
243 
244 	/*
245 	 * Get a log_item_desc to point at the new item.
246 	 */
247 	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
248 
249 	/*
250 	 * Initialize b_fsprivate2 so we can find it with incore_match()
251 	 * above.
252 	 */
253 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
254 
255 	xfs_buf_item_trace("GETSB", bip);
256 	return (bp);
257 }
258 
259 #ifdef DEBUG
260 xfs_buftarg_t *xfs_error_target;
261 int	xfs_do_error;
262 int	xfs_req_num;
263 int	xfs_error_mod = 33;
264 #endif
265 
266 /*
267  * Get and lock the buffer for the caller if it is not already
268  * locked within the given transaction.  If it has not yet been
269  * read in, read it from disk. If it is already locked
270  * within the transaction and already read in, just increment its
271  * lock recursion count and return a pointer to it.
272  *
273  * Use the fast path function xfs_trans_buf_item_match() or the buffer
274  * cache routine incore_match() to find the buffer
275  * if it is already owned by this transaction.
276  *
277  * If we don't already own the buffer, use read_buf() to get it.
278  * If it doesn't yet have an associated xfs_buf_log_item structure,
279  * then allocate one and add the item to this transaction.
280  *
281  * If the transaction pointer is NULL, make this just a normal
282  * read_buf() call.
283  */
284 int
285 xfs_trans_read_buf(
286 	xfs_mount_t	*mp,
287 	xfs_trans_t	*tp,
288 	xfs_buftarg_t	*target,
289 	xfs_daddr_t	blkno,
290 	int		len,
291 	uint		flags,
292 	xfs_buf_t	**bpp)
293 {
294 	xfs_buf_t		*bp;
295 	xfs_buf_log_item_t	*bip;
296 	int			error;
297 
298 	if (flags == 0)
299 		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
300 
301 	/*
302 	 * Default to a normal get_buf() call if the tp is NULL.
303 	 */
304 	if (tp == NULL) {
305 		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
306 		if (!bp)
307 			return (flags & XFS_BUF_TRYLOCK) ?
308 					EAGAIN : XFS_ERROR(ENOMEM);
309 
310 		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
311 			xfs_ioerror_alert("xfs_trans_read_buf", mp,
312 					  bp, blkno);
313 			error = XFS_BUF_GETERROR(bp);
314 			xfs_buf_relse(bp);
315 			return error;
316 		}
317 #ifdef DEBUG
318 		if (xfs_do_error && (bp != NULL)) {
319 			if (xfs_error_target == target) {
320 				if (((xfs_req_num++) % xfs_error_mod) == 0) {
321 					xfs_buf_relse(bp);
322 					cmn_err(CE_DEBUG, "Returning error!\n");
323 					return XFS_ERROR(EIO);
324 				}
325 			}
326 		}
327 #endif
328 		if (XFS_FORCED_SHUTDOWN(mp))
329 			goto shutdown_abort;
330 		*bpp = bp;
331 		return 0;
332 	}
333 
334 	/*
335 	 * If we find the buffer in the cache with this transaction
336 	 * pointer in its b_fsprivate2 field, then we know we already
337 	 * have it locked.  If it is already read in we just increment
338 	 * the lock recursion count and return the buffer to the caller.
339 	 * If the buffer is not yet read in, then we read it in, increment
340 	 * the lock recursion count, and return it to the caller.
341 	 */
342 	if (tp->t_items.lic_next == NULL) {
343 		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
344 	} else {
345 		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
346 	}
347 	if (bp != NULL) {
348 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
349 		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
350 		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
351 		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
352 		if (!(XFS_BUF_ISDONE(bp))) {
353 			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
354 			ASSERT(!XFS_BUF_ISASYNC(bp));
355 			XFS_BUF_READ(bp);
356 			xfsbdstrat(tp->t_mountp, bp);
357 			error = xfs_iowait(bp);
358 			if (error) {
359 				xfs_ioerror_alert("xfs_trans_read_buf", mp,
360 						  bp, blkno);
361 				xfs_buf_relse(bp);
362 				/*
363 				 * We can gracefully recover from most read
364 				 * errors. Ones we can't are those that happen
365 				 * after the transaction's already dirty.
366 				 */
367 				if (tp->t_flags & XFS_TRANS_DIRTY)
368 					xfs_force_shutdown(tp->t_mountp,
369 							SHUTDOWN_META_IO_ERROR);
370 				return error;
371 			}
372 		}
373 		/*
374 		 * We never locked this buf ourselves, so we shouldn't
375 		 * brelse it either. Just get out.
376 		 */
377 		if (XFS_FORCED_SHUTDOWN(mp)) {
378 			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
379 			*bpp = NULL;
380 			return XFS_ERROR(EIO);
381 		}
382 
383 
384 		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
385 		bip->bli_recur++;
386 
387 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
388 		xfs_buf_item_trace("READ RECUR", bip);
389 		*bpp = bp;
390 		return 0;
391 	}
392 
393 	/*
394 	 * We always specify the BUF_BUSY flag within a transaction so
395 	 * that get_buf does not try to push out a delayed write buffer
396 	 * which might cause another transaction to take place (if the
397 	 * buffer was delayed alloc).  Such recursive transactions can
398 	 * easily deadlock with our current transaction as well as cause
399 	 * us to run out of stack space.
400 	 */
401 	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
402 	if (bp == NULL) {
403 		*bpp = NULL;
404 		return 0;
405 	}
406 	if (XFS_BUF_GETERROR(bp) != 0) {
407 	    XFS_BUF_SUPER_STALE(bp);
408 		xfs_buftrace("READ ERROR", bp);
409 		error = XFS_BUF_GETERROR(bp);
410 
411 		xfs_ioerror_alert("xfs_trans_read_buf", mp,
412 				  bp, blkno);
413 		if (tp->t_flags & XFS_TRANS_DIRTY)
414 			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
415 		xfs_buf_relse(bp);
416 		return error;
417 	}
418 #ifdef DEBUG
419 	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
420 		if (xfs_error_target == target) {
421 			if (((xfs_req_num++) % xfs_error_mod) == 0) {
422 				xfs_force_shutdown(tp->t_mountp,
423 						   SHUTDOWN_META_IO_ERROR);
424 				xfs_buf_relse(bp);
425 				cmn_err(CE_DEBUG, "Returning trans error!\n");
426 				return XFS_ERROR(EIO);
427 			}
428 		}
429 	}
430 #endif
431 	if (XFS_FORCED_SHUTDOWN(mp))
432 		goto shutdown_abort;
433 
434 	/*
435 	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
436 	 * it doesn't have one yet, then allocate one and initialize it.
437 	 * The checks to see if one is there are in xfs_buf_item_init().
438 	 */
439 	xfs_buf_item_init(bp, tp->t_mountp);
440 
441 	/*
442 	 * Set the recursion count for the buffer within this transaction
443 	 * to 0.
444 	 */
445 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
446 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
447 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
448 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
449 	bip->bli_recur = 0;
450 
451 	/*
452 	 * Take a reference for this transaction on the buf item.
453 	 */
454 	atomic_inc(&bip->bli_refcount);
455 
456 	/*
457 	 * Get a log_item_desc to point at the new item.
458 	 */
459 	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
460 
461 	/*
462 	 * Initialize b_fsprivate2 so we can find it with incore_match()
463 	 * above.
464 	 */
465 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
466 
467 	xfs_buftrace("TRANS READ", bp);
468 	xfs_buf_item_trace("READ", bip);
469 	*bpp = bp;
470 	return 0;
471 
472 shutdown_abort:
473 	/*
474 	 * the theory here is that buffer is good but we're
475 	 * bailing out because the filesystem is being forcibly
476 	 * shut down.  So we should leave the b_flags alone since
477 	 * the buffer's not staled and just get out.
478 	 */
479 #if defined(DEBUG)
480 	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
481 		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
482 #endif
483 	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
484 						(XFS_B_STALE|XFS_B_DELWRI));
485 
486 	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
487 	xfs_buf_relse(bp);
488 	*bpp = NULL;
489 	return XFS_ERROR(EIO);
490 }
491 
492 
493 /*
494  * Release the buffer bp which was previously acquired with one of the
495  * xfs_trans_... buffer allocation routines if the buffer has not
496  * been modified within this transaction.  If the buffer is modified
497  * within this transaction, do decrement the recursion count but do
498  * not release the buffer even if the count goes to 0.  If the buffer is not
499  * modified within the transaction, decrement the recursion count and
500  * release the buffer if the recursion count goes to 0.
501  *
502  * If the buffer is to be released and it was not modified before
503  * this transaction began, then free the buf_log_item associated with it.
504  *
505  * If the transaction pointer is NULL, make this just a normal
506  * brelse() call.
507  */
508 void
509 xfs_trans_brelse(xfs_trans_t	*tp,
510 		 xfs_buf_t	*bp)
511 {
512 	xfs_buf_log_item_t	*bip;
513 	xfs_log_item_t		*lip;
514 	xfs_log_item_desc_t	*lidp;
515 
516 	/*
517 	 * Default to a normal brelse() call if the tp is NULL.
518 	 */
519 	if (tp == NULL) {
520 		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
521 		/*
522 		 * If there's a buf log item attached to the buffer,
523 		 * then let the AIL know that the buffer is being
524 		 * unlocked.
525 		 */
526 		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
527 			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
528 			if (lip->li_type == XFS_LI_BUF) {
529 				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
530 				xfs_trans_unlocked_item(bip->bli_item.li_ailp,
531 							lip);
532 			}
533 		}
534 		xfs_buf_relse(bp);
535 		return;
536 	}
537 
538 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
539 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
540 	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
541 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
542 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
543 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
544 
545 	/*
546 	 * Find the item descriptor pointing to this buffer's
547 	 * log item.  It must be there.
548 	 */
549 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
550 	ASSERT(lidp != NULL);
551 
552 	/*
553 	 * If the release is just for a recursive lock,
554 	 * then decrement the count and return.
555 	 */
556 	if (bip->bli_recur > 0) {
557 		bip->bli_recur--;
558 		xfs_buf_item_trace("RELSE RECUR", bip);
559 		return;
560 	}
561 
562 	/*
563 	 * If the buffer is dirty within this transaction, we can't
564 	 * release it until we commit.
565 	 */
566 	if (lidp->lid_flags & XFS_LID_DIRTY) {
567 		xfs_buf_item_trace("RELSE DIRTY", bip);
568 		return;
569 	}
570 
571 	/*
572 	 * If the buffer has been invalidated, then we can't release
573 	 * it until the transaction commits to disk unless it is re-dirtied
574 	 * as part of this transaction.  This prevents us from pulling
575 	 * the item from the AIL before we should.
576 	 */
577 	if (bip->bli_flags & XFS_BLI_STALE) {
578 		xfs_buf_item_trace("RELSE STALE", bip);
579 		return;
580 	}
581 
582 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
583 	xfs_buf_item_trace("RELSE", bip);
584 
585 	/*
586 	 * Free up the log item descriptor tracking the released item.
587 	 */
588 	xfs_trans_free_item(tp, lidp);
589 
590 	/*
591 	 * Clear the hold flag in the buf log item if it is set.
592 	 * We wouldn't want the next user of the buffer to
593 	 * get confused.
594 	 */
595 	if (bip->bli_flags & XFS_BLI_HOLD) {
596 		bip->bli_flags &= ~XFS_BLI_HOLD;
597 	}
598 
599 	/*
600 	 * Drop our reference to the buf log item.
601 	 */
602 	atomic_dec(&bip->bli_refcount);
603 
604 	/*
605 	 * If the buf item is not tracking data in the log, then
606 	 * we must free it before releasing the buffer back to the
607 	 * free pool.  Before releasing the buffer to the free pool,
608 	 * clear the transaction pointer in b_fsprivate2 to dissolve
609 	 * its relation to this transaction.
610 	 */
611 	if (!xfs_buf_item_dirty(bip)) {
612 /***
613 		ASSERT(bp->b_pincount == 0);
614 ***/
615 		ASSERT(atomic_read(&bip->bli_refcount) == 0);
616 		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
617 		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
618 		xfs_buf_item_relse(bp);
619 		bip = NULL;
620 	}
621 	XFS_BUF_SET_FSPRIVATE2(bp, NULL);
622 
623 	/*
624 	 * If we've still got a buf log item on the buffer, then
625 	 * tell the AIL that the buffer is being unlocked.
626 	 */
627 	if (bip != NULL) {
628 		xfs_trans_unlocked_item(bip->bli_item.li_ailp,
629 					(xfs_log_item_t*)bip);
630 	}
631 
632 	xfs_buf_relse(bp);
633 	return;
634 }
635 
636 /*
637  * Add the locked buffer to the transaction.
638  * The buffer must be locked, and it cannot be associated with any
639  * transaction.
640  *
641  * If the buffer does not yet have a buf log item associated with it,
642  * then allocate one for it.  Then add the buf item to the transaction.
643  */
644 void
645 xfs_trans_bjoin(xfs_trans_t	*tp,
646 		xfs_buf_t	*bp)
647 {
648 	xfs_buf_log_item_t	*bip;
649 
650 	ASSERT(XFS_BUF_ISBUSY(bp));
651 	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
652 
653 	/*
654 	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
655 	 * it doesn't have one yet, then allocate one and initialize it.
656 	 * The checks to see if one is there are in xfs_buf_item_init().
657 	 */
658 	xfs_buf_item_init(bp, tp->t_mountp);
659 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
660 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
661 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
662 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
663 
664 	/*
665 	 * Take a reference for this transaction on the buf item.
666 	 */
667 	atomic_inc(&bip->bli_refcount);
668 
669 	/*
670 	 * Get a log_item_desc to point at the new item.
671 	 */
672 	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
673 
674 	/*
675 	 * Initialize b_fsprivate2 so we can find it with incore_match()
676 	 * in xfs_trans_get_buf() and friends above.
677 	 */
678 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
679 
680 	xfs_buf_item_trace("BJOIN", bip);
681 }
682 
683 /*
684  * Mark the buffer as not needing to be unlocked when the buf item's
685  * IOP_UNLOCK() routine is called.  The buffer must already be locked
686  * and associated with the given transaction.
687  */
688 /* ARGSUSED */
689 void
690 xfs_trans_bhold(xfs_trans_t	*tp,
691 		xfs_buf_t	*bp)
692 {
693 	xfs_buf_log_item_t	*bip;
694 
695 	ASSERT(XFS_BUF_ISBUSY(bp));
696 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
697 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
698 
699 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
700 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
701 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
702 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
703 	bip->bli_flags |= XFS_BLI_HOLD;
704 	xfs_buf_item_trace("BHOLD", bip);
705 }
706 
707 /*
708  * Cancel the previous buffer hold request made on this buffer
709  * for this transaction.
710  */
711 void
712 xfs_trans_bhold_release(xfs_trans_t	*tp,
713 			xfs_buf_t	*bp)
714 {
715 	xfs_buf_log_item_t	*bip;
716 
717 	ASSERT(XFS_BUF_ISBUSY(bp));
718 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
719 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
720 
721 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
722 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
723 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
724 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
725 	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
726 	bip->bli_flags &= ~XFS_BLI_HOLD;
727 	xfs_buf_item_trace("BHOLD RELEASE", bip);
728 }
729 
730 /*
731  * This is called to mark bytes first through last inclusive of the given
732  * buffer as needing to be logged when the transaction is committed.
733  * The buffer must already be associated with the given transaction.
734  *
735  * First and last are numbers relative to the beginning of this buffer,
736  * so the first byte in the buffer is numbered 0 regardless of the
737  * value of b_blkno.
738  */
739 void
740 xfs_trans_log_buf(xfs_trans_t	*tp,
741 		  xfs_buf_t	*bp,
742 		  uint		first,
743 		  uint		last)
744 {
745 	xfs_buf_log_item_t	*bip;
746 	xfs_log_item_desc_t	*lidp;
747 
748 	ASSERT(XFS_BUF_ISBUSY(bp));
749 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
750 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
751 	ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
752 	ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
753 	       (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));
754 
755 	/*
756 	 * Mark the buffer as needing to be written out eventually,
757 	 * and set its iodone function to remove the buffer's buf log
758 	 * item from the AIL and free it when the buffer is flushed
759 	 * to disk.  See xfs_buf_attach_iodone() for more details
760 	 * on li_cb and xfs_buf_iodone_callbacks().
761 	 * If we end up aborting this transaction, we trap this buffer
762 	 * inside the b_bdstrat callback so that this won't get written to
763 	 * disk.
764 	 */
765 	XFS_BUF_DELAYWRITE(bp);
766 	XFS_BUF_DONE(bp);
767 
768 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
769 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
770 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
771 	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
772 
773 	/*
774 	 * If we invalidated the buffer within this transaction, then
775 	 * cancel the invalidation now that we're dirtying the buffer
776 	 * again.  There are no races with the code in xfs_buf_item_unpin(),
777 	 * because we have a reference to the buffer this entire time.
778 	 */
779 	if (bip->bli_flags & XFS_BLI_STALE) {
780 		xfs_buf_item_trace("BLOG UNSTALE", bip);
781 		bip->bli_flags &= ~XFS_BLI_STALE;
782 		ASSERT(XFS_BUF_ISSTALE(bp));
783 		XFS_BUF_UNSTALE(bp);
784 		bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
785 	}
786 
787 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
788 	ASSERT(lidp != NULL);
789 
790 	tp->t_flags |= XFS_TRANS_DIRTY;
791 	lidp->lid_flags |= XFS_LID_DIRTY;
792 	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
793 	bip->bli_flags |= XFS_BLI_LOGGED;
794 	xfs_buf_item_log(bip, first, last);
795 	xfs_buf_item_trace("BLOG", bip);
796 }
797 
798 
799 /*
800  * This called to invalidate a buffer that is being used within
801  * a transaction.  Typically this is because the blocks in the
802  * buffer are being freed, so we need to prevent it from being
803  * written out when we're done.  Allowing it to be written again
804  * might overwrite data in the free blocks if they are reallocated
805  * to a file.
806  *
807  * We prevent the buffer from being written out by clearing the
808  * B_DELWRI flag.  We can't always
809  * get rid of the buf log item at this point, though, because
810  * the buffer may still be pinned by another transaction.  If that
811  * is the case, then we'll wait until the buffer is committed to
812  * disk for the last time (we can tell by the ref count) and
813  * free it in xfs_buf_item_unpin().  Until it is cleaned up we
814  * will keep the buffer locked so that the buffer and buf log item
815  * are not reused.
816  */
817 void
818 xfs_trans_binval(
819 	xfs_trans_t	*tp,
820 	xfs_buf_t	*bp)
821 {
822 	xfs_log_item_desc_t	*lidp;
823 	xfs_buf_log_item_t	*bip;
824 
825 	ASSERT(XFS_BUF_ISBUSY(bp));
826 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
827 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
828 
829 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
830 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
831 	ASSERT(lidp != NULL);
832 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
833 
834 	if (bip->bli_flags & XFS_BLI_STALE) {
835 		/*
836 		 * If the buffer is already invalidated, then
837 		 * just return.
838 		 */
839 		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
840 		ASSERT(XFS_BUF_ISSTALE(bp));
841 		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
842 		ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
843 		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
844 		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
845 		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
846 		xfs_buftrace("XFS_BINVAL RECUR", bp);
847 		xfs_buf_item_trace("BINVAL RECUR", bip);
848 		return;
849 	}
850 
851 	/*
852 	 * Clear the dirty bit in the buffer and set the STALE flag
853 	 * in the buf log item.  The STALE flag will be used in
854 	 * xfs_buf_item_unpin() to determine if it should clean up
855 	 * when the last reference to the buf item is given up.
856 	 * We set the XFS_BLI_CANCEL flag in the buf log format structure
857 	 * and log the buf item.  This will be used at recovery time
858 	 * to determine that copies of the buffer in the log before
859 	 * this should not be replayed.
860 	 * We mark the item descriptor and the transaction dirty so
861 	 * that we'll hold the buffer until after the commit.
862 	 *
863 	 * Since we're invalidating the buffer, we also clear the state
864 	 * about which parts of the buffer have been logged.  We also
865 	 * clear the flag indicating that this is an inode buffer since
866 	 * the data in the buffer will no longer be valid.
867 	 *
868 	 * We set the stale bit in the buffer as well since we're getting
869 	 * rid of it.
870 	 */
871 	XFS_BUF_UNDELAYWRITE(bp);
872 	XFS_BUF_STALE(bp);
873 	bip->bli_flags |= XFS_BLI_STALE;
874 	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
875 	bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
876 	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
877 	memset((char *)(bip->bli_format.blf_data_map), 0,
878 	      (bip->bli_format.blf_map_size * sizeof(uint)));
879 	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
880 	tp->t_flags |= XFS_TRANS_DIRTY;
881 	xfs_buftrace("XFS_BINVAL", bp);
882 	xfs_buf_item_trace("BINVAL", bip);
883 }
884 
885 /*
886  * This call is used to indicate that the buffer contains on-disk
887  * inodes which must be handled specially during recovery.  They
888  * require special handling because only the di_next_unlinked from
889  * the inodes in the buffer should be recovered.  The rest of the
890  * data in the buffer is logged via the inodes themselves.
891  *
892  * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
893  * format structure so that we'll know what to do at recovery time.
894  */
895 /* ARGSUSED */
896 void
897 xfs_trans_inode_buf(
898 	xfs_trans_t	*tp,
899 	xfs_buf_t	*bp)
900 {
901 	xfs_buf_log_item_t	*bip;
902 
903 	ASSERT(XFS_BUF_ISBUSY(bp));
904 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
905 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
906 
907 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
908 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
909 
910 	bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
911 }
912 
913 /*
914  * This call is used to indicate that the buffer is going to
915  * be staled and was an inode buffer. This means it gets
916  * special processing during unpin - where any inodes
917  * associated with the buffer should be removed from ail.
918  * There is also special processing during recovery,
919  * any replay of the inodes in the buffer needs to be
920  * prevented as the buffer may have been reused.
921  */
922 void
923 xfs_trans_stale_inode_buf(
924 	xfs_trans_t	*tp,
925 	xfs_buf_t	*bp)
926 {
927 	xfs_buf_log_item_t	*bip;
928 
929 	ASSERT(XFS_BUF_ISBUSY(bp));
930 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
931 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
932 
933 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
934 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
935 
936 	bip->bli_flags |= XFS_BLI_STALE_INODE;
937 	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
938 		xfs_buf_iodone;
939 }
940 
941 
942 
943 /*
944  * Mark the buffer as being one which contains newly allocated
945  * inodes.  We need to make sure that even if this buffer is
946  * relogged as an 'inode buf' we still recover all of the inode
947  * images in the face of a crash.  This works in coordination with
948  * xfs_buf_item_committed() to ensure that the buffer remains in the
949  * AIL at its original location even after it has been relogged.
950  */
951 /* ARGSUSED */
952 void
953 xfs_trans_inode_alloc_buf(
954 	xfs_trans_t	*tp,
955 	xfs_buf_t	*bp)
956 {
957 	xfs_buf_log_item_t	*bip;
958 
959 	ASSERT(XFS_BUF_ISBUSY(bp));
960 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
961 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
962 
963 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
964 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
965 
966 	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
967 }
968 
969 
970 /*
971  * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
972  * dquots. However, unlike in inode buffer recovery, dquot buffers get
973  * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
974  * The only thing that makes dquot buffers different from regular
975  * buffers is that we must not replay dquot bufs when recovering
976  * if a _corresponding_ quotaoff has happened. We also have to distinguish
977  * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
978  * can be turned off independently.
979  */
980 /* ARGSUSED */
981 void
982 xfs_trans_dquot_buf(
983 	xfs_trans_t	*tp,
984 	xfs_buf_t	*bp,
985 	uint		type)
986 {
987 	xfs_buf_log_item_t	*bip;
988 
989 	ASSERT(XFS_BUF_ISBUSY(bp));
990 	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
991 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
992 	ASSERT(type == XFS_BLI_UDQUOT_BUF ||
993 	       type == XFS_BLI_PDQUOT_BUF ||
994 	       type == XFS_BLI_GDQUOT_BUF);
995 
996 	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
997 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
998 
999 	bip->bli_format.blf_flags |= type;
1000 }
1001 
1002 /*
1003  * Check to see if a buffer matching the given parameters is already
1004  * a part of the given transaction.  Only check the first, embedded
1005  * chunk, since we don't want to spend all day scanning large transactions.
1006  */
1007 STATIC xfs_buf_t *
1008 xfs_trans_buf_item_match(
1009 	xfs_trans_t	*tp,
1010 	xfs_buftarg_t	*target,
1011 	xfs_daddr_t	blkno,
1012 	int		len)
1013 {
1014 	xfs_log_item_chunk_t	*licp;
1015 	xfs_log_item_desc_t	*lidp;
1016 	xfs_buf_log_item_t	*blip;
1017 	xfs_buf_t		*bp;
1018 	int			i;
1019 
1020 	bp = NULL;
1021 	len = BBTOB(len);
1022 	licp = &tp->t_items;
1023 	if (!xfs_lic_are_all_free(licp)) {
1024 		for (i = 0; i < licp->lic_unused; i++) {
1025 			/*
1026 			 * Skip unoccupied slots.
1027 			 */
1028 			if (xfs_lic_isfree(licp, i)) {
1029 				continue;
1030 			}
1031 
1032 			lidp = xfs_lic_slot(licp, i);
1033 			blip = (xfs_buf_log_item_t *)lidp->lid_item;
1034 			if (blip->bli_item.li_type != XFS_LI_BUF) {
1035 				continue;
1036 			}
1037 
1038 			bp = blip->bli_buf;
1039 			if ((XFS_BUF_TARGET(bp) == target) &&
1040 			    (XFS_BUF_ADDR(bp) == blkno) &&
1041 			    (XFS_BUF_COUNT(bp) == len)) {
1042 				/*
1043 				 * We found it.  Break out and
1044 				 * return the pointer to the buffer.
1045 				 */
1046 				break;
1047 			} else {
1048 				bp = NULL;
1049 			}
1050 		}
1051 	}
1052 	return bp;
1053 }
1054 
1055 /*
1056  * Check to see if a buffer matching the given parameters is already
1057  * a part of the given transaction.  Check all the chunks, we
1058  * want to be thorough.
1059  */
1060 STATIC xfs_buf_t *
1061 xfs_trans_buf_item_match_all(
1062 	xfs_trans_t	*tp,
1063 	xfs_buftarg_t	*target,
1064 	xfs_daddr_t	blkno,
1065 	int		len)
1066 {
1067 	xfs_log_item_chunk_t	*licp;
1068 	xfs_log_item_desc_t	*lidp;
1069 	xfs_buf_log_item_t	*blip;
1070 	xfs_buf_t		*bp;
1071 	int			i;
1072 
1073 	bp = NULL;
1074 	len = BBTOB(len);
1075 	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
1076 		if (xfs_lic_are_all_free(licp)) {
1077 			ASSERT(licp == &tp->t_items);
1078 			ASSERT(licp->lic_next == NULL);
1079 			return NULL;
1080 		}
1081 		for (i = 0; i < licp->lic_unused; i++) {
1082 			/*
1083 			 * Skip unoccupied slots.
1084 			 */
1085 			if (xfs_lic_isfree(licp, i)) {
1086 				continue;
1087 			}
1088 
1089 			lidp = xfs_lic_slot(licp, i);
1090 			blip = (xfs_buf_log_item_t *)lidp->lid_item;
1091 			if (blip->bli_item.li_type != XFS_LI_BUF) {
1092 				continue;
1093 			}
1094 
1095 			bp = blip->bli_buf;
1096 			if ((XFS_BUF_TARGET(bp) == target) &&
1097 			    (XFS_BUF_ADDR(bp) == blkno) &&
1098 			    (XFS_BUF_COUNT(bp) == len)) {
1099 				/*
1100 				 * We found it.  Break out and
1101 				 * return the pointer to the buffer.
1102 				 */
1103 				return bp;
1104 			}
1105 		}
1106 	}
1107 	return NULL;
1108 }
1109