/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
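
/*
 * This file implements the rmap update intent (RUI) and rmap update done
 * (RUD) log items.  An RUI records an intent to update the reverse mapping
 * btree, and the matching RUD is logged once the update has actually been
 * committed; log recovery (xfs_rui_recover below) uses the pair to redo
 * updates that were cut off by a crash.
 */
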
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

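/* Convert a generic log item back to its containing RUI. */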
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

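/*
 * Free the RUI.  Items with more than XFS_RUI_MAX_FAST_EXTENTS extents
 * were allocated from the heap; everything else came from the zone.
 */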
void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_zone_free(xfs_rui_zone, ruip);
}

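/*
 * Return the number of log iovecs and the number of bytes needed to log
 * the given rui item: a single iovec covering the rui_log_format header
 * and its variable-length extent array.
 */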
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * Pinning has no meaning for an rui item, so just return.
 */
STATIC void
xfs_rui_item_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * RUI items have no locking or pushing.  However, since RUIs are pulled from
 * the AIL when their corresponding RUDs are committed to disk, their situation
 * is very similar to being pinned.  Return XFS_ITEM_PINNED so that the caller
 * will eventually flush the log.  This should help in getting the RUI out of
 * the AIL.
 */
STATIC uint
xfs_rui_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	return XFS_ITEM_PINNED;
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_unlock(
	struct xfs_log_item	*lip)
{
	if (lip->li_flags & XFS_LI_ABORTED)
		xfs_rui_item_free(RUI_ITEM(lip));
}

/*
 * The RUI is logged only once and cannot be moved in the log, so simply return
 * the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_rui_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

/*
 * The RUI dependency tracking op doesn't do squat.  It can't because
 * it doesn't know where the rmap update is coming from.  The dependency
 * tracking has to be handled by the "enclosing" metadata object.  For
 * example, for inodes, the inode is locked throughout the extent mapping
 * changes, so the dependency should be recorded there.
 */
STATIC void
xfs_rui_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector shared by all rui log items.
 */
static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_pin	= xfs_rui_item_pin,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_unlock	= xfs_rui_item_unlock,
	.iop_committed	= xfs_rui_item_committed,
	.iop_push	= xfs_rui_item_push,
	.iop_committing = xfs_rui_item_committing,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
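	/*
	 * Intent items start with two references: one held by the log
	 * (dropped at unpin time) and one held by whoever commits the
	 * matching RUD, dropped via xfs_rui_release() when the RUD is
	 * finished or on error.
	 */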
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}
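
/*
 * A rough sketch of the intended life cycle, based on the comments above:
 * the committing transaction logs the RUI; a matching RUD is created with
 * xfs_trans_get_rud() (as in xfs_rui_recover() below); and committing the
 * RUD drops the second RUI reference through xfs_rui_release().
 */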

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len)
		return -EFSCORRUPTED;

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

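/* Convert a generic log item back to its containing RUD. */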
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

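/*
 * Return the number of log iovecs and the number of bytes needed to log
 * the given rud item: a single iovec covering the fixed-size
 * rud_log_format structure.
 */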
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * Pinning has no meaning for an rud item, so just return.
 */
STATIC void
xfs_rud_item_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for an rud item, unpinning does
 * not either.
 */
STATIC void
xfs_rud_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * There isn't much you can do to push on an rud item.  It is simply stuck
 * waiting for the log to be flushed to disk.
 */
STATIC uint
xfs_rud_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	return XFS_ITEM_PINNED;
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	if (lip->li_flags & XFS_LI_ABORTED) {
		xfs_rui_release(rudp->rud_ruip);
		kmem_zone_free(xfs_rud_zone, rudp);
	}
}

/*
 * When the rud item is committed to disk, all we need to do is delete our
 * reference to our partner rui item and then free ourselves. Since we're
 * freeing ourselves we must return -1 to keep the transaction code from
 * further referencing this item.
 */
STATIC xfs_lsn_t
xfs_rud_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	/*
	 * Drop the RUI reference regardless of whether the RUD has been
	 * aborted. Once the RUD transaction is constructed, it is the sole
	 * responsibility of the RUD to release the RUI (even if the RUI is
	 * aborted due to log I/O error).
	 */
	xfs_rui_release(rudp->rud_ruip);
	kmem_zone_free(xfs_rud_zone, rudp);

	return (xfs_lsn_t)-1;
}

/*
 * The RUD dependency tracking op doesn't do squat.  It can't because
 * it doesn't know where the rmap update is coming from.  The dependency
 * tracking has to be handled by the "enclosing" metadata object.  For
 * example, for inodes, the inode is locked throughout the extent mapping
 * changes, so the dependency should be recorded there.
 */
STATIC void
xfs_rud_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector shared by all rud log items.
 */
static const struct xfs_item_ops xfs_rud_item_ops = {
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_pin	= xfs_rud_item_pin,
	.iop_unpin	= xfs_rud_item_unpin,
	.iop_unlock	= xfs_rud_item_unlock,
	.iop_committed	= xfs_rud_item_committed,
	.iop_push	= xfs_rud_item_push,
	.iop_committing = xfs_rud_item_committing,
};

/*
 * Allocate and initialize an rud item, recording the given rui item as
 * its partner.
 */
struct xfs_rud_log_item *
xfs_rud_init(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item	*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
	xfs_log_item_init(mp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return rudp;
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EIO;
		}
	}

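	/*
	 * All of the extents look sane, so redo the updates.  The itruncate
	 * reservation is assumed to be large enough to cover the rmapbt
	 * updates made below.
	 */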
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}