// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_buf.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"

static struct kmem_cache	*xfs_defer_pending_cache;
/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping operations across multiple
 * transactions to comply with AG locking order rules, and to be able
 * to spread a single refcount update operation (an operation on an
 * n-block extent can update as many as n records!) among multiple
 * transactions.  XFS can roll a transaction to facilitate this, but
 * using this facility requires us to log "intent" items in case log
 * recovery needs to redo the operation, and to log "done" items to
 * indicate that redo is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish work
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * recur.
 */
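
/*
 * A caller-side sketch of the lifecycle described above.  This is only a
 * hedged illustration: "resv" and "new_item" are hypothetical names, and
 * real callers pick a reservation and a deferred-work type to match the
 * operation at hand.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	// Allocate a transaction with a permanent reservation so it can roll.
 *	error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	// ...modify metadata, then queue the follow-up work...
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new_item->xi_list);
 *	// Log intents, roll, and finish everything before committing.
 *	error = xfs_defer_finish(&tp);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 */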

static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
	[XFS_DEFER_OPS_TYPE_ATTR]	= &xfs_attr_defer_type,
};
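
/*
 * A minimal sketch of the shape of one of these op types, limited to the
 * callbacks that this file actually invokes (the authoritative definition
 * lives in xfs_defer.h; the .max_items value here is purely illustrative):
 *
 *	const struct xfs_defer_op_type xfs_example_defer_type = {
 *		.max_items	= 16,	// cap per xfs_defer_pending
 *		.create_intent	= ...,	// log an intent item for queued work
 *		.abort_intent	= ...,	// release an intent that won't commit
 *		.create_done	= ...,	// log the matching done item
 *		.finish_item	= ...,	// do one work item; -EAGAIN = reroll
 *		.finish_cleanup	= ...,	// dispose of pass-through state
 *		.cancel_item	= ...,	// free a work item without finishing
 *	};
 */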

/*
 * Ensure there's a log intent item associated with this deferred work item if
 * the operation must be restarted on crash.  Returns 1 if there's a log item;
 * 0 if there isn't; or a negative errno.
 */
static int
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_log_item		*lip;

	if (dfp->dfp_intent)
		return 1;

	lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
	if (!lip)
		return 0;
	if (IS_ERR(lip))
		return PTR_ERR(lip);

	dfp->dfp_intent = lip;
	return 1;
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 *
 * Returns 1 if at least one log item was associated with the deferred work;
 * 0 if there are no log items; or a negative errno.
 */
static int
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;
	int				ret = 0;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		int			ret2;

		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		ret2 = xfs_defer_create_intent(tp, dfp, true);
		if (ret2 < 0)
			return ret2;
		ret |= ret2;
	}
	return ret;
}

static inline void
xfs_defer_pending_abort(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	trace_xfs_defer_pending_abort(mp, dfp);

	if (dfp->dfp_intent && !dfp->dfp_done) {
		ops->abort_intent(dfp->dfp_intent);
		dfp->dfp_intent = NULL;
	}
}

static inline void
xfs_defer_pending_cancel_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel_list(mp, dfp);

	list_del(&dfp->dfp_list);
	list_for_each_safe(pwi, n, &dfp->dfp_work) {
		list_del(pwi);
		dfp->dfp_count--;
		trace_xfs_defer_cancel_item(mp, dfp, pwi);
		ops->cancel_item(pwi);
	}
	ASSERT(dfp->dfp_count == 0);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
}

STATIC void
xfs_defer_pending_abort_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_list, dfp_list)
		xfs_defer_pending_abort(mp, dfp);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	trace_xfs_defer_trans_abort(tp, _RET_IP_);
	xfs_defer_pending_abort_list(tp->t_mountp, dop_pending);
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits.  Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always hands back a new transaction
	 * (even if committing the old one fails!), so we join the held
	 * resources to the new transaction so that we always return with the
	 * held resources joined to @tpp, no matter what happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}
411
412 /*
413 * Free up any items left in the list.
414 */
415 static void
xfs_defer_cancel_list(struct xfs_mount * mp,struct list_head * dop_list)416 xfs_defer_cancel_list(
417 struct xfs_mount *mp,
418 struct list_head *dop_list)
419 {
420 struct xfs_defer_pending *dfp;
421 struct xfs_defer_pending *pli;
422
423 /*
424 * Free the pending items. Caller should already have arranged
425 * for the intent items to be released.
426 */
427 list_for_each_entry_safe(dfp, pli, dop_list, dfp_list)
428 xfs_defer_pending_cancel_work(mp, dfp);
429 }
430
431 /*
432 * Prevent a log intent item from pinning the tail of the log by logging a
433 * done item to release the intent item; and then log a new intent item.
434 * The caller should provide a fresh transaction and roll it after we're done.
435 */
436 static int
xfs_defer_relog(struct xfs_trans ** tpp,struct list_head * dfops)437 xfs_defer_relog(
438 struct xfs_trans **tpp,
439 struct list_head *dfops)
440 {
441 struct xlog *log = (*tpp)->t_mountp->m_log;
442 struct xfs_defer_pending *dfp;
443 xfs_lsn_t threshold_lsn = NULLCOMMITLSN;
444
445
446 ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
447
448 list_for_each_entry(dfp, dfops, dfp_list) {
449 /*
450 * If the log intent item for this deferred op is not a part of
451 * the current log checkpoint, relog the intent item to keep
452 * the log tail moving forward. We're ok with this being racy
453 * because an incorrect decision means we'll be a little slower
454 * at pushing the tail.
455 */
456 if (dfp->dfp_intent == NULL ||
457 xfs_log_item_in_current_chkpt(dfp->dfp_intent))
458 continue;
459
460 /*
461 * Figure out where we need the tail to be in order to maintain
462 * the minimum required free space in the log. Only sample
463 * the log threshold once per call.
464 */
465 if (threshold_lsn == NULLCOMMITLSN) {
466 threshold_lsn = xlog_grant_push_threshold(log, 0);
467 if (threshold_lsn == NULLCOMMITLSN)
468 break;
469 }
470 if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
471 continue;
472
473 trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
474 XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
475 dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
476 }
477
478 if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
479 return xfs_defer_trans_roll(tpp);
480 return 0;
481 }

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			int		ret;

			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			ret = xfs_defer_create_intent(tp, dfp, false);
			if (ret < 0)
				error = ret;
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}
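
/*
 * A hedged pseudocode sketch of how a ->finish_item implementation
 * cooperates with the -EAGAIN handling above (xfs_example_finish_item is
 * not a real function; see the "Requesting a Fresh Transaction while
 * Finishing Deferred Work" comment at the top of this file for the
 * contract):
 *
 *	xfs_example_finish_item(tp, done, item, state)
 *	{
 *		while (item has remaining work) {
 *			if (not enough reservation for another update) {
 *				// Trim the work item to what's left and set
 *				// the done item's count to what finished, so
 *				// the caller relogs a fresh intent and rolls.
 *				return -EAGAIN;
 *			}
 *			do one unit of work;
 *		}
 *		return 0;
 *	}
 */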

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp = NULL;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		int has_intents = xfs_defer_create_intents(*tp);

		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		if (has_intents < 0) {
			error = has_intents;
			goto out_shutdown;
		}
		if (has_intents || dfp) {
			error = xfs_defer_trans_roll(tp);
			if (error)
				goto out_shutdown;

			/* Relog intent items to keep the log moving. */
			error = xfs_defer_relog(tp, &dop_pending);
			if (error)
				goto out_shutdown;
		}

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans		**tp)
{
	int				error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans		*tp)
{
	struct xfs_mount		*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops = defer_op_types[type];

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				      struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
					GFP_NOFS | __GFP_NOFAIL);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
	dfp->dfp_count++;
}
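
/*
 * Work items handed to xfs_defer_add() are caller-defined structures with
 * an embedded list_head, which is what gets threaded onto dfp_work above.
 * A hedged sketch with a hypothetical item type (xfs_example_item is not
 * a real structure in the tree):
 *
 *	struct xfs_example_item {
 *		struct list_head	xi_list;	// links into dfp_work
 *		xfs_fsblock_t		xi_startblock;
 *		xfs_extlen_t		xi_blockcount;
 *	};
 *
 *	item->xi_startblock = bno;
 *	item->xi_blockcount = len;
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &item->xi_list);
 */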

/*
 * Create a pending deferred work item to replay the recovered intent item
 * and add it to the list.
 */
void
xfs_defer_start_recovery(
	struct xfs_log_item		*lip,
	enum xfs_defer_ops_type		dfp_type,
	struct list_head		*r_dfops)
{
	struct xfs_defer_pending	*dfp;

	dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
				GFP_NOFS | __GFP_NOFAIL);
	dfp->dfp_type = dfp_type;
	dfp->dfp_intent = lip;
	INIT_LIST_HEAD(&dfp->dfp_work);
	list_add_tail(&dfp->dfp_list, r_dfops);
}

/*
 * Cancel a deferred work item created to recover a log intent item.  @dfp
 * will be freed after this function returns.
 */
void
xfs_defer_cancel_recovery(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	xfs_defer_pending_abort(mp, dfp);
	xfs_defer_pending_cancel_work(mp, dfp);
}

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state.  This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans		*dtp,
	struct xfs_trans		*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls.  Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * The capture structure takes its own references to any captured inodes and
 * buffers so that they survive until the work is continued; see below.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	error = xfs_defer_create_intents(tp);
	if (error < 0)
		return ERR_PTR(error);

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_abort(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_pending_abort_list(mp, &dfc->dfc_dfops);
	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, the caller must have joined that
 * inode to the transaction so that the reference can be transferred to the
 * capture structure.  The caller must hold ILOCK_EXCL on the inode, and must
 * unlock it before calling xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (IS_ERR(dfc)) {
		xfs_trans_cancel(tp);
		return PTR_ERR(dfc);
	}
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_abort(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}
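
/*
 * A hedged sketch of how log recovery drains the capture list built by the
 * function above.  The reservation argument is an assumption (recovery
 * sizes it from dfc_logres), and error handling is elided:
 *
 *	list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
 *		struct xfs_defer_resources dres;
 *
 *		list_del_init(&dfc->dfc_list);
 *		error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
 *		xfs_defer_ops_continue(dfc, tp, &dres);
 *		error = xfs_trans_commit(tp);	// commit finishes the dfops
 *		xfs_defer_resources_rele(&dres);
 *	}
 */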

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned int			i;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock the captured resources to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_lock(dfc->dfc_held.dr_bp[i]);

	/* Join the captured resources to the new transaction. */
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
	dres->dr_bufs = 0;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int				error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_extfree_intent_init_cache();
	if (error)
		goto err;
	error = xfs_attr_intent_init_cache();
	if (error)
		goto err;
	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_attr_intent_destroy_cache();
	xfs_extfree_intent_destroy_cache();
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}