// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
        struct xfs_dquot        *dqp)
{
        ASSERT(list_empty(&dqp->q_lru));

        kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
        mutex_destroy(&dqp->q_qlock);

        XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
        kmem_cache_free(xfs_qm_dqzone, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dq)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *d = &dq->q_core;
        struct xfs_def_quota    *defq;
        int                     prealloc = 0;

        ASSERT(d->d_id);
        defq = xfs_get_defquota(q, xfs_dquot_type(dq));

        if (defq->bsoftlimit && !d->d_blk_softlimit) {
                d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
                prealloc = 1;
        }
        if (defq->bhardlimit && !d->d_blk_hardlimit) {
                d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
                prealloc = 1;
        }
        if (defq->isoftlimit && !d->d_ino_softlimit)
                d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
        if (defq->ihardlimit && !d->d_ino_hardlimit)
                d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
        if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
                d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
        if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
                d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);

        if (prealloc)
                xfs_dquot_set_prealloc_limits(dq);
}
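/*
 * Illustrative example (hypothetical numbers): if the administrator set a
 * default block soft limit of 100 blocks, a dquot read from disk with
 * d_blk_softlimit == 0 inherits cpu_to_be64(100) above, and the speculative
 * preallocation watermarks are then recomputed to match the new limits.
 */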
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 *
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we simply don't reject any quota
 * reservations in that case. We also return 0 as the values of the
 * timers in Q_GETQUOTA calls when enforcement is off.
 *
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dq)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct xfs_disk_dquot   *d = &dq->q_core;
        struct xfs_def_quota    *defq;

        ASSERT(d->d_id);
        defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

#ifdef DEBUG
        if (d->d_blk_hardlimit)
                ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
                       be64_to_cpu(d->d_blk_hardlimit));
        if (d->d_ino_hardlimit)
                ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
                       be64_to_cpu(d->d_ino_hardlimit));
        if (d->d_rtb_hardlimit)
                ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
                       be64_to_cpu(d->d_rtb_hardlimit));
#endif

        if (!d->d_btimer) {
                if ((d->d_blk_softlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_softlimit))) ||
                    (d->d_blk_hardlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
                                        defq->btimelimit);
                } else {
                        d->d_bwarns = 0;
                }
        } else {
                if ((!d->d_blk_softlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_softlimit))) &&
                    (!d->d_blk_hardlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = 0;
                }
        }

        if (!d->d_itimer) {
                if ((d->d_ino_softlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_softlimit))) ||
                    (d->d_ino_hardlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
                                        defq->itimelimit);
                } else {
                        d->d_iwarns = 0;
                }
        } else {
                if ((!d->d_ino_softlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_softlimit))) &&
                    (!d->d_ino_hardlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = 0;
                }
        }

        if (!d->d_rtbtimer) {
                if ((d->d_rtb_softlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_softlimit))) ||
                    (d->d_rtb_hardlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
                                        defq->rtbtimelimit);
                } else {
                        d->d_rtbwarns = 0;
                }
        } else {
                if ((!d->d_rtb_softlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_softlimit))) &&
                    (!d->d_rtb_hardlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = 0;
                }
        }
}
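/*
 * Timer behaviour by example (illustrative): a user crossing their block
 * soft limit at time T gets d_btimer armed at T + defq->btimelimit (the
 * grace period). If usage drops back to or under both limits before the
 * timer expires, the timer is cleared, so the next violation starts a
 * fresh grace period.
 */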
/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        struct xfs_buf          *bp)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_dqblk        *d;
        xfs_dqid_t              curid;
        unsigned int            qflag;
        unsigned int            blftype;
        int                     i;

        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));

        d = bp->b_addr;

        /*
         * ID of the first dquot in the block - IDs are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
                        xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
                                         XFS_DQUOT_CRC_OFF);
                }
        }

        if (type & XFS_DQ_USER) {
                qflag = XFS_UQUOTA_CHKD;
                blftype = XFS_BLF_UDQUOT_BUF;
        } else if (type & XFS_DQ_PROJ) {
                qflag = XFS_PQUOTA_CHKD;
                blftype = XFS_BLF_PDQUOT_BUF;
        } else {
                qflag = XFS_GQUOTA_CHKD;
                blftype = XFS_BLF_GDQUOT_BUF;
        }

        xfs_trans_dquot_buf(tp, bp, blftype);

        /*
         * quotacheck uses delayed writes to update all the dquots on disk in an
         * efficient manner instead of logging the individual dquot changes as
         * they are made. However if we log the buffer allocated here and crash
         * after quotacheck while the logged initialisation is still in the
         * active region of the log, log recovery can replay the dquot buffer
         * initialisation over the top of the checked dquots and corrupt quota
         * accounting.
         *
         * To avoid this problem, quotacheck cannot log the initialised buffer.
         * We must still dirty the buffer and write it back before the
         * allocation transaction clears the log. Therefore, mark the buffer as
         * ordered instead of logging it directly. This is safe for quotacheck
         * because it detects and repairs allocated but uninitialized dquot
         * blocks in the quota inodes.
         */
        if (!(mp->m_qflags & qflag))
                xfs_trans_ordered_buf(tp, bp);
        else
                xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft
 * limit is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
        uint64_t space;

        dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
        dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
        if (!dqp->q_prealloc_lo_wmark) {
                dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
                do_div(dqp->q_prealloc_lo_wmark, 100);
                dqp->q_prealloc_lo_wmark *= 95;
        }

        space = dqp->q_prealloc_hi_wmark;

        do_div(space, 100);
        dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
        dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
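/*
 * Worked example (hypothetical limits): with a 1000-block hard limit and no
 * soft limit, q_prealloc_hi_wmark = 1000, q_prealloc_lo_wmark = 950 (95%),
 * and q_low_space holds 10/30/50 blocks for the 1%/3%/5% throttle points.
 */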
/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi lookup
 * finds a hole.
 */
STATIC int
xfs_dquot_disk_alloc(
        struct xfs_trans        **tpp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_bmbt_irec    map;
        struct xfs_trans        *tp = *tpp;
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_buf          *bp;
        struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
        int                     nmaps = 1;
        int                     error;

        trace_xfs_dqalloc(dqp);

        xfs_ilock(quotip, XFS_ILOCK_EXCL);
        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
                /*
                 * Return if this type of quotas was turned off while we
                 * didn't have the inode lock.
                 */
                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
                return -ESRCH;
        }

        /* Create the block mapping. */
        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
        error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
                        XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
                        &nmaps);
        if (error)
                return error;
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(nmaps == 1);
        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
               (map.br_startblock != HOLESTARTBLOCK));

        /*
         * Keep track of the blkno to save a lookup later.
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        /* now we can just get the buffer (there's nothing to read yet) */
        error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, 0, &bp);
        if (error)
                return error;
        bp->b_ops = &xfs_dquot_buf_ops;

        /*
         * Make a chunk of dquots out of this buffer and log
         * the entire thing.
         */
        xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
                              dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);

        /*
         * Hold the buffer and join it to the dfops so that we'll still own
         * the buffer when we return to the caller. The buffer disposal on
         * error must be paid attention to very carefully, as it has been
         * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
         * code when allocating a new dquot record" in 2005, and the later
         * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
         * the buffer locked across the _defer_finish call. We can now do
         * this correctly with xfs_defer_bjoin.
         *
         * Above, we allocated a disk block for the dquot information and used
         * get_buf to initialize the dquot. If the _defer_finish fails, the old
         * transaction is gone but the new buffer is not joined or held to any
         * transaction, so we must _buf_relse it.
         *
         * If everything succeeds, the caller of this function is returned a
         * buffer that is locked and held to the transaction. The caller
         * is responsible for unlocking any buffer passed back, either
         * manually or by committing the transaction. On error, the buffer is
         * released and not passed back.
         */
        xfs_trans_bhold(tp, bp);
        error = xfs_defer_finish(tpp);
        if (error) {
                xfs_trans_bhold_release(*tpp, bp);
                xfs_trans_brelse(*tpp, bp);
                return error;
        }
        *bpp = bp;
        return 0;
}
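/*
 * Usage sketch (see xfs_qm_dqread_alloc below): the caller allocates a
 * tr_qm_dqalloc transaction, calls xfs_dquot_disk_alloc(), and commits.
 * On success the buffer comes back locked and held, so the caller must
 * xfs_buf_relse() it once the commit is done with it.
 */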
/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_bmbt_irec    map;
        struct xfs_buf          *bp;
        struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
        uint                    lock_mode;
        int                     nmaps = 1;
        int                     error;

        lock_mode = xfs_ilock_data_map_shared(quotip);
        if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
                /*
                 * Return if this type of quotas was turned off while we
                 * didn't have the quota inode lock.
                 */
                xfs_iunlock(quotip, lock_mode);
                return -ESRCH;
        }

        /*
         * Find the block map; no allocations yet.
         */
        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
                        XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
        xfs_iunlock(quotip, lock_mode);
        if (error)
                return error;

        ASSERT(nmaps == 1);
        ASSERT(map.br_blockcount >= 1);
        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
        if (map.br_startblock == HOLESTARTBLOCK)
                return -ENOENT;

        trace_xfs_dqtobp_read(dqp);

        /*
         * Store the blkno etc so that we don't have to do the
         * mapping all the time.
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                        &xfs_dquot_buf_ops);
        if (error) {
                ASSERT(bp == NULL);
                return error;
        }

        ASSERT(xfs_buf_islocked(bp));
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);
        *bpp = bp;

        return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type)
{
        struct xfs_dquot        *dqp;

        dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);

        dqp->dq_flags = type;
        dqp->q_core.d_id = cpu_to_be32(id);
        dqp->q_mount = mp;
        INIT_LIST_HEAD(&dqp->q_lru);
        mutex_init(&dqp->q_qlock);
        init_waitqueue_head(&dqp->q_pinwait);
        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
        /*
         * Offset of dquot in the (fixed sized) dquot chunk.
         */
        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
                        sizeof(xfs_dqblk_t);

        /*
         * Because we want to use a counting completion, complete
         * the flush completion once to allow a single access to
         * the flush completion without blocking.
         */
        init_completion(&dqp->q_flush);
        complete(&dqp->q_flush);

        /*
         * Make sure group quotas have a different lock class than user
         * quotas.
         */
        switch (type) {
        case XFS_DQ_USER:
                /* uses the default lock class */
                break;
        case XFS_DQ_GROUP:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
                break;
        case XFS_DQ_PROJ:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
                break;
        default:
                ASSERT(0);
                break;
        }

        xfs_qm_dquot_logitem_init(dqp);

        XFS_STATS_INC(mp, xs_qm_dquot);
        return dqp;
}
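/*
 * Layout example (illustrative numbers): with qi_dqperchunk == 30, dquot
 * id 100 lives in the quota file block at file offset 100 / 30 == 3, at
 * byte offset (100 % 30) * sizeof(xfs_dqblk_t) within that chunk.
 */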
/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
        struct xfs_dquot        *dqp,
        struct xfs_buf          *bp)
{
        struct xfs_disk_dquot   *ddqp = bp->b_addr + dqp->q_bufoffset;

        /*
         * Ensure that we got the type and ID we were looking for.
         * Everything else was checked by the dquot buffer verifier.
         */
        if ((ddqp->d_flags & XFS_DQ_ALLTYPES) != dqp->dq_flags ||
            ddqp->d_id != dqp->q_core.d_id) {
                xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
                          "Metadata corruption detected at %pS, quota %u",
                          __this_address, be32_to_cpu(dqp->q_core.d_id));
                xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
                return -EFSCORRUPTED;
        }

        /* copy everything from disk dquot to the incore dquot */
        memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));

        /*
         * Reservation counters are defined as reservation plus current usage
         * to avoid having to add every time.
         */
        dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
        dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
        dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

        /* initialize the dquot speculative prealloc thresholds */
        xfs_dquot_set_prealloc_limits(dqp);
        return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
        struct xfs_disk_dquot   *ddqp,
        struct xfs_dquot        *dqp)
{
        memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot));
}

/* Allocate and initialize the dquot buffer for this in-core dquot. */
static int
xfs_qm_dqread_alloc(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
                        XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
        if (error)
                goto err;

        error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
        if (error)
                goto err_cancel;

        error = xfs_trans_commit(tp);
        if (error) {
                /*
                 * Buffer was held to the transaction, so we have to unlock it
                 * manually here because we're not passing it back.
                 */
                xfs_buf_relse(*bpp);
                *bpp = NULL;
                goto err;
        }
        return 0;

err_cancel:
        xfs_trans_cancel(tp);
err:
        return error;
}

/*
 * Read in the on-disk dquot, copy it to an incore version, and release the
 * buffer immediately. If @can_alloc is true, fill any holes in the on-disk
 * metadata.
 */
static int
xfs_qm_dqread(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        bool                    can_alloc,
        struct xfs_dquot        **dqpp)
{
        struct xfs_dquot        *dqp;
        struct xfs_buf          *bp;
        int                     error;

        dqp = xfs_dquot_alloc(mp, id, type);
        trace_xfs_dqread(dqp);

        /* Try to read the buffer, allocating if necessary. */
        error = xfs_dquot_disk_read(mp, dqp, &bp);
        if (error == -ENOENT && can_alloc)
                error = xfs_qm_dqread_alloc(mp, dqp, &bp);
        if (error)
                goto err;

        /*
         * At this point we should have a clean locked buffer. Copy the data
         * to the incore dquot and release the buffer since the incore dquot
         * has its own locking protocol so we needn't tie up the buffer any
         * further.
         */
        ASSERT(xfs_buf_islocked(bp));
        error = xfs_dquot_from_disk(dqp, bp);
        xfs_buf_relse(bp);
        if (error)
                goto err;

        *dqpp = dqp;
        return error;

err:
        trace_xfs_dqread_fail(dqp);
        xfs_qm_dqdestroy(dqp);
        *dqpp = NULL;
        return error;
}
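/*
 * Read path summary: xfs_qm_dqread() above allocates the incore dquot, maps
 * and reads the on-disk copy (allocating the backing block first if the
 * caller passed can_alloc and the mapping was a hole), copies the fields in,
 * and drops the buffer; the incore dquot carries its own locking thereafter.
 */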
/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to the first id in the next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
        struct xfs_mount        *mp,
        uint                    type,
        xfs_dqid_t              *id)
{
        struct xfs_inode        *quotip = xfs_quota_inode(mp, type);
        xfs_dqid_t              next_id = *id + 1; /* simple advance */
        uint                    lock_flags;
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  cur;
        xfs_fsblock_t           start;
        int                     error = 0;

        /* If we'd wrap past the max ID, stop */
        if (next_id < *id)
                return -ENOENT;

        /* If new ID is within the current chunk, advancing it sufficed */
        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
                *id = next_id;
                return 0;
        }

        /* Nope, next_id is now past the current chunk, so find the next one */
        start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

        lock_flags = xfs_ilock_data_map_shared(quotip);
        if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
                if (error) {
                        xfs_iunlock(quotip, lock_flags);
                        return error;
                }
        }

        if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
                /* contiguous chunk, bump startoff for the id calculation */
                if (got.br_startoff < start)
                        got.br_startoff = start;
                *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
        } else {
                error = -ENOENT;
        }

        xfs_iunlock(quotip, lock_flags);

        return error;
}

/*
 * Look up the dquot in the in-core cache. If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
        struct xfs_mount        *mp,
        struct xfs_quotainfo    *qi,
        struct radix_tree_root  *tree,
        xfs_dqid_t              id)
{
        struct xfs_dquot        *dqp;

restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
        if (!dqp) {
                mutex_unlock(&qi->qi_tree_lock);
                XFS_STATS_INC(mp, xs_qm_dqcachemisses);
                return NULL;
        }

        xfs_dqlock(dqp);
        if (dqp->q_flags & XFS_DQFLAG_FREEING) {
                xfs_dqunlock(dqp);
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_freeing(dqp);
                delay(1);
                goto restart;
        }

        dqp->q_nrefs++;
        mutex_unlock(&qi->qi_tree_lock);

        trace_xfs_dqget_hit(dqp);
        XFS_STATS_INC(mp, xs_qm_dqcachehits);
        return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache. If an error occurs the
 * caller should throw away the dquot and start over. Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
        struct xfs_mount        *mp,
        struct xfs_quotainfo    *qi,
        struct radix_tree_root  *tree,
        xfs_dqid_t              id,
        struct xfs_dquot        *dqp)
{
        int                     error;

        mutex_lock(&qi->qi_tree_lock);
        error = radix_tree_insert(tree, id, dqp);
        if (unlikely(error)) {
                /* Duplicate found! Caller must try again. */
                WARN_ON(error != -EEXIST);
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_dup(dqp);
                return error;
        }

        /* Return a locked dquot to the caller, with a reference taken. */
        xfs_dqlock(dqp);
        dqp->q_nrefs = 1;

        qi->qi_dquots++;
        mutex_unlock(&qi->qi_tree_lock);

        return 0;
}
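/*
 * Cache coherency note: two threads can miss on the same id and both read
 * the dquot from disk; radix_tree_insert() then fails with -EEXIST for the
 * loser, which destroys its copy and retries the lookup (see xfs_qm_dqget).
 */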
/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
        struct xfs_mount        *mp,
        uint                    type)
{
        if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
                return -ESRCH;

        switch (type) {
        case XFS_DQ_USER:
                if (!XFS_IS_UQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        case XFS_DQ_GROUP:
                if (!XFS_IS_GQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        case XFS_DQ_PROJ:
                if (!XFS_IS_PQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        default:
                WARN_ON_ONCE(0);
                return -EINVAL;
        }
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
 * dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        bool                    can_alloc,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

restart:
        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
        if (dqp) {
                *O_dqpp = dqp;
                return 0;
        }

        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
        if (error)
                return error;

        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
        if (error) {
                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(mp, xs_qm_dquot_dups);
                goto restart;
        }

        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata. This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        struct xfs_dquot        **dqpp)
{
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

        return xfs_qm_dqread(mp, id, type, false, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
        struct xfs_inode        *ip,
        uint                    type)
{
        switch (type) {
        case XFS_DQ_USER:
                return i_uid_read(VFS_I(ip));
        case XFS_DQ_GROUP:
                return i_gid_read(VFS_I(ip));
        case XFS_DQ_PROJ:
                return ip->i_d.di_projid;
        }
        ASSERT(0);
        return 0;
}
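/*
 * Usage sketch (illustrative): a caller looking up a user dquot would do
 *
 *	error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &dqp);
 *	if (error)
 *		return error;
 *	... use the dquot, which comes back locked with a reference ...
 *	xfs_qm_dqput(dqp);	\* drops the reference and unlocks *\
 */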
/*
 * Return the dquot for a given inode and type. If @can_alloc is true, then
 * allocate blocks if needed. The inode's ILOCK must be held and it must not
 * already have a dquot of this type attached.
 */
int
xfs_qm_dqget_inode(
        struct xfs_inode        *ip,
        uint                    type,
        bool                    can_alloc,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
        xfs_dqid_t              id;
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(xfs_inode_dquot(ip, type) == NULL);

        id = xfs_qm_id_for_quotatype(ip, type);

restart:
        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
        if (dqp) {
                *O_dqpp = dqp;
                return 0;
        }

        /*
         * Dquot cache miss. We don't want to keep the inode lock across
         * a (potential) disk read. Also we don't want to deal with the lock
         * ordering between quotainode and this inode. OTOH, dropping the inode
         * lock here means dealing with a chown that can happen before
         * we re-acquire the lock.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        /*
         * A dquot could be attached to this inode by now, since we had
         * dropped the ilock.
         */
        if (xfs_this_quota_on(mp, type)) {
                struct xfs_dquot        *dqp1;

                dqp1 = xfs_inode_dquot(ip, type);
                if (dqp1) {
                        xfs_qm_dqdestroy(dqp);
                        dqp = dqp1;
                        xfs_dqlock(dqp);
                        goto dqret;
                }
        } else {
                /* inode stays locked on return */
                xfs_qm_dqdestroy(dqp);
                return -ESRCH;
        }

        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
        if (error) {
                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(mp, xs_qm_dquot_dups);
                goto restart;
        }

dqret:
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        struct xfs_dquot        **dqpp)
{
        struct xfs_dquot        *dqp;
        int                     error = 0;

        *dqpp = NULL;
        for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
                error = xfs_qm_dqget(mp, id, type, false, &dqp);
                if (error == -ENOENT)
                        continue;
                else if (error != 0)
                        break;

                if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                        *dqpp = dqp;
                        return 0;
                }

                xfs_qm_dqput(dqp);
        }

        return error;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
        struct xfs_dquot        *dqp)
{
        ASSERT(dqp->q_nrefs > 0);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        trace_xfs_dqput(dqp);

        if (--dqp->q_nrefs == 0) {
                struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
                trace_xfs_dqput_free(dqp);

                if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
                        XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
        }
        xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
        struct xfs_dquot        *dqp)
{
        if (!dqp)
                return;

        trace_xfs_dqrele(dqp);

        xfs_dqlock(dqp);
        /*
         * We don't care to flush it if the dquot is dirty here.
         * That will create stutters that we want to avoid.
         * Instead we do a delayed write when we try to reclaim
         * a dirty dquot. Also xfs_sync will take part of the burden...
         */
        xfs_qm_dqput(dqp);
}
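/*
 * Reference lifecycle note: dropping the last reference does not free the
 * dquot; xfs_qm_dqput() parks it on the LRU instead, so it can be recycled
 * cheaply on the next cache hit or torn down later under memory pressure.
 */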
/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
static void
xfs_qm_dqflush_done(
        struct xfs_log_item     *lip)
{
        struct xfs_dq_logitem   *qip = (struct xfs_dq_logitem *)lip;
        struct xfs_dquot        *dqp = qip->qli_dquot;
        struct xfs_ail          *ailp = lip->li_ailp;
        xfs_lsn_t               tail_lsn;

        /*
         * We only want to pull the item from the AIL if its
         * location in the log has not changed since we started the flush.
         * Thus, we only bother if the dquot's lsn has
         * not changed. First we check the lsn outside the lock
         * since it's cheaper, and then we recheck while
         * holding the lock before removing the dquot from the AIL.
         */
        if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
            ((lip->li_lsn == qip->qli_flush_lsn) ||
             test_bit(XFS_LI_FAILED, &lip->li_flags))) {

                spin_lock(&ailp->ail_lock);
                xfs_clear_li_failed(lip);
                if (lip->li_lsn == qip->qli_flush_lsn) {
                        /* xfs_ail_update_finish() drops the AIL lock */
                        tail_lsn = xfs_ail_delete_one(ailp, lip);
                        xfs_ail_update_finish(ailp, tail_lsn);
                } else {
                        spin_unlock(&ailp->ail_lock);
                }
        }

        /*
         * Release the dq's flush lock since we're done with it.
         */
        xfs_dqfunlock(dqp);
}

void
xfs_dquot_done(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip, *n;

        list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
                list_del_init(&lip->li_bio_list);
                xfs_qm_dqflush_done(lip);
        }
}

/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
        struct xfs_dquot        *dqp)
{
        __u8                    type = dqp->dq_flags & XFS_DQ_ALLTYPES;

        if (type != XFS_DQ_USER &&
            type != XFS_DQ_GROUP &&
            type != XFS_DQ_PROJ)
                return __this_address;

        return NULL;
}
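/*
 * Writeback path overview: xfs_qm_dqflush() below attaches the dquot log
 * item to the buffer's b_li_list; when the buffer I/O completes,
 * xfs_dquot_done() above walks that list and calls xfs_qm_dqflush_done()
 * for each item, removing clean items from the AIL and dropping the
 * flush lock.
 */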
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. The dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
        struct xfs_buf          *bp;
        struct xfs_dqblk        *dqb;
        struct xfs_disk_dquot   *ddqp;
        xfs_failaddr_t          fa;
        int                     error;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        trace_xfs_dqflush(dqp);

        *bpp = NULL;

        xfs_qm_dqunpin_wait(dqp);

        /*
         * Get the buffer containing the on-disk dquot.
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
                                   &bp, &xfs_dquot_buf_ops);
        if (error == -EAGAIN)
                goto out_unlock;
        if (error)
                goto out_abort;

        /*
         * Calculate the location of the dquot inside the buffer.
         */
        dqb = bp->b_addr + dqp->q_bufoffset;
        ddqp = &dqb->dd_diskdq;

        /* sanity check the in-core structure before we flush */
        fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id),
                              0);
        if (fa) {
                xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
                                be32_to_cpu(dqp->q_core.d_id), fa);
                xfs_buf_relse(bp);
                error = -EFSCORRUPTED;
                goto out_abort;
        }

        fa = xfs_qm_dqflush_check(dqp);
        if (fa) {
                xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
                                be32_to_cpu(dqp->q_core.d_id), fa);
                xfs_buf_relse(bp);
                error = -EFSCORRUPTED;
                goto out_abort;
        }

        xfs_dquot_to_disk(ddqp, dqp);

        /*
         * Clear the dirty field and remember the flush lsn for later use.
         */
        dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
                                        &dqp->q_logitem.qli_item.li_lsn);

        /*
         * copy the lsn into the on-disk dquot now while we have the in memory
         * dquot here. This can't be done later in the write verifier as we
         * can't get access to the log item at that point in time.
         *
         * We also calculate the CRC here so that the on-disk dquot in the
         * buffer always has a valid CRC. This ensures there is no possibility
         * of a dquot without an up-to-date CRC getting to disk.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
                xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
                                 XFS_DQUOT_CRC_OFF);
        }

        /*
         * Attach the dquot to the buffer so that we can remove this dquot from
         * the AIL and release the flush lock once the dquot is synced to disk.
         */
        bp->b_flags |= _XBF_DQUOTS;
        list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp)) {
                trace_xfs_dqflush_force(dqp);
                xfs_log_force(mp, 0);
        }

        trace_xfs_dqflush_done(dqp);
        *bpp = bp;
        return 0;

out_abort:
        dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
        xfs_trans_ail_delete(lip, 0);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
        xfs_dqfunlock(dqp);
        return error;
}
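/*
 * Example (illustrative): a chown that moves an inode between two users
 * must hold both the old and the new user dquot; xfs_dqlock2() below always
 * takes the lower-id dquot first, so concurrent callers cannot deadlock on
 * the pair.
 */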
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
        struct xfs_dquot        *d1,
        struct xfs_dquot        *d2)
{
        if (d1 && d2) {
                ASSERT(d1 != d2);
                if (be32_to_cpu(d1->q_core.d_id) >
                    be32_to_cpu(d2->q_core.d_id)) {
                        mutex_lock(&d2->q_qlock);
                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
                } else {
                        mutex_lock(&d1->q_qlock);
                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
                }
        } else if (d1) {
                mutex_lock(&d1->q_qlock);
        } else if (d2) {
                mutex_lock(&d2->q_qlock);
        }
}

int __init
xfs_qm_init(void)
{
        xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
                                          sizeof(struct xfs_dquot),
                                          0, 0, NULL);
        if (!xfs_qm_dqzone)
                goto out;

        xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
                                             sizeof(struct xfs_dquot_acct),
                                             0, 0, NULL);
        if (!xfs_qm_dqtrxzone)
                goto out_free_dqzone;

        return 0;

out_free_dqzone:
        kmem_cache_destroy(xfs_qm_dqzone);
out:
        return -ENOMEM;
}

void
xfs_qm_exit(void)
{
        kmem_cache_destroy(xfs_qm_dqtrxzone);
        kmem_cache_destroy(xfs_qm_dqzone);
}

/*
 * Iterate every dquot of a particular type. The caller must ensure that the
 * particular quota type is active. iter_fn can return negative error codes,
 * or -ECANCELED to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        uint                    dqtype,
        xfs_qm_dqiterate_fn     iter_fn,
        void                    *priv)
{
        struct xfs_dquot        *dq;
        xfs_dqid_t              id = 0;
        int                     error;

        do {
                error = xfs_qm_dqget_next(mp, id, dqtype, &dq);
                if (error == -ENOENT)
                        return 0;
                if (error)
                        return error;

                error = iter_fn(dq, dqtype, priv);
                id = be32_to_cpu(dq->q_core.d_id);
                xfs_qm_dqput(dq);
                id++;
        } while (error == 0 && id != 0);

        return error;
}