/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);

STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
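
/*
 * Note on the ->execute callback contract above: a return of 0 means the
 * dquot was processed, -EAGAIN means "skip this dquot for now" and causes
 * the whole walk to be restarted after a short delay, and -EFSCORRUPTED
 * aborts the walk immediately. Any other error is remembered and returned
 * once the walk completes.
 */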

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al. might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}
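
/*
 * Note: in the unmount path xfs_qm_unmount_quotas() typically runs first,
 * dropping the dquot and quota inode references shown above, and
 * xfs_qm_unmount() is then called later to purge the remaining dquot cache
 * and tear down the quotainfo structure itself.
 */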

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a
	 * lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked. This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * The inode may get unlocked and relocked in here, and the caller must deal
 * with the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
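
/*
 * The isolate callback above maps each dquot to one of three LRU verdicts:
 * LRU_REMOVED when the dquot either regained a reference (and just leaves
 * the freelist) or was isolated onto the dispose list for freeing, LRU_SKIP
 * when we could not take its locks without blocking, and LRU_RETRY when we
 * had to drop the LRU lock to flush a dirty dquot.
 */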

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
	    (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}
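
/*
 * A note on the two shrinker halves: ->count_objects only reports the LRU
 * size so the VM can size its scan, while ->scan_objects does the actual
 * reclaim. The gfp_mask check in xfs_qm_shrink_scan() bails out unless the
 * caller allows both __GFP_FS and direct reclaim, since flushing dirty
 * dquots issues filesystem I/O that is not safe from every allocation
 * context.
 */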

STATIC void
xfs_qm_set_defquota(
	xfs_mount_t	*mp,
	uint		type,
	xfs_quotainfo_t	*qinf)
{
	xfs_dquot_t		*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		defq = xfs_get_defquota(dqp, qinf);

		/*
		 * Timers and warnings have already been set, so just set the
		 * default limits for this quota type.
		 */
		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
		xfs_qm_dqdestroy(dqp);
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
				XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
				       &committed);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}

STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa.
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * The blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
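
/*
 * The quota inodes iterated below are laid out as flat arrays of on-disk
 * dquots indexed by ID: file block b of a quota inode holds the dquots with
 * IDs in [b * qi_dqperchunk, (b + 1) * qi_dqperchunk). That is why
 * xfs_qm_dqiterate() derives firstid from br_startoff, and why
 * xfs_qm_dqiter_bufs() above advances firstid by qi_dqperchunk per block.
 */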
If 929 * this occurs, re-read without CRC validation so that we can 930 * repair the damage via xfs_qm_reset_dqcounts(). This process 931 * will leave a trace in the log indicating corruption has 932 * been detected. 933 */ 934 if (error == -EFSCORRUPTED) { 935 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 936 XFS_FSB_TO_DADDR(mp, bno), 937 mp->m_quotainfo->qi_dqchunklen, 0, &bp, 938 NULL); 939 } 940 941 if (error) 942 break; 943 944 /* 945 * A corrupt buffer might not have a verifier attached, so 946 * make sure we have the correct one attached before writeback 947 * occurs. 948 */ 949 bp->b_ops = &xfs_dquot_buf_ops; 950 xfs_qm_reset_dqcounts(mp, bp, firstid, type); 951 xfs_buf_delwri_queue(bp, buffer_list); 952 xfs_buf_relse(bp); 953 954 /* goto the next block. */ 955 bno++; 956 firstid += mp->m_quotainfo->qi_dqperchunk; 957 } 958 959 return error; 960 } 961 962 /* 963 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a 964 * caller supplied function for every chunk of dquots that we find. 965 */ 966 STATIC int 967 xfs_qm_dqiterate( 968 struct xfs_mount *mp, 969 struct xfs_inode *qip, 970 uint flags, 971 struct list_head *buffer_list) 972 { 973 struct xfs_bmbt_irec *map; 974 int i, nmaps; /* number of map entries */ 975 int error; /* return value */ 976 xfs_fileoff_t lblkno; 977 xfs_filblks_t maxlblkcnt; 978 xfs_dqid_t firstid; 979 xfs_fsblock_t rablkno; 980 xfs_filblks_t rablkcnt; 981 982 error = 0; 983 /* 984 * This looks racy, but we can't keep an inode lock across a 985 * trans_reserve. But, this gets called during quotacheck, and that 986 * happens only at mount time which is single threaded. 987 */ 988 if (qip->i_d.di_nblocks == 0) 989 return 0; 990 991 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); 992 993 lblkno = 0; 994 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 995 do { 996 uint lock_mode; 997 998 nmaps = XFS_DQITER_MAP_SIZE; 999 /* 1000 * We aren't changing the inode itself. Just changing 1001 * some of its data. No new blocks are added here, and 1002 * the inode is never added to the transaction. 1003 */ 1004 lock_mode = xfs_ilock_data_map_shared(qip); 1005 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, 1006 map, &nmaps, 0); 1007 xfs_iunlock(qip, lock_mode); 1008 if (error) 1009 break; 1010 1011 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); 1012 for (i = 0; i < nmaps; i++) { 1013 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); 1014 ASSERT(map[i].br_blockcount); 1015 1016 1017 lblkno += map[i].br_blockcount; 1018 1019 if (map[i].br_startblock == HOLESTARTBLOCK) 1020 continue; 1021 1022 firstid = (xfs_dqid_t) map[i].br_startoff * 1023 mp->m_quotainfo->qi_dqperchunk; 1024 /* 1025 * Do a read-ahead on the next extent. 1026 */ 1027 if ((i+1 < nmaps) && 1028 (map[i+1].br_startblock != HOLESTARTBLOCK)) { 1029 rablkcnt = map[i+1].br_blockcount; 1030 rablkno = map[i+1].br_startblock; 1031 while (rablkcnt--) { 1032 xfs_buf_readahead(mp->m_ddev_targp, 1033 XFS_FSB_TO_DADDR(mp, rablkno), 1034 mp->m_quotainfo->qi_dqchunklen, 1035 &xfs_dquot_buf_ops); 1036 rablkno++; 1037 } 1038 } 1039 /* 1040 * Iterate thru all the blks in the extent and 1041 * reset the counters of all the dquots inside them. 1042 */ 1043 error = xfs_qm_dqiter_bufs(mp, firstid, 1044 map[i].br_startblock, 1045 map[i].br_blockcount, 1046 flags, buffer_list); 1047 if (error) 1048 goto out; 1049 } 1050 } while (nmaps > 0); 1051 1052 out: 1053 kmem_free(map); 1054 return error; 1055 } 1056 1057 /* 1058 * Called by dqusage_adjust in doing a quotacheck. 

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done, we
 * can just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}
	rtblks = 0;
	nextents = xfs_iext_count(ifp);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
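
/*
 * Note for the bulkstat callback below: di_nblocks counts both data device
 * and realtime blocks, so xfs_qm_dqusage_adjust() subtracts the realtime
 * total gathered by xfs_qm_get_rtblks() to charge the two pools separately:
 *
 *	nblks = di_nblocks - rtblks;	(data device blocks only)
 */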

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
			 &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
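
/*
 * Quotacheck below proceeds in three phases: first xfs_qm_dqiterate() walks
 * each quota inode and resets every on-disk dquot to zero, then bulkstat
 * feeds every inode in the filesystem through xfs_qm_dqusage_adjust() to
 * rebuild the incore counts, and finally xfs_qm_flush_one() is run over the
 * dquot radix trees so the rebuilt dquots land on the delwri buffer list
 * and get written out in one pass.
 */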

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				  __func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes.
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					 0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					 0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					 0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return error;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
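
/*
 * Note that xfs_qm_vop_chown() hands back the old dquot still holding the
 * reference the inode used to own; the caller is expected to drop that
 * reference with xfs_qm_dqrele() once the transaction commits.
 */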

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}