/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);


STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers only run during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}

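/*
 * Note on the walker above: @execute is called with qi_tree_lock held and
 * may return -EAGAIN to have the dquot revisited; the walk then delays one
 * tick and restarts from id 0. An -EFSCORRUPTED return aborts the walk
 * immediately. (Descriptive note only; see xfs_qm_dqpurge() below for a
 * real callback.)
 */
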
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked. This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

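/*
 * Typical calling pattern for the attach helpers below (a sketch only;
 * xfs_qm_dqattach() further down is the canonical caller):
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_qm_dqattach_locked(ip, flags);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
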
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
					    flags & XFS_QMOPT_DQALLOC,
					    &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

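/*
 * Context for a dquot reclaim pass: dirty dquot buffers are queued on
 * @buffers for a single delwri submission, while dquots that are ready
 * to be freed are isolated onto @dispose. Both lists are drained by
 * xfs_qm_shrink_scan() below.
 */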
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

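/*
 * Seed the default limits for one quota type from the root dquot (id 0),
 * which by long-standing quota convention carries the administrator-set
 * defaults.
 */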
STATIC void
xfs_qm_set_defquota(
	xfs_mount_t	*mp,
	uint		type,
	xfs_quotainfo_t	*qinf)
{
	xfs_dquot_t		*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		defq = xfs_get_defquota(dqp, qinf);

		/*
		 * Timers and warnings have already been set; just set the
		 * default limits for this quota type.
		 */
		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
		xfs_qm_dqdestroy(dqp);
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before they are disallowed from doing
		 * any more writes. If a field is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return it with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

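/*
 * Read each dquot buffer in the given extent, reset the dquots within it
 * via xfs_qm_reset_dqcounts(), and queue the buffer on @buffer_list for a
 * later delwri submission by the quotacheck caller.
 */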
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an -EFSCORRUPTED
		 * here. If this occurs, re-read without CRC validation so
		 * that we can repair the damage via xfs_qm_reset_dqcounts().
		 * This process will leave a trace in the log indicating
		 * corruption has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, resetting
 * the counters in every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

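/*
 * Count the realtime blocks allocated to an inode by walking the data
 * fork extent list, reading the extents in first if necessary.
 */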
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = xfs_iext_count(ifp);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
			 &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		DEFINE_SINGLE_BUF_MAP(map, dqp->q_blkno,
				      mp->m_quotainfo->qi_dqchunklen);
		bp = _xfs_buf_find(mp->m_ddev_targp, &map, 1, 0, NULL);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

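/*
 * Note on the quotacheck pipeline implemented below: first the on-disk
 * dquot buffers are zeroed (xfs_qm_dqiterate), then bulkstat walks every
 * inode and adjusts the incore dquots (xfs_qm_dqusage_adjust), then the
 * dirty dquots are flushed back to their buffers (xfs_qm_flush_one) and
 * the accumulated delwri buffer list is submitted in one go.
 */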
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes.
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return error;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */

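/*
 * A sketch of how the vnodeops helpers below are typically used on the
 * inode create path (illustrative only; see xfs_create() for the real
 * sequence): xfs_qm_vop_dqalloc() gets the dquots up front, the transaction
 * then reserves against them, and xfs_qm_vop_create_dqattach() attaches
 * them to the new inode inside the transaction.
 */
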
/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock the inode here before calling dqget
			 * if we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

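/*
 * Attach dquots to every distinct inode involved in a rename; i_tab
 * holds up to four entries (source/target directories and inodes) and
 * may contain duplicates, which are skipped.
 */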
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}