/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);

STATIC void     xfs_qm_dqfree_one(struct xfs_dquot *dqp);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH     32

STATIC int
xfs_qm_dquot_walk(
        struct xfs_mount        *mp,
        int                     type,
        int                     (*execute)(struct xfs_dquot *dqp, void *data),
        void                    *data)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        uint32_t                next_index;
        int                     last_error = 0;
        int                     skipped;
        int                     nr_found;

restart:
        skipped = 0;
        next_index = 0;
        nr_found = 0;

        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                mutex_lock(&qi->qi_tree_lock);
                nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                                        next_index, XFS_DQ_LOOKUP_BATCH);
                if (!nr_found) {
                        mutex_unlock(&qi->qi_tree_lock);
                        break;
                }

                for (i = 0; i < nr_found; i++) {
                        struct xfs_dquot *dqp = batch[i];

                        next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

                        error = execute(batch[i], data);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                mutex_unlock(&qi->qi_tree_lock);

                /* bail out if the filesystem is corrupted. */
                if (last_error == EFSCORRUPTED) {
                        skipped = 0;
                        break;
                }
        }

        if (skipped) {
                delay(1);
                goto restart;
        }

        return last_error;
}
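
/*
 * Editor's illustration (not part of the original file): an @execute
 * callback sees one dquot at a time with the tree lock held, and may
 * return EAGAIN to have the whole walk retried after a short delay.
 * A hypothetical counting callback might look like:
 *
 *      STATIC int
 *      xfs_qm_count_one(struct xfs_dquot *dqp, void *data)
 *      {
 *              (*(long *)data)++;
 *              return 0;
 *      }
 *
 *      long count = 0;
 *      error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_count_one, &count);
 */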

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        dqp->dq_flags |= XFS_DQ_FREEING;

        xfs_dqflock(dqp);

        /*
         * If we are turning this quota type off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                __func__, dqp);
                } else {
                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                }
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));
        qi->qi_dquots--;

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        ASSERT(!list_empty(&dqp->q_lru));
        list_lru_del(&qi->qi_lru, &dqp->q_lru);
        XFS_STATS_DEC(xs_qm_dquot_unused);

        xfs_qm_dqdestroy(dqp);
        return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
{
        if (flags & XFS_QMOPT_UQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount        *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
        }
}
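
/*
 * Editor's note (added commentary): unmount purges every quota type via
 * XFS_QMOPT_QUOTALL above, whereas a quotaoff of a single type would pass
 * just that type's flag, e.g.:
 *
 *      xfs_qm_dqpurge_all(mp, XFS_QMOPT_UQUOTA);
 *
 * Dquots that are still referenced make xfs_qm_dqpurge() return EAGAIN,
 * and xfs_qm_dquot_walk() keeps retrying until nothing is skipped.
 */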

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        xfs_mount_t             *mp)
{
        int                     error = 0;
        uint                    sbf;

        /*
         * If quotas on realtime volumes are not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!XFS_IS_GQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_GQUOTA_CHKD;
        if (!XFS_IS_PQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on disk superblock doesn't know that!
                         */
                        ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
{
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        IRELE(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        IRELE(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_pquotaip) {
                        IRELE(mp->m_quotainfo->qi_pquotaip);
                        mp->m_quotainfo->qi_pquotaip = NULL;
                }
        }
}

STATIC int
xfs_qm_dqattach_one(
        xfs_inode_t     *ip,
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
        xfs_dquot_t     **IO_idqpp)
{
        xfs_dquot_t     *dqp;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is &i_udquot
         * or &i_gdquot. This made the code look weird, but made the logic a lot
         * simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * Find the dquot from somewhere. This bumps the reference count of
         * dquot and returns it locked. This can return ENOENT if dquot didn't
         * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
         * turned off suddenly.
         */
        error = xfs_qm_dqget(ip->i_mount, ip, id, type,
                             doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}

static bool
xfs_qm_need_dqattach(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return false;
        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        int             error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
                                                &ip->i_udquot);
                if (error)
                        goto done;
                ASSERT(ip->i_udquot);
        }

        if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
                                                &ip->i_gdquot);
                if (error)
                        goto done;
                ASSERT(ip->i_gdquot);
        }

        if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
                error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
                                                &ip->i_pdquot);
                if (error)
                        goto done;
                ASSERT(ip->i_pdquot);
        }

done:
        /*
         * Don't worry about the dquots that we may have attached before any
         * error - they'll get detached later if it has not already been done.
         */
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        return error;
}

int
xfs_qm_dqattach(
        struct xfs_inode        *ip,
        uint                    flags)
{
        int                     error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, flags);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}
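
/*
 * Usage sketch (editor's addition, simplified): callers about to change
 * an inode's space usage typically attach dquots before reserving
 * transaction space, e.g.:
 *
 *      error = xfs_qm_dqattach(ip, 0);
 *      if (error)
 *              return error;
 *      ... reserve and modify; the attached dquots are then charged ...
 *
 * Passing XFS_QMOPT_DQALLOC in @flags additionally allocates the on-disk
 * dquots if they do not yet exist.
 */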

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        xfs_inode_t     *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        if (ip->i_pdquot) {
                xfs_qm_dqrele(ip->i_pdquot);
                ip->i_pdquot = NULL;
        }
}

struct xfs_qm_isolate {
        struct list_head        buffers;
        struct list_head        dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
        struct list_head        *item,
        spinlock_t              *lru_lock,
        void                    *arg)
{
        struct xfs_dquot        *dqp = container_of(item,
                                                struct xfs_dquot, q_lru);
        struct xfs_qm_isolate   *isol = arg;

        if (!xfs_dqlock_nowait(dqp))
                goto out_miss_busy;

        /*
         * This dquot has acquired a reference in the meantime; remove it from
         * the freelist and try again.
         */
        if (dqp->q_nrefs) {
                xfs_dqunlock(dqp);
                XFS_STATS_INC(xs_qm_dqwants);

                trace_xfs_dqreclaim_want(dqp);
                list_del_init(&dqp->q_lru);
                XFS_STATS_DEC(xs_qm_dquot_unused);
                return LRU_REMOVED;
        }

        /*
         * If the dquot is dirty, flush it. If it's already being flushed, just
         * skip it so there is time for the IO to complete before we try to
         * reclaim it again on the next LRU pass.
         */
        if (!xfs_dqflock_nowait(dqp)) {
                xfs_dqunlock(dqp);
                goto out_miss_busy;
        }

        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                trace_xfs_dqreclaim_dirty(dqp);

                /* we have to drop the LRU lock to flush the dquot */
                spin_unlock(lru_lock);

                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
                                 __func__, dqp);
                        goto out_unlock_dirty;
                }

                xfs_buf_delwri_queue(bp, &isol->buffers);
                xfs_buf_relse(bp);
                goto out_unlock_dirty;
        }
        xfs_dqfunlock(dqp);

        /*
         * Prevent lookups now that we are past the point of no return.
         */
        dqp->dq_flags |= XFS_DQ_FREEING;
        xfs_dqunlock(dqp);

        ASSERT(dqp->q_nrefs == 0);
        list_move_tail(&dqp->q_lru, &isol->dispose);
        XFS_STATS_DEC(xs_qm_dquot_unused);
        trace_xfs_dqreclaim_done(dqp);
        XFS_STATS_INC(xs_qm_dqreclaims);
        return LRU_REMOVED;

out_miss_busy:
        trace_xfs_dqreclaim_busy(dqp);
        XFS_STATS_INC(xs_qm_dqreclaim_misses);
        return LRU_SKIP;

out_unlock_dirty:
        trace_xfs_dqreclaim_busy(dqp);
        XFS_STATS_INC(xs_qm_dqreclaim_misses);
        xfs_dqunlock(dqp);
        spin_lock(lru_lock);
        return LRU_RETRY;
}
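
/*
 * Editor's note on the lru_status codes returned above:
 *
 *      LRU_REMOVED - the dquot left the LRU (re-referenced, or moved to
 *                    the dispose list for freeing)
 *      LRU_SKIP    - the dquot stays put; the walker moves on (lock or
 *                    flush-lock contention)
 *      LRU_RETRY   - the lru_lock was dropped for a flush; the generic
 *                    walker restarts the scan
 */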

static unsigned long
xfs_qm_shrink_scan(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
{
        struct xfs_quotainfo    *qi = container_of(shrink,
                                        struct xfs_quotainfo, qi_shrinker);
        struct xfs_qm_isolate   isol;
        unsigned long           freed;
        int                     error;
        unsigned long           nr_to_scan = sc->nr_to_scan;

        if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
                return 0;

        INIT_LIST_HEAD(&isol.buffers);
        INIT_LIST_HEAD(&isol.dispose);

        freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate,
                                   &isol, &nr_to_scan);

        error = xfs_buf_delwri_submit(&isol.buffers);
        if (error)
                xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

        while (!list_empty(&isol.dispose)) {
                struct xfs_dquot        *dqp;

                dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
                list_del_init(&dqp->q_lru);
                xfs_qm_dqfree_one(dqp);
        }

        return freed;
}

static unsigned long
xfs_qm_shrink_count(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
{
        struct xfs_quotainfo    *qi = container_of(shrink,
                                        struct xfs_quotainfo, qi_shrinker);

        return list_lru_count_node(&qi->qi_lru, sc->nid);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qinf;
        int             error;
        xfs_dquot_t     *dqp;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

        error = -list_lru_init(&qinf->qi_lru);
        if (error)
                goto out_free_qinf;

        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        error = xfs_qm_init_quotainos(mp);
        if (error)
                goto out_free_lru;

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * We look at the USR dquot with id == 0 first, but if user quotas
         * are not enabled we fall back to the GRP dquot with id == 0.
         * We don't really care to keep separate default limits for user
         * and group quotas, at least not at this point.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;

                /*
                 * The warnings and timers set the grace period given to
                 * a user or group before he or she can no longer perform
                 * any writes. If it is zero, a default is used.
                 */
                qinf->qi_btimelimit = ddqp->d_btimer ?
                        be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = ddqp->d_itimer ?
                        be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
                        be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = ddqp->d_bwarns ?
                        be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = ddqp->d_iwarns ?
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
                qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
                qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
                qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }

        qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
        qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
        register_shrinker(&qinf->qi_shrinker);
        return 0;

out_free_lru:
        list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
        kmem_free(qinf);
        mp->m_quotainfo = NULL;
        return error;
}
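
/*
 * Editor's note (added): the id == 0 dquot read above is special - it is
 * never enforced against; its limit and timer fields serve as the
 * filesystem-wide defaults that are applied to dquots which carry no
 * explicit limits of their own.
 */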

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        unregister_shrinker(&qi->qi_shrinker);
        list_lru_destroy(&qi->qi_lru);

        if (qi->qi_uquotaip) {
                IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        if (qi->qi_pquotaip) {
                IRELE(qi->qi_pquotaip);
                qi->qi_pquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
        xfs_mount_t     *mp,
        xfs_inode_t     **ip,
        __int64_t       sbfields,
        uint            flags)
{
        xfs_trans_t     *tp;
        int             error;
        int             committed;

        *ip = NULL;
        /*
         * With a superblock that doesn't have a separate pquotino, we
         * share an inode between gquota and pquota. If the on-disk
         * superblock has GQUOTA and the filesystem is now mounted
         * with PQUOTA, just use sb_gquotino for sb_pquotino and
         * vice-versa.
         */
        if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
                        (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
                xfs_ino_t ino = NULLFSINO;

                if ((flags & XFS_QMOPT_PQUOTA) &&
                             (mp->m_sb.sb_gquotino != NULLFSINO)) {
                        ino = mp->m_sb.sb_gquotino;
                        ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
                } else if ((flags & XFS_QMOPT_GQUOTA) &&
                             (mp->m_sb.sb_pquotino != NULLFSINO)) {
                        ino = mp->m_sb.sb_pquotino;
                        ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
                }
                if (ino != NULLFSINO) {
                        error = xfs_iget(mp, NULL, ino, 0, 0, ip);
                        if (error)
                                return error;
                        mp->m_sb.sb_gquotino = NULLFSINO;
                        mp->m_sb.sb_pquotino = NULLFSINO;
                }
        }

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
                                  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        if (!*ip) {
                error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
                                       &committed);
                if (error) {
                        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                         XFS_TRANS_ABORT);
                        return error;
                }
        }

        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                        XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
                                (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
                                 XFS_SB_QFLAGS));

                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;
                mp->m_sb.sb_pquotino = NULLFSINO;

                /* qflags will get updated fully _after_ quotacheck */
                mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
        else if (flags & XFS_QMOPT_GQUOTA)
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
        else
                mp->m_sb.sb_pquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);

        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        if (error) {
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
                return error;
        }
        return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
        xfs_dqid_t      id,
        uint            type)
{
        struct xfs_dqblk        *dqb;
        int                     j;

        trace_xfs_reset_dqcounts(bp, _RET_IP_);

        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
         */
#ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
        dqb = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                struct xfs_disk_dquot   *ddq;

                ddq = (struct xfs_disk_dquot *)&dqb[j];

                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
                 * find uninitialised dquot blks. See comment in xfs_dqcheck.
                 */
                xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                            "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
                ddq->d_btimer = 0;
                ddq->d_itimer = 0;
                ddq->d_rtbtimer = 0;
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;

                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        xfs_update_cksum((char *)&dqb[j],
                                         sizeof(struct xfs_dqblk),
                                         XFS_DQUOT_CRC_OFF);
                }
        }
}

STATIC int
xfs_qm_dqiter_bufs(
        struct xfs_mount        *mp,
        xfs_dqid_t              firstid,
        xfs_fsblock_t           bno,
        xfs_filblks_t           blkcnt,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_buf          *bp;
        int                     error;
        int                     type;

        ASSERT(blkcnt > 0);
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;

        /*
         * The blkcnt arg can be a very big number, and might even be
         * larger than the log itself. So, we have to break it up into
         * manageable-sized transactions.
         * Note that we don't start a permanent transaction here; we might
         * not be able to get a log reservation for the whole thing up front,
         * and we don't really care to either, because we just discard
         * everything if we were to crash in the middle of this loop.
         */
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                              &xfs_dquot_buf_ops);

                /*
                 * CRC and validation errors will return an EFSCORRUPTED here.
                 * If this occurs, re-read without CRC validation so that we
                 * can repair the damage via xfs_qm_reset_dqcounts(). This
                 * process will leave a trace in the log indicating corruption
                 * has been detected.
                 */
                if (error == EFSCORRUPTED) {
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                      XFS_FSB_TO_DADDR(mp, bno),
                                      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                                      NULL);
                }

                if (error)
                        break;

                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);

                /* go to the next block. */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }

        return error;
}
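
/*
 * Worked example (editor's addition, hypothetical numbers): if
 * qi_dqperchunk were 30, the chunk at block @bno + k would hold dquot
 * ids [firstid + 30k, firstid + 30k + 29], which is why the loop above
 * advances @firstid by qi_dqperchunk for every block it processes.
 */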

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        struct xfs_inode        *qip,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_bmbt_irec    *map;
        int                     i, nmaps;       /* number of map entries */
        int                     error;          /* return value */
        xfs_fileoff_t           lblkno;
        xfs_filblks_t           maxlblkcnt;
        xfs_dqid_t              firstid;
        xfs_fsblock_t           rablkno;
        xfs_filblks_t           rablkcnt;

        error = 0;
        /*
         * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single threaded.
         */
        if (qip->i_d.di_nblocks == 0)
                return 0;

        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        do {
                uint            lock_mode;

                nmaps = XFS_DQITER_MAP_SIZE;
                /*
                 * We aren't changing the inode itself. Just changing
                 * some of its data. No new blocks are added here, and
                 * the inode is never added to the transaction.
                 */
                lock_mode = xfs_ilock_data_map_shared(qip);
                error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
                                       map, &nmaps, 0);
                xfs_iunlock(qip, lock_mode);
                if (error)
                        break;

                ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
                for (i = 0; i < nmaps; i++) {
                        ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
                        ASSERT(map[i].br_blockcount);

                        lblkno += map[i].br_blockcount;

                        if (map[i].br_startblock == HOLESTARTBLOCK)
                                continue;

                        firstid = (xfs_dqid_t) map[i].br_startoff *
                                mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
                        if ((i+1 < nmaps) &&
                            (map[i+1].br_startblock != HOLESTARTBLOCK)) {
                                rablkcnt = map[i+1].br_blockcount;
                                rablkno = map[i+1].br_startblock;
                                while (rablkcnt--) {
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
                                               mp->m_quotainfo->qi_dqchunklen,
                                               NULL);
                                        rablkno++;
                                }
                        }
                        /*
                         * Iterate through all the blocks in the extent and
                         * reset the counters of all the dquots inside them.
                         */
                        error = xfs_qm_dqiter_bufs(mp, firstid,
                                                   map[i].br_startblock,
                                                   map[i].br_blockcount,
                                                   flags, buffer_list);
                        if (error)
                                goto out;
                }
        } while (nmaps > 0);

out:
        kmem_free(map);
        return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
        struct xfs_inode        *ip,
        xfs_dqid_t              id,
        uint                    type,
        xfs_qcnt_t              nblks,
        xfs_qcnt_t              rtblks)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget(mp, ip, id, type,
                             XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
        if (error) {
                /*
                 * Shouldn't be able to turn off quotas here.
                 */
                ASSERT(error != ESRCH);
                ASSERT(error != ENOENT);
                return error;
        }

        trace_xfs_dqadjust(dqp);

        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
        be64_add_cpu(&dqp->q_core.d_icount, 1);
        dqp->q_res_icount++;
        if (nblks) {
                be64_add_cpu(&dqp->q_core.d_bcount, nblks);
                dqp->q_res_bcount += nblks;
        }
        if (rtblks) {
                be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
                dqp->q_res_rtbcount += rtblks;
        }

        /*
         * Set default limits, adjust timers (since we changed usages)
         *
         * There are no timers for the default values set in the root dquot.
         */
        if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(mp, dqp);
                xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
        }

        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_qm_dqput(dqp);
        return 0;
}

STATIC int
xfs_qm_get_rtblks(
        xfs_inode_t     *ip,
        xfs_qcnt_t      *O_rtblks)
{
        xfs_filblks_t   rtblks;                 /* total rt blks */
        xfs_extnum_t    idx;                    /* extent record index */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_extnum_t    nextents;               /* number of extent entries */
        int             error;

        ASSERT(XFS_IS_REALTIME_INODE(ip));
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
                if (error)
                        return error;
        }
        rtblks = 0;
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (idx = 0; idx < nextents; idx++)
                rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
        *O_rtblks = (xfs_qcnt_t)rtblks;
        return 0;
}
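
/*
 * Editor's note (added): di_nblocks counts data-device and realtime
 * blocks together, so the bulkstat callback below splits them as
 *
 *      nblks = di_nblocks - rtblks
 *
 * and charges the two totals to the dquots separately.
 */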

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* not used */
        int             ubsize,         /* not used */
        int             *ubused,        /* not used */
        int             *res)           /* result code value */
{
        xfs_inode_t     *ip;
        xfs_qcnt_t      nblks, rtblks = 0;
        int             error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * rootino must have its resources accounted for, not so with the quota
         * inodes.
         */
        if (xfs_is_quota_inode(&mp->m_sb, ino)) {
                *res = BULKSTAT_RV_NOTHING;
                return XFS_ERROR(EINVAL);
        }

        /*
         * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
         * interface expects the inode to be exclusively locked because that's
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
        }

        ASSERT(ip->i_delayed_blks == 0);

        if (XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Walk through the extent list and count the realtime blocks.
                 */
                error = xfs_qm_get_rtblks(ip, &rtblks);
                if (error)
                        goto error0;
        }

        nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

        /*
         * Add the (disk blocks and inode) resources occupied by this
         * inode to its dquots. We do this adjustment in the incore dquot,
         * and also copy the changes to its buffer.
         * We don't care about putting these changes in a transaction
         * envelope because if we crash in the middle of a 'quotacheck'
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
         * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
                                                   XFS_DQ_USER, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
                                                   XFS_DQ_GROUP, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
                                                   XFS_DQ_PROJ, nblks, rtblks);
                if (error)
                        goto error0;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_DIDONE;
        return 0;

error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_GIVEUP;
        return error;
}

STATIC int
xfs_qm_flush_one(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct list_head        *buffer_list = data;
        struct xfs_buf          *bp = NULL;
        int                     error = 0;

        xfs_dqlock(dqp);
        if (dqp->dq_flags & XFS_DQ_FREEING)
                goto out_unlock;
        if (!XFS_DQ_IS_DIRTY(dqp))
                goto out_unlock;

        xfs_dqflock(dqp);
        error = xfs_qm_dqflush(dqp, &bp);
        if (error)
                goto out_unlock;

        xfs_buf_delwri_queue(bp, buffer_list);
        xfs_buf_relse(bp);
out_unlock:
        xfs_dqunlock(dqp);
        return error;
}
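
/*
 * Editor's note on the bulkstat contract used above: *res reports
 * per-inode progress back to xfs_bulkstat():
 *
 *      BULKSTAT_RV_DIDONE  - inode was accounted; continue
 *      BULKSTAT_RV_NOTHING - inode skipped (e.g. a quota inode)
 *      BULKSTAT_RV_GIVEUP  - hard error; abort the walk
 */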

/*
 * Walk through all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
        xfs_mount_t     *mp)
{
        int                     done, count, error, error2;
        xfs_ino_t               lastino;
        size_t                  structsz;
        uint                    flags;
        LIST_HEAD               (buffer_list);
        struct xfs_inode        *uip = mp->m_quotainfo->qi_uquotaip;
        struct xfs_inode        *gip = mp->m_quotainfo->qi_gquotaip;
        struct xfs_inode        *pip = mp->m_quotainfo->qi_pquotaip;

        count = INT_MAX;
        structsz = 1;
        lastino = 0;
        flags = 0;

        ASSERT(uip || gip || pip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        xfs_notice(mp, "Quotacheck needed: Please wait.");

        /*
         * First we go through all the dquots on disk, USR and GRP/PRJ, and
         * reset their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
        if (uip) {
                error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }

        if (gip) {
                error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_GQUOTA_CHKD;
        }

        if (pip) {
                error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_PQUOTA_CHKD;
        }

        do {
                /*
                 * Iterate through all the inodes in the file system,
                 * adjusting the corresponding dquot counters in core.
                 */
                error = xfs_bulkstat(mp, &lastino, &count,
                                     xfs_qm_dqusage_adjust,
                                     structsz, NULL, &done);
                if (error)
                        break;

        } while (!done);

        /*
         * We've made all the changes that we need to make incore. Flush them
         * down to disk buffers if everything was updated successfully.
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
                                          &buffer_list);
        }
        if (XFS_IS_GQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }
        if (XFS_IS_PQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }

        error2 = xfs_buf_delwri_submit(&buffer_list);
        if (!error)
                error = error2;

        /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
         * and turn quotaoff. The dquots won't be attached to any of the inodes
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }

        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
        mp->m_qflags |= flags;

error_return:
        while (!list_empty(&buffer_list)) {
                struct xfs_buf *bp =
                        list_first_entry(&buffer_list, struct xfs_buf, b_list);
                list_del_init(&bp->b_list);
                xfs_buf_relse(bp);
        }

        if (error) {
                xfs_warn(mp,
                        "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
                        error);
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo != NULL);
                xfs_qm_destroy_quotainfo(mp);
                if (xfs_mount_reset_sbqflags(mp)) {
                        xfs_warn(mp,
                                "Quotacheck: Failed to reset quota flags.");
                }
        } else
                xfs_notice(mp, "Quotacheck: Done.");
        return error;
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
        xfs_mount_t     *mp)
{
        struct xfs_inode        *uip = NULL;
        struct xfs_inode        *gip = NULL;
        struct xfs_inode        *pip = NULL;
        int                     error;
        __int64_t               sbflags = 0;
        uint                    flags = 0;

        ASSERT(mp->m_quotainfo);

        /*
         * Get the uquota and gquota inodes
         */
        if (xfs_sb_version_hasquota(&mp->m_sb)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                             0, 0, &uip);
                        if (error)
                                return XFS_ERROR(error);
                }
                if (XFS_IS_GQUOTA_ON(mp) &&
                    mp->m_sb.sb_gquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_gquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                             0, 0, &gip);
                        if (error)
                                goto error_rele;
                }
                if (XFS_IS_PQUOTA_ON(mp) &&
                    mp->m_sb.sb_pquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_pquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
                                             0, 0, &pip);
                        if (error)
                                goto error_rele;
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
                sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                            XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
                            XFS_SB_QFLAGS);
        }

        /*
         * Create the three inodes, if they don't exist already. The changes
         * made above will get added to a transaction and logged in one of
         * the qino_alloc calls below.
         * If the device is readonly,
         * temporarily switch to read-write to do this.
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                error = xfs_qm_qino_alloc(mp, &uip,
                                          sbflags | XFS_SB_UQUOTINO,
                                          flags | XFS_QMOPT_UQUOTA);
                if (error)
                        goto error_rele;

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
                error = xfs_qm_qino_alloc(mp, &gip,
                                          sbflags | XFS_SB_GQUOTINO,
                                          flags | XFS_QMOPT_GQUOTA);
                if (error)
                        goto error_rele;

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
                error = xfs_qm_qino_alloc(mp, &pip,
                                          sbflags | XFS_SB_PQUOTINO,
                                          flags | XFS_QMOPT_PQUOTA);
                if (error)
                        goto error_rele;
        }

        mp->m_quotainfo->qi_uquotaip = uip;
        mp->m_quotainfo->qi_gquotaip = gip;
        mp->m_quotainfo->qi_pquotaip = pip;

        return 0;

error_rele:
        if (uip)
                IRELE(uip);
        if (gip)
                IRELE(gip);
        if (pip)
                IRELE(pip);
        return XFS_ERROR(error);
}

STATIC void
xfs_qm_dqfree_one(
        struct xfs_dquot        *dqp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        mutex_lock(&qi->qi_tree_lock);
        radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));

        qi->qi_dquots--;
        mutex_unlock(&qi->qi_tree_lock);

        xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. The flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
        xfs_mount_t     *mp,
        __int64_t       flags)
{
        xfs_trans_t     *tp;
        int             error;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        xfs_mod_sb(tp, flags);
        error = xfs_trans_commit(tp, 0);

        return error;
}
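
/*
 * Usage sketch (editor's addition): xfs_qm_mount_quotas() above uses
 * this helper to push a quota-flags change out to disk:
 *
 *      error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
 */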


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in   : inode (unlocked)
 * out  : udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
        struct xfs_inode        *ip,
        xfs_dqid_t              uid,
        xfs_dqid_t              gid,
        prid_t                  prid,
        uint                    flags,
        struct xfs_dquot        **O_udqpp,
        struct xfs_dquot        **O_gdqpp,
        struct xfs_dquot        **O_pdqpp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *uq = NULL;
        struct xfs_dquot        *gq = NULL;
        struct xfs_dquot        *pq = NULL;
        int                     error;
        uint                    lockflags;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        lockflags = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockflags);

        if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
                gid = ip->i_d.di_gid;

        /*
         * Attach the dquot(s) to this inode, doing a dquot allocation
         * if necessary. The dquot(s) will not be locked.
         */
        if (XFS_NOT_DQATTACHED(mp, ip)) {
                error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
                if (error) {
                        xfs_iunlock(ip, lockflags);
                        return error;
                }
        }

        if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
                if (ip->i_d.di_uid != uid) {
                        /*
                         * What we need is the dquot that has this uid, and
                         * if we send the inode to dqget, the uid of the inode
                         * takes priority over what's sent in the uid argument.
                         * We must unlock the inode here before calling dqget
                         * if we're not sending the inode, because otherwise
                         * we'll deadlock by doing trans_reserve while
                         * holding ilock.
                         */
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, NULL, uid,
                                                 XFS_DQ_USER,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &uq);
                        if (error) {
                                ASSERT(error != ENOENT);
                                return error;
                        }
                        /*
                         * Get the ilock in the right order.
                         */
                        xfs_dqunlock(uq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        /*
                         * Take an extra reference, because we'll return
                         * this to the caller.
                         */
                        ASSERT(ip->i_udquot);
                        uq = xfs_qm_dqhold(ip->i_udquot);
                }
        }
        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
                if (ip->i_d.di_gid != gid) {
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, NULL, gid,
                                                 XFS_DQ_GROUP,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &gq);
                        if (error) {
                                ASSERT(error != ENOENT);
                                goto error_rele;
                        }
                        xfs_dqunlock(gq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_gdquot);
                        gq = xfs_qm_dqhold(ip->i_gdquot);
                }
        }
        if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
                if (xfs_get_projid(ip) != prid) {
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
                                                 XFS_DQ_PROJ,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &pq);
                        if (error) {
                                ASSERT(error != ENOENT);
                                goto error_rele;
                        }
                        xfs_dqunlock(pq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_pdquot);
                        pq = xfs_qm_dqhold(ip->i_pdquot);
                }
        }
        if (uq)
                trace_xfs_dquot_dqalloc(ip);

        xfs_iunlock(ip, lockflags);
        if (O_udqpp)
                *O_udqpp = uq;
        else if (uq)
                xfs_qm_dqrele(uq);
        if (O_gdqpp)
                *O_gdqpp = gq;
        else if (gq)
                xfs_qm_dqrele(gq);
        if (O_pdqpp)
                *O_pdqpp = pq;
        else if (pq)
                xfs_qm_dqrele(pq);
        return 0;

error_rele:
        if (gq)
                xfs_qm_dqrele(gq);
        if (uq)
                xfs_qm_dqrele(uq);
        return error;
}
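
/*
 * Usage sketch (editor's addition, simplified from the file-creation
 * path; exact caller details may differ):
 *
 *      struct xfs_dquot *udqp, *gdqp, *pdqp;
 *
 *      error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
 *                                 xfs_kgid_to_gid(current_fsgid()), prid,
 *                                 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *                                 &udqp, &gdqp, &pdqp);
 *
 * The returned dquots are later attached to the new inode with
 * xfs_qm_vop_create_dqattach() and released with xfs_qm_dqrele().
 */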

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        xfs_dquot_t     **IO_olddq,
        xfs_dquot_t     *newdq)
{
        xfs_dquot_t     *prevdq;
        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

        /* old dquot */
        prevdq = *IO_olddq;
        ASSERT(prevdq);
        ASSERT(prevdq != newdq);

        xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
        xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

        /* the sparkling new dquot */
        xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

        /*
         * Take an extra reference, because the inode is going to keep
         * this dquot pointer even after the trans_commit.
         */
        *IO_olddq = xfs_qm_dqhold(newdq);

        return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        uint                    flags)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    delblks, blkflags, prjflags = 0;
        struct xfs_dquot        *udq_unres = NULL;
        struct xfs_dquot        *gdq_unres = NULL;
        struct xfs_dquot        *pdq_unres = NULL;
        struct xfs_dquot        *udq_delblks = NULL;
        struct xfs_dquot        *gdq_delblks = NULL;
        struct xfs_dquot        *pdq_delblks = NULL;
        int                     error;


        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        delblks = ip->i_delayed_blks;
        blkflags = XFS_IS_REALTIME_INODE(ip) ?
                        XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

        if (XFS_IS_UQUOTA_ON(mp) && udqp &&
            ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
                udq_delblks = udqp;
                /*
                 * If there are delayed allocation blocks, then we have to
                 * unreserve those from the old dquot, and add them to the
                 * new dquot.
                 */
                if (delblks) {
                        ASSERT(ip->i_udquot);
                        udq_unres = ip->i_udquot;
                }
        }
        if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
            ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
                gdq_delblks = gdqp;
                if (delblks) {
                        ASSERT(ip->i_gdquot);
                        gdq_unres = ip->i_gdquot;
                }
        }

        if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
            xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
                prjflags = XFS_QMOPT_ENOSPC;
                pdq_delblks = pdqp;
                if (delblks) {
                        ASSERT(ip->i_pdquot);
                        pdq_unres = ip->i_pdquot;
                }
        }

        error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
                                udq_delblks, gdq_delblks, pdq_delblks,
                                ip->i_d.di_nblocks, 1,
                                flags | blkflags | prjflags);
        if (error)
                return error;

        /*
         * Do the delayed blks reservations/unreservations now. Since these
         * are done without the help of a transaction, if a reservation fails
         * its previous reservations won't be automatically undone by trans
         * code. So, we have to do it manually here.
         */
        if (delblks) {
                /*
                 * Do the reservations first. Unreservation can't fail.
                 */
                ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
                ASSERT(udq_unres || gdq_unres || pdq_unres);
                error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                            udq_delblks, gdq_delblks, pdq_delblks,
                            (xfs_qcnt_t)delblks, 0,
                            flags | blkflags | prjflags);
                if (error)
                        return error;
                xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                                udq_unres, gdq_unres, pdq_unres,
                                -((xfs_qcnt_t)delblks), 0, blkflags);
        }

        return 0;
}

int
xfs_qm_vop_rename_dqattach(
        struct xfs_inode        **i_tab)
{
        struct xfs_mount        *mp = i_tab[0]->i_mount;
        int                     i;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        for (i = 0; (i < 4 && i_tab[i]); i++) {
                struct xfs_inode        *ip = i_tab[i];
                int                     error;

                /*
                 * Watch out for duplicate entries in the table.
                 */
                if (i == 0 || ip != i_tab[i-1]) {
                        if (XFS_NOT_DQATTACHED(mp, ip)) {
                                error = xfs_qm_dqattach(ip, 0);
                                if (error)
                                        return error;
                        }
                }
        }
        return 0;
}

void
xfs_qm_vop_create_dqattach(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        if (udqp && XFS_IS_UQUOTA_ON(mp)) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

                ip->i_udquot = xfs_qm_dqhold(udqp);
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
        if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
                ASSERT(ip->i_gdquot == NULL);
                ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
                ip->i_gdquot = xfs_qm_dqhold(gdqp);
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
        if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
                ASSERT(ip->i_pdquot == NULL);
                ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

                ip->i_pdquot = xfs_qm_dqhold(pdqp);
                xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
}