/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine, as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = XFS_DQUOT_TREE(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

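/*
 * Example of the walker contract above (illustrative sketch only;
 * xfs_qm_count_one is a hypothetical callback, not part of this file).
 * A callback passed to xfs_qm_dquot_walk() follows the same contract as
 * xfs_qm_dqpurge(): return 0 on success, EAGAIN to have the walk retried
 * after a delay, and any other error to be recorded as last_error
 * (EFSCORRUPTED additionally aborts the walk):
 *
 *	STATIC int
 *	xfs_qm_count_one(struct xfs_dquot *dqp, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int	count = 0;
 *	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_count_one, &count);
 */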

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;
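
	/*
	 * The lookup below proceeds in three steps of increasing cost:
	 * (1) a dquot already attached to the inode, (2) for group/project
	 * types, the group dquot cached inside the user dquot (udqhint),
	 * and (3) a full xfs_qm_dqget() lookup, which may allocate the
	 * dquot on disk.
	 */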

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time because of lock ordering: the
		 * inode lock comes before any dquot lock, and we may drop
		 * and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}


/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return false;
	return true;
}

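/*
 * Example of the attach interface defined below (illustrative sketch;
 * the calling context is assumed, the call itself is the real API):
 *
 *	error = xfs_qm_dqattach(ip, 0);
 *	if (error)
 *		return error;
 *
 * after which ip->i_udquot and/or ip->i_gdquot are attached for each
 * quota type that is enabled.
 */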

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in thrashing.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}

int
xfs_qm_calc_dquots_per_chunk(
	struct xfs_mount	*mp,
	unsigned int		nbblks)	/* basic block units */
{
	unsigned int	ndquots;

	ASSERT(nbblks > 0);
	ndquots = BBTOB(nbblks);
	do_div(ndquots, sizeof(xfs_dqblk_t));

	return ndquots;
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
							qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we fall back to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer perform
		 * any writes. If a field is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}

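/*
 * Illustrative note on the defaulting above (a sketch, not normative):
 * a freshly initialised id-0 dquot has all of its timer and warn fields
 * zero, so every limit falls back to its compile-time default, e.g.
 *
 *	qinf->qi_btimelimit == XFS_QM_BTIMELIMIT	(7 days)
 */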

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}
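
	/*
	 * Worked example of the adjustment above: an inode owning 8 data
	 * blocks and no realtime blocks arrives here with nblks == 8 and
	 * rtblks == 0, so d_icount grows by 1 and d_bcount by 8, mirrored
	 * in the q_res_icount/q_res_bcount reservation counters.
	 */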

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
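
	/*
	 * di_nblocks counts data and realtime blocks together, so the
	 * regular-block charge for the quota is whatever is left after
	 * subtracting the realtime blocks counted above.
	 */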

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error, error2;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;
	LIST_HEAD	(buffer_list);

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);
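
	/*
	 * Every dquot touched by the bulkstat pass above is now dirty in
	 * memory; the per-type walks below push them onto the delwri buffer
	 * list so a single submission covers all of them.
	 */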

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotas off. The dquots won't be attached to any of the
	 * inodes at this point (because we intentionally didn't in
	 * dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_move_tail;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_unlock_move_tail;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_move_tail;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_unlock_move_tail;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
out_unlock_move_tail:
	xfs_dqunlock(dqp);
out_move_tail:
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}

STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi =
		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD		(buffer_list);
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;
	if (!nr_to_scan)
		goto out;

	mutex_lock(&qi->qi_lru_lock);
	while (!list_empty(&qi->qi_lru_list)) {
		if (nr_to_scan-- <= 0)
			break;
		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
				       q_lru);
		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
	}
	mutex_unlock(&qi->qi_lru_lock);

	error = xfs_buf_delwri_submit(&buffer_list);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

out:
	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. The flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
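
/*
 * Example of xfs_qm_write_sb_changes() above, mirroring the call made in
 * xfs_qm_mount_quotas():
 *
 *	if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS))
 *		xfs_alert(mp, "%s: Superblock update failed!", __func__);
 */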


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
	int		error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return (error);

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails,
	 * its previous reservations won't be automatically undone by the
	 * trans code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return (error);
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return (0);
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
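
/*
 * Illustrative sketch of how the vnodeops helpers above are typically
 * paired on a create path (the calling code is assumed here; compare
 * real callers such as xfs_create()):
 *
 *	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(),
 *				   prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp);
 *	...allocate the inode within a transaction tp...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
 *	...commit the transaction, then drop our references...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 */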