/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
struct mutex	xfs_Gqm_lock;
struct xfs_qm	*xfs_Gqm;

kmem_zone_t	*qm_dqzone;
kmem_zone_t	*qm_dqtrxzone;

STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);

STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);

static struct shrinker xfs_qm_shaker = {
	.shrink = xfs_qm_shake,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Initialize the XQM structure.
 * Note that there is not one quota manager per file system.
 */
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
	xfs_dqhash_t	*udqhash, *gdqhash;
	xfs_qm_t	*xqm;
	size_t		hsize;
	uint		i;

	/*
	 * Initialize the dquot hash tables.
	 */
	udqhash = kmem_zalloc_greedy(&hsize,
				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
	if (!udqhash)
		goto out;

	gdqhash = kmem_zalloc_large(hsize);
	if (!gdqhash)
		goto out_free_udqhash;

	hsize /= sizeof(xfs_dqhash_t);

	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
	xqm->qm_dqhashmask = hsize - 1;
	xqm->qm_usr_dqhtable = udqhash;
	xqm->qm_grp_dqhtable = gdqhash;
	ASSERT(xqm->qm_usr_dqhtable != NULL);
	ASSERT(xqm->qm_grp_dqhtable != NULL);

	for (i = 0; i < hsize; i++) {
		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
	}

	/*
	 * Freelist of all dquots of all file systems
	 */
	INIT_LIST_HEAD(&xqm->qm_dqfrlist);
	xqm->qm_dqfrlist_cnt = 0;
	mutex_init(&xqm->qm_dqfrlist_lock);
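	/*
	 * Note: the dquot and dqtrx zones set up below are global and are
	 * not destroyed on this path; xfs_qm_destroy() frees the hash
	 * tables but leaves qm_dqzone and qm_dqtrxzone in place so that a
	 * later xfs_Gqm_init() can simply reuse them.
	 */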
	/*
	 * dquot zone. we register our own low-memory callback.
	 */
	if (!qm_dqzone) {
		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
						"xfs_dquots");
		qm_dqzone = xqm->qm_dqzone;
	} else
		xqm->qm_dqzone = qm_dqzone;

	register_shrinker(&xfs_qm_shaker);

	/*
	 * The t_dqinfo portion of transactions.
	 */
	if (!qm_dqtrxzone) {
		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
						   "xfs_dqtrx");
		qm_dqtrxzone = xqm->qm_dqtrxzone;
	} else
		xqm->qm_dqtrxzone = qm_dqtrxzone;

	atomic_set(&xqm->qm_totaldquots, 0);
	xqm->qm_nrefs = 0;
	return xqm;

 out_free_udqhash:
	kmem_free_large(udqhash);
 out:
	return NULL;
}

/*
 * Destroy the global quota manager when its reference count goes to zero.
 */
STATIC void
xfs_qm_destroy(
	struct xfs_qm	*xqm)
{
	int		hsize, i;

	ASSERT(xqm != NULL);
	ASSERT(xqm->qm_nrefs == 0);

	unregister_shrinker(&xfs_qm_shaker);

	mutex_lock(&xqm->qm_dqfrlist_lock);
	ASSERT(list_empty(&xqm->qm_dqfrlist));
	mutex_unlock(&xqm->qm_dqfrlist_lock);

	hsize = xqm->qm_dqhashmask + 1;
	for (i = 0; i < hsize; i++) {
		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
	}
	kmem_free_large(xqm->qm_usr_dqhtable);
	kmem_free_large(xqm->qm_grp_dqhtable);
	xqm->qm_usr_dqhtable = NULL;
	xqm->qm_grp_dqhtable = NULL;
	xqm->qm_dqhashmask = 0;

	kmem_free(xqm);
}

/*
 * Called at mount time to let XQM know that another file system is
 * starting quotas. This isn't crucial information as the individual mount
 * structures are pretty independent, but it helps the XQM keep a
 * global view of what's going on.
 */
/* ARGSUSED */
STATIC int
xfs_qm_hold_quotafs_ref(
	struct xfs_mount *mp)
{
	/*
	 * Need to lock the xfs_Gqm structure for things like this. For example,
	 * the structure could disappear between the entry to this routine and
	 * a HOLD operation if not locked.
	 */
	mutex_lock(&xfs_Gqm_lock);

	if (!xfs_Gqm) {
		xfs_Gqm = xfs_Gqm_init();
		if (!xfs_Gqm) {
			mutex_unlock(&xfs_Gqm_lock);
			return ENOMEM;
		}
	}

	/*
	 * We can keep a list of all filesystems with quotas mounted for
	 * debugging and statistical purposes, but ...
	 * Just take a reference and get out.
	 */
	xfs_Gqm->qm_nrefs++;
	mutex_unlock(&xfs_Gqm_lock);

	return 0;
}


/*
 * Release the reference that a filesystem took at mount time,
 * so that we know when we need to destroy the entire quota manager.
 */
/* ARGSUSED */
STATIC void
xfs_qm_rele_quotafs_ref(
	struct xfs_mount *mp)
{
	ASSERT(xfs_Gqm);
	ASSERT(xfs_Gqm->qm_nrefs > 0);

	/*
	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
	 * be restarted.
	 */
	mutex_lock(&xfs_Gqm_lock);
	if (--xfs_Gqm->qm_nrefs == 0) {
		xfs_qm_destroy(xfs_Gqm);
		xfs_Gqm = NULL;
	}
	mutex_unlock(&xfs_Gqm_lock);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

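/*
 * Note on XQM lifetime: xfs_qm_hold_quotafs_ref() lazily creates the
 * global manager on the first quota-enabled mount, and
 * xfs_qm_rele_quotafs_ref() tears it down when the last reference goes
 * away, so xfs_Gqm may be NULL whenever no quota-enabled filesystem is
 * mounted.
 */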

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}

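/*
 * Note for the flush loop below: qi_dqreclaims acts as a coarse
 * generation counter. It is sampled before the mplist lock is dropped
 * for the disk write; if it has changed by the time the lock is
 * retaken, the list may have been modified underneath us and the walk
 * restarts from the top (hence the "XXX a sentinel would be better"
 * remark).
 */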
/*
 * Flush all dquots of the given file system to disk. The dquots are
 * _not_ purged from memory here, just their data written to disk.
 */
STATIC int
xfs_qm_dqflush_all(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	int			recl;
	struct xfs_dquot	*dqp;
	int			error;

	if (!q)
		return 0;
again:
	mutex_lock(&q->qi_dqlist_lock);
	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
		xfs_dqlock(dqp);
		if ((dqp->dq_flags & XFS_DQ_FREEING) ||
		    !XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		recl = q->qi_dqreclaims;
		if (!xfs_dqflock_nowait(dqp)) {
			/*
			 * If we can't grab the flush lock then check
			 * to see if the dquot has been flushed delayed
			 * write.  If so, grab its buffer and send it
			 * out immediately.  We'll be able to acquire
			 * the flush lock when the I/O completes.
			 */
			xfs_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		mutex_unlock(&q->qi_dqlist_lock);
		error = xfs_qm_dqflush(dqp, 0);
		xfs_dqunlock(dqp);
		if (error)
			return error;

		mutex_lock(&q->qi_dqlist_lock);
		if (recl != q->qi_dqreclaims) {
			mutex_unlock(&q->qi_dqlist_lock);
			/* XXX restart limit */
			goto again;
		}
	}

	mutex_unlock(&q->qi_dqlist_lock);
	/* return ! busy */
	return 0;
}

/*
 * Release the group dquot pointers the user dquots may be
 * carrying around as a hint. mplist is locked on entry and exit.
 */
STATIC void
xfs_qm_detach_gdquots(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dquot	*dqp, *gdqp;

 again:
	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&q->qi_dqlist_lock);
			delay(1);
			mutex_lock(&q->qi_dqlist_lock);
			goto again;
		}

		gdqp = dqp->q_gdquot;
		if (gdqp)
			dqp->q_gdquot = NULL;
		xfs_dqunlock(dqp);

		if (gdqp)
			xfs_qm_dqrele(gdqp);
	}
}

/*
 * Go through all the incore dquots of this file system and take them
 * off the mplist and hashlist, if the dquot type matches the dqtype
 * parameter. This is used when turning off quota accounting for
 * users and/or groups, as well as when the filesystem is unmounting.
 */
STATIC int
xfs_qm_dqpurge_int(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dquot	*dqp, *n;
	uint			dqtype;
	int			nmisses = 0;
	LIST_HEAD		(dispose_list);

	if (!q)
		return 0;

	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
	dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;

	mutex_lock(&q->qi_dqlist_lock);

	/*
	 * In the first pass through all incore dquots of this filesystem,
	 * we release the group dquot pointers the user dquots may be
	 * carrying around as a hint. We need to do this irrespective of
	 * what's being turned off.
	 */
	xfs_qm_detach_gdquots(mp);

	/*
	 * Try to get rid of all of the unwanted dquots.
	 */
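	/*
	 * Under the mplist lock we move unreferenced dquots of the
	 * matching types to a private dispose list; dquots that still
	 * carry a reference are only counted as misses so the caller can
	 * back off and retry.
	 */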
	list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
		xfs_dqlock(dqp);
		if ((dqp->dq_flags & dqtype) != 0 &&
		    !(dqp->dq_flags & XFS_DQ_FREEING)) {
			if (dqp->q_nrefs == 0) {
				dqp->dq_flags |= XFS_DQ_FREEING;
				list_move_tail(&dqp->q_mplist, &dispose_list);
			} else
				nmisses++;
		}
		xfs_dqunlock(dqp);
	}
	mutex_unlock(&q->qi_dqlist_lock);

	list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist)
		xfs_qm_dqpurge(dqp);

	return nmisses;
}

int
xfs_qm_dqpurge_all(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		ndquots;

	/*
	 * Purge the dquot cache.
	 * None of the dquots should really be busy at this point.
	 */
	if (mp->m_quotainfo) {
		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
			delay(ndquots * 10);
		}
	}
	return 0;
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

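/*
 * Note: the group dquot cached in udq->q_gdquot is purely an
 * optimization. It may be absent or stale; xfs_qm_dqattach_one() above
 * falls back to a full xfs_qm_dqget() lookup when the cached id does
 * not match, and xfs_qm_detach_gdquots() drops the hints wholesale
 * during a purge.
 */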

/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}


/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    !XFS_NOT_DQATTACHED(mp, ip) ||
	    ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
					    flags & XFS_QMOPT_DQALLOC,
					    NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
					    flags & XFS_QMOPT_DQALLOC,
					    ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
					    flags & XFS_QMOPT_DQALLOC,
					    ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

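/*
 * Note: the quota inodes themselves never have dquots attached
 * (xfs_qm_dqattach_locked() screens them out by inode number), which
 * is what the assertions in xfs_qm_dqdetach() below rely on.
 */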
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}

/*
 * The hash chains and the mplist use the same xfs_dqhash structure as
 * their list head, but we can take the mplist qh_lock and one of the
 * hash qh_locks at the same time without any problem as they aren't
 * related.
 */
static struct lock_class_key xfs_quota_mplist_class;

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return error;
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_LIST_HEAD(&qinf->qi_dqlist);
	mutex_init(&qinf->qi_dqlist_lock);
	lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);

	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
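	/*
	 * Worked example (illustrative only, assuming the usual
	 * one-filesystem-block dquot cluster): with 4k blocks,
	 * qi_dqchunklen is 4096/512 = 8 basic blocks, and qi_dqperchunk
	 * is 4096 / sizeof(xfs_dqblk_t) on-disk dquots per cluster.
	 */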

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	return 0;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	ASSERT(list_empty(&qi->qi_dqlist));
	mutex_destroy(&qi->qi_dqlist_lock);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}



/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */

/* ARGSUSED */
STATIC void
xfs_qm_list_init(
	xfs_dqlist_t	*list,
	char		*str,
	int		n)
{
	mutex_init(&list->qh_lock);
	INIT_LIST_HEAD(&list->qh_list);
	list->qh_version = 0;
	list->qh_nelems = 0;
}

STATIC void
xfs_qm_list_destroy(
	xfs_dqlist_t	*list)
{
	mutex_destroy(&(list->qh_lock));
}

/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

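	/*
	 * Note: xfs_dir_ialloc() may commit the transaction and hand back
	 * a fresh one (which is why &tp and &committed are passed), so
	 * the superblock updates below are logged against whatever
	 * transaction it returned.
	 */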
	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	ddq = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp);
		xfs_buf_relse(bp);
		/*
		 * Go to the next block.
		 */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}
	return error;
}

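/*
 * Note: the reset buffers above are only queued for delayed write;
 * xfs_qm_quotacheck() pushes everything out with a single synchronous
 * xfs_flush_buftarg() once the whole scan is complete.
 */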
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	xfs_mount_t	*mp,
	xfs_inode_t	*qip,
	uint		flags)
{
	xfs_bmbt_irec_t		*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
						       firstid,
						       map[i].br_startblock,
						       map[i].br_blockcount,
						       flags))) {
				break;
			}
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map);

	return error;
}

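/*
 * Note: dquot IDs map linearly onto the quota inode's file offsets (one
 * xfs_dqblk_t slot per ID), which is why the first ID of an extent
 * above is simply br_startoff * qi_dqperchunk, and why holes can be
 * skipped: a hole means those IDs have never been allocated on disk.
 */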
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

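/*
 * Note on the bulkstat callback below: *res reports the per-inode
 * disposition back to xfs_bulkstat() -- BULKSTAT_RV_NOTHING for inodes
 * deliberately skipped (the quota inodes themselves),
 * BULKSTAT_RV_DIDONE on success, and BULKSTAT_RV_GIVEUP to abort the
 * scan on error.
 */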
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

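/*
 * Note for the adjustment above: di_nblocks counts data and realtime
 * blocks together, so the realtime total is subtracted out and charged
 * to the rtb counters instead; delayed allocations cannot exist yet at
 * mount time, hence the i_delayed_blks assertion.
 */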
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.
	 * Flush them down to disk buffers if everything was updated
	 * successfully.
	 */
	if (!error)
		error = xfs_qm_dqflush_all(mp);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

 error_return:
	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}

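/*
 * Note for xfs_qm_init_quotainos() below: on superblocks that predate
 * the quota feature, XFS_QMOPT_SBVERSION makes the first
 * xfs_qm_qino_alloc() call upgrade the superblock version; the flag is
 * then cleared so the second allocation does not repeat the upgrade.
 */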
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&dqp->q_hash->qh_lock);
	list_del_init(&dqp->q_hashlist);
	dqp->q_hash->qh_version++;
	mutex_unlock(&dqp->q_hash->qh_lock);

	mutex_lock(&qi->qi_dqlist_lock);
	list_del_init(&dqp->q_mplist);
	qi->qi_dquots--;
	qi->qi_dqreclaims++;
	mutex_unlock(&qi->qi_dqlist_lock);

	xfs_qm_dqdestroy(dqp);
}

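/*
 * Note: xfs_qm_dqfree_one() above is also where qi_dqreclaims gets
 * bumped; that is the counter xfs_qm_dqflush_all() samples to detect
 * that the per-mount dquot list changed while it was unlocked.
 */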
STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XQM_STATS_INC(xqmstats.xs_qm_dqwants);

		list_del_init(&dqp->q_freelist);
		xfs_Gqm->qm_dqfrlist_cnt--;
		return;
	}

	ASSERT(dqp->q_hash);
	ASSERT(!list_empty(&dqp->q_mplist));

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_busy;

	/*
	 * We have the flush lock so we know that this is not in the
	 * process of being flushed. So, if this is dirty, flush it
	 * DELWRI so that we don't get a freelist infested with
	 * dirty dquots.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		trace_xfs_dqreclaim_dirty(dqp);

		/*
		 * We flush it delayed write, so don't bother releasing the
		 * freelist lock.
		 */
		error = xfs_qm_dqflush(dqp, 0);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		}

		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_busy;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_freelist, dispose_list);
	xfs_Gqm->qm_dqfrlist_cnt--;

	trace_xfs_dqreclaim_done(dqp);
	XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
	return;

out_busy:
	xfs_dqunlock(dqp);

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
	list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);

	trace_xfs_dqreclaim_busy(dqp);
	XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
}

STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;
	if (!nr_to_scan)
		goto out;

	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
	while (!list_empty(&xfs_Gqm->qm_dqfrlist)) {
		if (nr_to_scan-- <= 0)
			break;
		dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot,
				       q_freelist);
		xfs_qm_dqreclaim_one(dqp, &dispose_list);
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list, struct xfs_dquot,
				       q_freelist);
		list_del_init(&dqp->q_freelist);
		xfs_qm_dqfree_one(dqp);
	}
out:
	return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure;
}

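/*
 * Note on the shrinker above: returning 0 when the allocation context
 * lacks __GFP_FS/__GFP_WAIT avoids recursing into the filesystem from
 * memory reclaim, and the value returned is only the estimate the
 * shrinker API expects (1% of the freelist, scaled by
 * vfs_cache_pressure), not an exact count of freeable dquots.
 */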
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if ((error = xfs_trans_reserve(tp, 0,
				      mp->m_sb.sb_sectsize + 128, 0,
				      0,
				      XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

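/*
 * Note: xfs_qm_vop_chown() above only moves the already-used counts
 * between dquots; the corresponding reservations, delayed-allocation
 * blocks included, are taken beforehand by xfs_qm_vop_chown_reserve()
 * below.
 */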
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
	int		error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return (error);

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return (error);
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return (0);
}

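/*
 * Note for xfs_qm_vop_rename_dqattach() below: the caller passes a
 * sorted table of up to four inodes, so duplicate entries are adjacent
 * and the i_tab[i-1] comparison is sufficient to skip them (an
 * assumption about the caller, but it matches how xfs_rename builds
 * the table).
 */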
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}