// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"

STATIC int
xfs_qm_log_quotaoff(
        struct xfs_mount        *mp,
        struct xfs_qoff_logitem **qoffstartp,
        uint                    flags)
{
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_qoff_logitem *qoffi;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
        if (error)
                goto out;

        qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        xfs_log_sb(tp);

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp);
        if (error)
                goto out;

        *qoffstartp = qoffi;
out:
        return error;
}

STATIC int
xfs_qm_log_quotaoff_end(
        struct xfs_mount        *mp,
        struct xfs_qoff_logitem **startqoff,
        uint                    flags)
{
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_qoff_logitem *qoffi;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
        if (error)
                return error;

        qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
                                        flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);
        *startqoff = NULL;

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp);
}

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
        xfs_mount_t             *mp,
        uint                    flags)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        uint                    dqtype;
        int                     error;
        uint                    inactivate_flags;
        struct xfs_qoff_logitem *qoffstart = NULL;

        /*
         * No file system can have quotas enabled on disk but not in core.
         * Note that quota utilities (like quotaoff) _expect_
         * errno == -EEXIST here.
         */
        if ((mp->m_qflags & flags) == 0)
                return -EEXIST;
        error = 0;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

        /*
         * We don't want to deal with two quotaoffs messing up each other,
         * so we're going to serialize it. quotaoff isn't exactly a performance
         * critical thing.
         * If quotaoff, then we must be dealing with the root filesystem.
         */
        ASSERT(q);
        mutex_lock(&q->qi_quotaofflock);

        /*
         * If we're just turning off quota enforcement, change mp and go.
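         * Accounting stays on in that case, so there are no dquots to purge
         * and no inodes to walk; updating the incore flags and the ondisk
         * superblock is all that is needed.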
         */
        if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
                mp->m_qflags &= ~(flags);

                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = mp->m_qflags;
                spin_unlock(&mp->m_sb_lock);
                mutex_unlock(&q->qi_quotaofflock);

                /* XXX what to do if error ? Revert back to old vals incore ? */
                return xfs_sync_sb(mp, false);
        }

        dqtype = 0;
        inactivate_flags = 0;
        /*
         * If accounting is off, we must turn enforcement off, clear the
         * quota 'CHKD' certificate to make it known that we have to
         * do a quotacheck the next time this quota is turned on.
         */
        if (flags & XFS_UQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_UQUOTA;
                flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
                inactivate_flags |= XFS_UQUOTA_ACTIVE;
        }
        if (flags & XFS_GQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_GQUOTA;
                flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
                inactivate_flags |= XFS_GQUOTA_ACTIVE;
        }
        if (flags & XFS_PQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_PQUOTA;
                flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
                inactivate_flags |= XFS_PQUOTA_ACTIVE;
        }

        /*
         * Nothing to do? Don't complain. This happens when we're just
         * turning off quota enforcement.
         */
        if ((mp->m_qflags & flags) == 0)
                goto out_unlock;

        /*
         * Write the LI_QUOTAOFF log record, and do SB changes atomically,
         * and synchronously. If we fail to write, we should abort the
         * operation as it cannot be recovered safely if we crash.
         */
        error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
        if (error)
                goto out_unlock;

        /*
         * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
         * to take care of the race between dqget and quotaoff. We don't take
         * any special locks to reset these bits. All processes need to check
         * these bits *after* taking inode lock(s) to see if the particular
         * quota type is in the process of being turned off. If *ACTIVE, it is
         * guaranteed that all dquot structures and all quotainode ptrs will all
         * stay valid as long as that inode is kept locked.
         *
         * There is no turning back after this.
         */
        mp->m_qflags &= ~inactivate_flags;

        /*
         * Give back all the dquot reference(s) held by inodes.
         * Here we go thru every single incore inode in this file system, and
         * do a dqrele on the i_udquot/i_gdquot that it may have.
         * Essentially, as long as somebody has an inode locked, this guarantees
         * that quotas will not be turned off. This is handy because in a
         * transaction once we lock the inode(s) and check for quotaon, we can
         * depend on the quota inodes (and other things) being valid as long as
         * we keep the lock(s).
         */
        xfs_qm_dqrele_all_inodes(mp, flags);

        /*
         * Next we make the changes in the quota flag in the mount struct.
         * This isn't protected by a particular lock directly, because we
         * don't want to take a mrlock every time we depend on quotas being on.
         */
        mp->m_qflags &= ~flags;

        /*
         * Go through all the dquots of this file system and purge them,
         * according to what was turned off.
         */
        xfs_qm_dqpurge_all(mp, dqtype);

        /*
         * Transactions that had started before ACTIVE state bit was cleared
         * could have logged many dquots, so they'd have higher LSNs than
         * the first QUOTAOFF log record does.
         * If we happen to crash when the tail of the log has gone past
         * the QUOTAOFF record, but before the last dquot modification,
         * those dquots __will__ recover, and that's not good.
         *
         * So, we have QUOTAOFF start and end logitems; the start
         * logitem won't get overwritten until the end logitem appears...
         */
        error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
        if (error) {
                /* We're screwed now. Shutdown is the only option. */
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                goto out_unlock;
        }

        /*
         * If all quotas are completely turned off, close shop.
         */
        if (mp->m_qflags == 0) {
                mutex_unlock(&q->qi_quotaofflock);
                xfs_qm_destroy_quotainfo(mp);
                return 0;
        }

        /*
         * Release our quotainode references if we don't need them anymore.
         */
        if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
                xfs_irele(q->qi_uquotaip);
                q->qi_uquotaip = NULL;
        }
        if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
                xfs_irele(q->qi_gquotaip);
                q->qi_gquotaip = NULL;
        }
        if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
                xfs_irele(q->qi_pquotaip);
                q->qi_pquotaip = NULL;
        }

out_unlock:
        if (error && qoffstart)
                xfs_qm_qoff_logitem_relse(qoffstart);
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

STATIC int
xfs_qm_scall_trunc_qfile(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;
        struct xfs_trans        *tp;
        int                     error;

        if (ino == NULLFSINO)
                return 0;

        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
        if (error)
                return error;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
        if (error) {
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                goto out_put;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        ip->i_d.di_size = 0;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp);
                goto out_unlock;
        }

        ASSERT(ip->i_df.if_nextents == 0);

        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
        xfs_irele(ip);
        return error;
}

int
xfs_qm_scall_trunc_qfiles(
        xfs_mount_t             *mp,
        uint                    flags)
{
        int                     error = -EINVAL;

        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
            (flags & ~XFS_QMOPT_QUOTALL)) {
                xfs_debug(mp, "%s: flags=%x m_qflags=%x",
                        __func__, flags, mp->m_qflags);
                return -EINVAL;
        }

        if (flags & XFS_QMOPT_UQUOTA) {
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
                if (error)
                        return error;
        }
        if (flags & XFS_QMOPT_GQUOTA) {
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
                if (error)
                        return error;
        }
        if (flags & XFS_QMOPT_PQUOTA)
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

        return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
        xfs_mount_t             *mp,
        uint                    flags)
{
        int                     error;
        uint                    qf;

        /*
         * Switching on quota accounting must be done at mount time,
         * only consider quota enforcement stuff here.
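         * Everything else is masked out of the request; if no enforcement
         * flags remain we fail with -EINVAL below.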
         */
        flags &= XFS_ALL_QUOTA_ENFD;

        if (flags == 0) {
                xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                        __func__, mp->m_qflags);
                return -EINVAL;
        }

        /*
         * Can't enforce without accounting. We check the superblock
         * qflags here instead of m_qflags because rootfs can have
         * quota acct on ondisk without m_qflags' knowing.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
             (flags & XFS_UQUOTA_ENFD)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
             (flags & XFS_GQUOTA_ENFD)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
                xfs_debug(mp,
                        "%s: Can't enforce without acct, flags=%x sbflags=%x",
                        __func__, flags, mp->m_sb.sb_qflags);
                return -EINVAL;
        }
        /*
         * If everything's up to date incore, then don't waste time.
         */
        if ((mp->m_qflags & flags) == flags)
                return -EEXIST;

        /*
         * Change sb_qflags on disk but not incore mp->qflags
         * if this is the root filesystem.
         */
        spin_lock(&mp->m_sb_lock);
        qf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = qf | flags;
        spin_unlock(&mp->m_sb_lock);

        /*
         * There's nothing to change if it's the same.
         */
        if ((qf & flags) == flags)
                return -EEXIST;

        error = xfs_sync_sb(mp, false);
        if (error)
                return error;
        /*
         * If we aren't trying to switch on quota enforcement, we are done.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
             (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
             (mp->m_qflags & XFS_GQUOTA_ACCT)))
                return 0;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return -ESRCH;

        /*
         * Switch on quota enforcement in core.
         */
        mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
        mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
        mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

        return 0;
}

#define XFS_QC_MASK \
        (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust limits of this quota, and the defaults if passed in. Returns true
 * if the new limits made sense and were applied, false otherwise.
 */
static inline bool
xfs_setqlim_limits(
        struct xfs_mount        *mp,
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim,
        xfs_qcnt_t              hard,
        xfs_qcnt_t              soft,
        const char              *tag)
{
        /* The hard limit can't be less than the soft limit. */
        if (hard != 0 && hard < soft) {
                xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag,
                        soft);
                return false;
        }

        res->hardlimit = hard;
        res->softlimit = soft;
        if (qlim) {
                qlim->hard = hard;
                qlim->soft = soft;
        }

        return true;
}

static inline void
xfs_setqlim_warns(
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim,
        int                     warns)
{
        res->warnings = warns;
        if (qlim)
                qlim->warn = warns;
}

static inline void
xfs_setqlim_timer(
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim,
        s64                     timer)
{
        res->timer = timer;
        if (qlim)
                qlim->time = timer;
}

/*
 * Adjust quota limits, and start/stop timers accordingly.
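 *
 * Only the fields selected in newlim->d_fieldmask are updated; limits,
 * warnings and timers that were not requested keep their current values.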
 */
int
xfs_qm_scall_setqlim(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        struct qc_dqblk         *newlim)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_dquot        *dqp;
        struct xfs_trans        *tp;
        struct xfs_def_quota    *defq;
        struct xfs_dquot_res    *res;
        struct xfs_quota_limits *qlim;
        int                     error;
        xfs_qcnt_t              hard, soft;

        if (newlim->d_fieldmask & ~XFS_QC_MASK)
                return -EINVAL;
        if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
                return 0;

        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
         * We don't hold an inode lock, so there's nothing else to stop
         * a quotaoff from happening.
         */
        mutex_lock(&q->qi_quotaofflock);

        /*
         * Get the dquot (locked) before we start, as we need to do a
         * transaction to allocate it if it doesn't exist. Once we have the
         * dquot, unlock it so we can start the next transaction safely. We hold
         * a reference to the dquot, so it's safe to do this unlock/lock without
         * it being reclaimed in the meantime.
         */
        error = xfs_qm_dqget(mp, id, type, true, &dqp);
        if (error) {
                ASSERT(error != -ENOENT);
                goto out_unlock;
        }

        defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
        xfs_dqunlock(dqp);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
        if (error)
                goto out_rele;

        xfs_dqlock(dqp);
        xfs_trans_dqjoin(tp, dqp);

        /*
         * Update quota limits, warnings, and timers, and the defaults
         * if we're touching id == 0.
         *
         * Make sure that hardlimits are >= soft limits before changing.
         *
         * Update warnings counter(s) if requested.
         *
         * Timelimits for the super user set the relative time the other users
         * can be over quota for this file system. If it is zero, a default is
         * used. Ditto for the default soft and hard limit values (already
         * done, above), and for warnings.
         *
         * For other IDs, userspace can bump out the grace period if over
         * the soft limit.
         */

        /* Blocks on the data device. */
        hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
                dqp->q_blk.hardlimit;
        soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
                dqp->q_blk.softlimit;
        res = &dqp->q_blk;
        qlim = id == 0 ? &defq->blk : NULL;

        if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk"))
                xfs_dquot_set_prealloc_limits(dqp);
        if (newlim->d_fieldmask & QC_SPC_WARNS)
                xfs_setqlim_warns(res, qlim, newlim->d_spc_warns);
        if (newlim->d_fieldmask & QC_SPC_TIMER)
                xfs_setqlim_timer(res, qlim, newlim->d_spc_timer);

        /* Blocks on the realtime device. */
        hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
                dqp->q_rtb.hardlimit;
        soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
                dqp->q_rtb.softlimit;
        res = &dqp->q_rtb;
        qlim = id == 0 ? &defq->rtb : NULL;

        xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb");
        if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
                xfs_setqlim_warns(res, qlim, newlim->d_rt_spc_warns);
        if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
                xfs_setqlim_timer(res, qlim, newlim->d_rt_spc_timer);

        /* Inodes */
        hard = (newlim->d_fieldmask & QC_INO_HARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                dqp->q_ino.hardlimit;
        soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                dqp->q_ino.softlimit;
        res = &dqp->q_ino;
        qlim = id == 0 ? &defq->ino : NULL;

        xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino");
        if (newlim->d_fieldmask & QC_INO_WARNS)
                xfs_setqlim_warns(res, qlim, newlim->d_ino_warns);
        if (newlim->d_fieldmask & QC_INO_TIMER)
                xfs_setqlim_timer(res, qlim, newlim->d_ino_timer);

        if (id != 0) {
                /*
                 * If the user is now over quota, start the timelimit.
                 * The user will not be 'warned'.
                 * Note that we keep the timers ticking, whether enforcement
                 * is on or off. We don't really want to bother with iterating
                 * over all ondisk dquots and turning the timers on/off.
                 */
                xfs_qm_adjust_dqtimers(dqp);
        }
        dqp->q_flags |= XFS_DQFLAG_DIRTY;
        xfs_trans_log_dquot(tp, dqp);

        error = xfs_trans_commit(tp);

out_rele:
        xfs_qm_dqrele(dqp);
out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
        struct xfs_mount        *mp,
        xfs_dqtype_t            type,
        const struct xfs_dquot  *dqp,
        struct qc_dqblk         *dst)
{
        memset(dst, 0, sizeof(*dst));
        dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit);
        dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit);
        dst->d_ino_hardlimit = dqp->q_ino.hardlimit;
        dst->d_ino_softlimit = dqp->q_ino.softlimit;
        dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved);
        dst->d_ino_count = dqp->q_ino.reserved;
        dst->d_spc_timer = dqp->q_blk.timer;
        dst->d_ino_timer = dqp->q_ino.timer;
        dst->d_ino_warns = dqp->q_ino.warnings;
        dst->d_spc_warns = dqp->q_blk.warnings;
        dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit);
        dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit);
        dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved);
        dst->d_rt_spc_timer = dqp->q_rtb.timer;
        dst->d_rt_spc_warns = dqp->q_rtb.warnings;

        /*
         * Internally, we don't reset all the timers when quota enforcement
         * gets turned off. No need to confuse the user level code,
         * so return zeroes in that case.
         */
        if (!xfs_dquot_is_enforced(dqp)) {
                dst->d_spc_timer = 0;
                dst->d_ino_timer = 0;
                dst->d_rt_spc_timer = 0;
        }

#ifdef DEBUG
        if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) {
                if ((dst->d_space > dst->d_spc_softlimit) &&
                    (dst->d_spc_softlimit > 0)) {
                        ASSERT(dst->d_spc_timer != 0);
                }
                if ((dst->d_ino_count > dqp->q_ino.softlimit) &&
                    (dqp->q_ino.softlimit > 0)) {
                        ASSERT(dst->d_ino_timer != 0);
                }
        }
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        struct qc_dqblk         *dst)
{
        struct xfs_dquot        *dqp;
        int                     error;

        /*
         * Try to get the dquot. We don't want it allocated on disk, so don't
         * set doalloc. If it doesn't exist, we'll get ENOENT back.
         */
        error = xfs_qm_dqget(mp, id, type, false, &dqp);
        if (error)
                return error;

        /*
         * If everything's NULL, this dquot doesn't quite exist as far as
         * our utility programs are concerned.
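         * Drop the reference and report -ENOENT rather than handing back
         * an all-zero record.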
         */
        if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                error = -ENOENT;
                goto out_put;
        }

        xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
        xfs_qm_dqput(dqp);
        return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
        struct xfs_mount        *mp,
        xfs_dqid_t              *id,
        xfs_dqtype_t            type,
        struct qc_dqblk         *dst)
{
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget_next(mp, *id, type, &dqp);
        if (error)
                return error;

        /* Fill in the ID we actually read from disk */
        *id = dqp->q_id;

        xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

        xfs_qm_dqput(dqp);
        return error;
}

STATIC int
xfs_dqrele_inode(
        struct xfs_inode        *ip,
        void                    *args)
{
        uint                    *flags = args;

        /* skip quota inodes */
        if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(ip->i_gdquot == NULL);
                ASSERT(ip->i_pdquot == NULL);
                return 0;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
                xfs_qm_dqrele(ip->i_pdquot);
                ip->i_pdquot = NULL;
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}

/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
        struct xfs_mount        *mp,
        uint                    flags)
{
        ASSERT(mp->m_quotainfo);
        xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
                        &flags, XFS_ICI_NO_TAG);
}