// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */


#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"

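/*
 * Log a quotaoff start record and, in the same synchronous transaction,
 * push the superblock quota flag changes out to the log.
 */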
STATIC int
xfs_qm_log_quotaoff(
        struct xfs_mount        *mp,
        struct xfs_qoff_logitem **qoffstartp,
        uint                    flags)
{
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_qoff_logitem *qoffi;

        *qoffstartp = NULL;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
        if (error)
                goto out;

        qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        xfs_log_sb(tp);

        /*
         * We have to make sure that the transaction is secure on disk before
         * we return and actually stop quota accounting. So, make it
         * synchronous. We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp);
        if (error)
                goto out;

        *qoffstartp = qoffi;
out:
        return error;
}

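/*
 * Log the quotaoff end record that pairs with the start record written by
 * xfs_qm_log_quotaoff(), again as a synchronous transaction.
 */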
STATIC int
xfs_qm_log_quotaoff_end(
        struct xfs_mount        *mp,
        struct xfs_qoff_logitem *startqoff,
        uint                    flags)
{
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_qoff_logitem *qoffi;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
        if (error)
                return error;

        qoffi = xfs_trans_get_qoff_item(tp, startqoff,
                                        flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        /*
         * We have to make sure that the transaction is secure on disk before
         * we return and actually stop quota accounting. So, make it
         * synchronous. We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp);
}

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
        xfs_mount_t             *mp,
        uint                    flags)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        uint                    dqtype;
        int                     error;
        uint                    inactivate_flags;
        struct xfs_qoff_logitem *qoffstart;

        /*
         * No file system can have quotas enabled on disk but not in core.
         * Note that quota utilities (like quotaoff) _expect_
         * errno == -EEXIST here.
         */
        if ((mp->m_qflags & flags) == 0)
                return -EEXIST;
        error = 0;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

        /*
         * We don't want to deal with two quotaoffs messing up each other,
         * so we're going to serialize it. quotaoff isn't exactly a performance
         * critical thing.
         * If quotaoff, then we must be dealing with the root filesystem.
         */
        ASSERT(q);
        mutex_lock(&q->qi_quotaofflock);

        /*
         * If we're just turning off quota enforcement, change mp and go.
         */
        if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
                mp->m_qflags &= ~(flags);

                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = mp->m_qflags;
                spin_unlock(&mp->m_sb_lock);
                mutex_unlock(&q->qi_quotaofflock);

                /* XXX what to do if error ? Revert back to old vals incore ? */
                return xfs_sync_sb(mp, false);
        }

        dqtype = 0;
        inactivate_flags = 0;
        /*
         * If accounting is off, we must turn enforcement off, clear the
         * quota 'CHKD' certificate to make it known that we have to
         * do a quotacheck the next time this quota is turned on.
         */
        if (flags & XFS_UQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_UQUOTA;
                flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
                inactivate_flags |= XFS_UQUOTA_ACTIVE;
        }
        if (flags & XFS_GQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_GQUOTA;
                flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
                inactivate_flags |= XFS_GQUOTA_ACTIVE;
        }
        if (flags & XFS_PQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_PQUOTA;
                flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
                inactivate_flags |= XFS_PQUOTA_ACTIVE;
        }

        /*
         * Nothing to do? Don't complain. This happens when we're just
         * turning off quota enforcement.
         */
        if ((mp->m_qflags & flags) == 0)
                goto out_unlock;

        /*
         * Write the LI_QUOTAOFF log record, and do SB changes atomically,
         * and synchronously. If we fail to write, we should abort the
         * operation as it cannot be recovered safely if we crash.
         */
        error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
        if (error)
                goto out_unlock;

        /*
         * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
         * to take care of the race between dqget and quotaoff. We don't take
         * any special locks to reset these bits. All processes need to check
         * these bits *after* taking inode lock(s) to see if the particular
         * quota type is in the process of being turned off. If *ACTIVE, it is
         * guaranteed that all dquot structures and all quotainode ptrs will
         * all stay valid as long as that inode is kept locked.
         *
         * There is no turning back after this.
         */
        mp->m_qflags &= ~inactivate_flags;

        /*
         * Give back all the dquot reference(s) held by inodes.
         * Here we go through every single incore inode in this file system,
         * and do a dqrele on the i_udquot/i_gdquot that it may have.
         * Essentially, as long as somebody has an inode locked, this
         * guarantees that quotas will not be turned off. This is handy
         * because in a transaction once we lock the inode(s) and check for
         * quotaon, we can depend on the quota inodes (and other things)
         * being valid as long as we keep the lock(s).
         */
        xfs_qm_dqrele_all_inodes(mp, flags);

        /*
         * Next we make the changes in the quota flag in the mount struct.
         * This isn't protected by a particular lock directly, because we
         * don't want to take a mrlock every time we depend on quotas being on.
         */
        mp->m_qflags &= ~flags;

        /*
         * Go through all the dquots of this file system and purge them,
         * according to what was turned off.
         */
        xfs_qm_dqpurge_all(mp, dqtype);

        /*
         * Transactions that had started before ACTIVE state bit was cleared
         * could have logged many dquots, so they'd have higher LSNs than
         * the first QUOTAOFF log record does. If we happen to crash when
         * the tail of the log has gone past the QUOTAOFF record, but
         * before the last dquot modification, those dquots __will__
         * recover, and that's not good.
         *
         * So, we have QUOTAOFF start and end logitems; the start
         * logitem won't get overwritten until the end logitem appears...
         */
        error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
        if (error) {
                /* We're screwed now. Shutdown is the only option. */
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                goto out_unlock;
        }

        /*
         * If all quotas are completely turned off, close shop.
         */
        if (mp->m_qflags == 0) {
                mutex_unlock(&q->qi_quotaofflock);
                xfs_qm_destroy_quotainfo(mp);
                return 0;
        }

        /*
         * Release our quotainode references if we don't need them anymore.
         */
        if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
                xfs_irele(q->qi_uquotaip);
                q->qi_uquotaip = NULL;
        }
        if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
                xfs_irele(q->qi_gquotaip);
                q->qi_gquotaip = NULL;
        }
        if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
                xfs_irele(q->qi_pquotaip);
                q->qi_pquotaip = NULL;
        }

out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

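/*
 * Truncate one quota inode back to zero length, freeing all of the blocks
 * allocated to it.
 */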
STATIC int
xfs_qm_scall_trunc_qfile(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;
        struct xfs_trans        *tp;
        int                     error;

        if (ino == NULLFSINO)
                return 0;

        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
        if (error)
                return error;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
        if (error) {
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                goto out_put;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        ip->i_d.di_size = 0;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp);
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_nextents == 0);

        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
        xfs_irele(ip);
        return error;
}

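/*
 * Truncate the quota files for every quota type requested in flags,
 * releasing all of the space they hold.
 */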
int
xfs_qm_scall_trunc_qfiles(
        xfs_mount_t     *mp,
        uint            flags)
{
        int             error = -EINVAL;

        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
            (flags & ~XFS_DQ_ALLTYPES)) {
                xfs_debug(mp, "%s: flags=%x m_qflags=%x",
                        __func__, flags, mp->m_qflags);
                return -EINVAL;
        }

        if (flags & XFS_DQ_USER) {
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
                if (error)
                        return error;
        }
        if (flags & XFS_DQ_GROUP) {
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
                if (error)
                        return error;
        }
        if (flags & XFS_DQ_PROJ)
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

        return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
        xfs_mount_t     *mp,
        uint            flags)
{
        int             error;
        uint            qf;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
         * Switching on quota accounting must be done at mount time.
         */
        flags &= ~(XFS_ALL_QUOTA_ACCT);

        if (flags == 0) {
                xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                        __func__, mp->m_qflags);
                return -EINVAL;
        }

        /*
         * Can't enforce without accounting. We check the superblock
         * qflags here instead of m_qflags because rootfs can have
         * quota acct on ondisk without m_qflags' knowing.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
             (flags & XFS_UQUOTA_ENFD)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
             (flags & XFS_GQUOTA_ENFD)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
                xfs_debug(mp,
                        "%s: Can't enforce without acct, flags=%x sbflags=%x",
                        __func__, flags, mp->m_sb.sb_qflags);
                return -EINVAL;
        }
        /*
         * If everything's up to date incore, then don't waste time.
         */
        if ((mp->m_qflags & flags) == flags)
                return -EEXIST;

        /*
         * Change sb_qflags on disk but not incore mp->qflags
         * if this is the root filesystem.
         */
        spin_lock(&mp->m_sb_lock);
        qf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = qf | flags;
        spin_unlock(&mp->m_sb_lock);

        /*
         * There's nothing to change if it's the same.
         */
        if ((qf & flags) == flags)
                return -EEXIST;

        error = xfs_sync_sb(mp, false);
        if (error)
                return error;
        /*
         * If we aren't trying to switch on quota enforcement, we are done.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
             (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
             (mp->m_qflags & XFS_GQUOTA_ACCT)))
                return 0;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return -ESRCH;

        /*
         * Switch on quota enforcement in core.
         */
        mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
        mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
        mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

        return 0;
}

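/*
 * The only qc_dqblk fields that xfs_qm_scall_setqlim() accepts in
 * d_fieldmask: limits, grace timers and warning counts.
 */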
#define XFS_QC_MASK \
        (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        struct qc_dqblk         *newlim)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *ddq;
        struct xfs_dquot        *dqp;
        struct xfs_trans        *tp;
        struct xfs_def_quota    *defq;
        int                     error;
        xfs_qcnt_t              hard, soft;

        if (newlim->d_fieldmask & ~XFS_QC_MASK)
                return -EINVAL;
        if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
                return 0;

        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
         * We don't hold an inode lock, so there's nothing else to stop
         * a quotaoff from happening.
         */
        mutex_lock(&q->qi_quotaofflock);

        /*
         * Get the dquot (locked) before we start, as we need to do a
         * transaction to allocate it if it doesn't exist. Once we have the
         * dquot, unlock it so we can start the next transaction safely. We
         * hold a reference to the dquot, so it's safe to do this unlock/lock
         * without it being reclaimed in the meantime.
         */
        error = xfs_qm_dqget(mp, id, type, true, &dqp);
        if (error) {
                ASSERT(error != -ENOENT);
                goto out_unlock;
        }

        defq = xfs_get_defquota(dqp, q);
        xfs_dqunlock(dqp);

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
        if (error)
                goto out_rele;

        xfs_dqlock(dqp);
        xfs_trans_dqjoin(tp, dqp);
        ddq = &dqp->q_core;

        /*
         * Make sure that hardlimits are >= soft limits before changing.
         */
        hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
        soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
                ddq->d_blk_softlimit = cpu_to_be64(soft);
                xfs_dquot_set_prealloc_limits(dqp);
                if (id == 0) {
                        defq->bhardlimit = hard;
                        defq->bsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
        hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
        soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
                (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
                ddq->d_rtb_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        defq->rtbhardlimit = hard;
                        defq->rtbsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }

        hard = (newlim->d_fieldmask & QC_INO_HARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
        soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_ino_hardlimit = cpu_to_be64(hard);
                ddq->d_ino_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        defq->ihardlimit = hard;
                        defq->isoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
        }

        /*
         * Update warnings counter(s) if requested.
         */
        if (newlim->d_fieldmask & QC_SPC_WARNS)
                ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
        if (newlim->d_fieldmask & QC_INO_WARNS)
                ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
        if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
                ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

        if (id == 0) {
                /*
                 * Timelimits for the super user set the relative time
                 * the other users can be over quota for this file system.
                 * If it is zero a default is used. Ditto for the default
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
                if (newlim->d_fieldmask & QC_SPC_TIMER) {
                        q->qi_btimelimit = newlim->d_spc_timer;
                        ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
                }
                if (newlim->d_fieldmask & QC_INO_TIMER) {
                        q->qi_itimelimit = newlim->d_ino_timer;
                        ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
                }
                if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
                        q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
                        ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
                }
                if (newlim->d_fieldmask & QC_SPC_WARNS)
                        q->qi_bwarnlimit = newlim->d_spc_warns;
                if (newlim->d_fieldmask & QC_INO_WARNS)
                        q->qi_iwarnlimit = newlim->d_ino_warns;
                if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
                        q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
                 * The user will not be 'warned'.
                 * Note that we keep the timers ticking, whether enforcement
                 * is on or off. We don't really want to bother with iterating
                 * over all ondisk dquots and turning the timers on/off.
                 */
                xfs_qm_adjust_dqtimers(mp, ddq);
        }
        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_trans_log_dquot(tp, dqp);

        error = xfs_trans_commit(tp);

out_rele:
        xfs_qm_dqrele(dqp);
out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
        struct xfs_mount        *mp,
        uint                    type,
        const struct xfs_dquot  *dqp,
        struct qc_dqblk         *dst)
{
        memset(dst, 0, sizeof(*dst));
        dst->d_spc_hardlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
        dst->d_spc_softlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
        dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
        dst->d_ino_count = dqp->q_res_icount;
        dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
        dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
        dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
        dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
        dst->d_rt_spc_hardlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
        dst->d_rt_spc_softlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
        dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
        dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
        dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

        /*
         * Internally, we don't reset all the timers when quota enforcement
         * gets turned off. No need to confuse the user level code,
         * so return zeroes in that case.
         */
        if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_USER) ||
            (!XFS_IS_GQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_GROUP) ||
            (!XFS_IS_PQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_PROJ)) {
                dst->d_spc_timer = 0;
                dst->d_ino_timer = 0;
                dst->d_rt_spc_timer = 0;
        }

#ifdef DEBUG
        if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
             (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
             (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
            dqp->q_core.d_id != 0) {
                if ((dst->d_space > dst->d_spc_softlimit) &&
                    (dst->d_spc_softlimit > 0)) {
                        ASSERT(dst->d_spc_timer != 0);
                }
                if ((dst->d_ino_count > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
                        ASSERT(dst->d_ino_timer != 0);
                }
        }
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        struct qc_dqblk         *dst)
{
        struct xfs_dquot        *dqp;
        int                     error;

        /*
         * Try to get the dquot. We don't want it allocated on disk, so don't
         * set doalloc. If it doesn't exist, we'll get ENOENT back.
         */
        error = xfs_qm_dqget(mp, id, type, false, &dqp);
        if (error)
                return error;

        /*
         * If everything's NULL, this dquot doesn't quite exist as far as
         * our utility programs are concerned.
         */
        if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                error = -ENOENT;
                goto out_put;
        }

        xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
        xfs_qm_dqput(dqp);
        return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
        struct xfs_mount        *mp,
        xfs_dqid_t              *id,
        uint                    type,
        struct qc_dqblk         *dst)
{
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget_next(mp, *id, type, &dqp);
        if (error)
                return error;

        /* Fill in the ID we actually read from disk */
        *id = be32_to_cpu(dqp->q_core.d_id);

        xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

        xfs_qm_dqput(dqp);
        return error;
}

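/*
 * Release the dquot references held by a single inode. The quota inodes
 * themselves never carry dquot references and are skipped.
 */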
STATIC int
xfs_dqrele_inode(
        struct xfs_inode        *ip,
        int                     flags,
        void                    *args)
{
        /* skip quota inodes */
        if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(ip->i_gdquot == NULL);
                ASSERT(ip->i_pdquot == NULL);
                return 0;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
                xfs_qm_dqrele(ip->i_pdquot);
                ip->i_pdquot = NULL;
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}

/*
 * Go through all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
        struct xfs_mount        *mp,
        uint                    flags)
{
        ASSERT(mp->m_quotainfo);
        xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
                                    XFS_AGITER_INEW_WAIT);
}