// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with the
 * given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary case,
 * this gets called via xfs_trans_commit, after the transaction is already
 * dirty.  However, there's nothing to stop this from getting called directly,
 * as done by xfs_qm_scall_setqlim.  Hence the TRANS_DIRTY flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}
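
/*
 * Worked example for the carry-forward above (illustrative numbers, not
 * from the original source): if the old transaction reserved 100 blocks
 * against a dquot (qt_blk_res == 100) and has so far allocated 30 of them
 * (qt_bcount_delta == 30), the old transaction keeps just enough
 * reservation to cover its own usage (qt_blk_res becomes 30) while the
 * duplicate inherits the unused remainder (qt_blk_res == 70).  Nothing is
 * lost or double-counted across a transaction roll.
 */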

/*
 * Wrap around mod_dquot to account for user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	if (XFS_QM_ISUDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
	else if (XFS_QM_ISGDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
	else if (XFS_QM_ISPDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
	else
		return NULL;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {

	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/*
	 * inode reservation
	 */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/*
	 * disk blocks used.
	 */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/*
	 * inode count
	 */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/*
	 * rtblk reservation
	 */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/*
	 * rtblk count
	 */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
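
/*
 * Worked example for the accounting above (illustrative, not from the
 * original source): a transaction that reserves 10 blocks up front and
 * then allocates 4 of them calls xfs_trans_mod_dquot() twice, once with
 * XFS_TRANS_DQ_RES_BLKS and once with XFS_TRANS_DQ_BCOUNT, leaving
 * qt_blk_res == 10 and qt_bcount_delta == 4 in the dquot's dqtrx slot.
 * The 6-block difference is handed back to the dquot when the deltas are
 * applied at commit time.
 */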

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that
 * at most two dquots of any one type (usr, grp or prj) can be involved in a
 * transaction, so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	struct xfs_disk_dquot	*d;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * Sometimes we intentionally skip the blkquota
			 * reservation to be fair to users (when the amount
			 * is small).  On the other hand, delayed allocs do
			 * make reservations, but that's outside of a
			 * transaction, so we have no idea how much was
			 * really reserved.
			 * So, here we've accumulated delayed allocation blks
			 * and non-delay blks.  The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
				xfs_qm_adjust_dqtimers(tp->t_mountp, dqp);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
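
			/*
			 * Illustration (not from the original source): with
			 * qt_blk_res == 8 and qt_bcount_delta == 5, the 3
			 * unused reserved blocks are handed back below by
			 * decrementing q_res_bcount by the difference.
			 */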
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				uint64_t	blk_res_used = 0;

				if (qtrx->qt_bcount_delta > 0)
					blk_res_used = qtrx->qt_bcount_delta;

				if (qtrx->qt_blk_res != blk_res_used) {
					if (qtrx->qt_blk_res > blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation).  Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
						(xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
						(xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}
	}
}
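
/*
 * xfs_trans_apply_dquot_deltas() above is the commit-side half of this
 * bookkeeping; xfs_trans_unreserve_and_mod_dquots() below is its abort-side
 * counterpart, which discards the incore deltas and only gives back the
 * reservations.
 */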

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation.  We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	if (dqp->dq_flags & XFS_DQ_PROJ)
		qtype = PRJQUOTA;
	else if (dqp->dq_flags & XFS_DQ_USER)
		qtype = USRQUOTA;
	else
		qtype = GRPQUOTA;

	quota_send_warning(make_kqid(&init_user_ns, qtype,
				     be32_to_cpu(dqp->q_core.d_id)),
			   mp->m_super->s_dev, type);
}
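
/*
 * Worked example for the enforcement logic in xfs_trans_dqresv() below
 * (illustrative numbers, not from the original source): with
 * d_blk_softlimit == 100, d_blk_hardlimit == 120 and q_res_bcount == 95,
 * a request for 30 more blocks would cross the hard limit and fails with
 * -EDQUOT (-ENOSPC for a project dquot).  A request for 10 blocks only
 * crosses the soft limit, so it succeeds after emitting QUOTA_NL_BSOFTWARN;
 * but if the grace timer has already expired or the warning count has hit
 * the warn limit, it instead emits QUOTA_NL_BSOFTLONGWARN and fails.
 */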

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Passing in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;
	time64_t		timer;
	xfs_qwarncnt_t		warns;
	xfs_qwarncnt_t		warnlimit;
	xfs_qcnt_t		total_count;
	xfs_qcnt_t		*resbcountp;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = defq->bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = defq->bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = defq->bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = defq->rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = defq->rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = defq->rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 &&
				     ktime_get_real_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = defq->iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = defq->ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = defq->isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 &&
				     ktime_get_real_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (XFS_QM_ISPDQ(dqp))
		return -ENOSPC;
	return -EDQUOT;
}

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * (A failed project quota reservation returns -ENOSPC rather than -EDQUOT;
 * see the error path of xfs_trans_dqresv() above.)
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
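
/*
 * Illustrative caller (hypothetical, for documentation only): to reserve
 * 16 regular blocks and 1 inode against all three of an inode's dquots:
 *
 *	error = xfs_trans_reserve_quota_bydquots(tp, mp,
 *			ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
 *			16, 1, XFS_TRANS_DQ_RES_BLKS);
 *
 * If any of the three reservations fails, the ones already made are backed
 * out (note the XFS_QMOPT_FORCE_RES on the unwind path, so the give-back
 * cannot itself fail) and the error is returned, so the caller never sees
 * a partially reserved state.
 */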

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}

/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}