/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota-check
 * program to be run after node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means there is more contention between the nodes for the quota file.  The
 * default value is one.  This sets the maximum theoretical quota overrun (with
 * an infinite number of nodes, each with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};
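/*
 * qd2offset - byte offset of an ID's entry in the quota file
 *
 * Each ID owns a pair of struct gfs2_quota entries in the quota file: the
 * user entry at index 2 * id and the group entry at index 2 * id + 1.  For
 * example, id 10 has its user entry at offset 20 * sizeof(struct gfs2_quota)
 * and its group entry at offset 21 * sizeof(struct gfs2_quota).
 */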
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
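/*
 * slot_get - reserve a slot in the per-node quota change file
 *
 * Slots are tracked in sd_quota_bitmap, an array of sd_quota_chunks
 * PAGE_SIZE-sized bitmaps, so each chunk covers 8 * PAGE_SIZE slots.  The
 * first clear bit found becomes the quota data's slot; -ENOSPC is returned
 * when every slot up to sd_quota_slots is already in use.
 */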
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
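/*
 * qd_fish - find the next quota data that needs syncing to the quota file
 *
 * Scans sd_quota_list for an entry with a pending local change (QDF_CHANGE)
 * that is not already being synced (QDF_LOCKED) and has not yet been synced
 * in the current sd_quota_sync_gen generation.  A match is marked QDF_LOCKED
 * and returned with extra count, slot, and buffer references held.
 */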
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
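/*
 * gfs2_quota_hold - acquire quota data references for a pending change
 *
 * Holds the quota data for the inode's current uid and gid, and additionally
 * for @uid and @gid when they differ from the inode's (e.g. for an ownership
 * change), so at most four entries end up in al_qd.  gfs2_quota_unhold()
 * drops them again on error or once the change is done.
 */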
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}
/**
 * gfs2_adjust_quota - adjust an entry in the quota file
 *
 * Applies @change to the quota value stored at offset @loc in the quota file
 * for @qd and mirrors the new value into the quota glock's LVB.
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip)) {
		struct gfs2_alloc *al = NULL;
		al = gfs2_alloc_get(ip);
		/* just request 1 blk */
		al->al_requested = 1;
		gfs2_inplace_reserve(ip);
		gfs2_unstuff_dinode(ip, NULL);
		gfs2_inplace_release(ip);
		gfs2_alloc_put(ip);
	}
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
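/*
 * do_sync - flush a batch of accumulated local changes into the quota file
 *
 * Takes the quota glocks of all @num_qd entries exclusively (sorted first so
 * the glocks are always acquired in a consistent order) plus the quota inode
 * glock, reserves enough transaction space for any blocks that need
 * allocating, then applies each qd_change_sync to the quota file and
 * subtracts it from the local change file via do_qc().
 */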
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
					 RES_STATFS, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
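/*
 * do_glock - lock a quota ID's glock and make sure its LVB copy is current
 *
 * Normally a shared hold is enough, since the LVB caches the quota limit,
 * warning level, and value.  If the LVB has never been initialized (no
 * GFS2_MAGIC) or a refresh is forced, the glock is retaken exclusively and
 * the LVB is repopulated from the on-disk quota file.
 */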
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	int error;
	struct gfs2_quota_lvb *qlvb;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, NULL, buf, &pos,
					   sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
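/*
 * need_sync - decide whether a local change is big enough to force a sync
 *
 * Implements the "quota_scale" policy described at the top of this file: the
 * locally accumulated change is scaled by the number of journals (nodes) and
 * by quota_scale_num/quota_scale_den, and a sync is requested when the synced
 * value plus that scaled estimate would reach the limit.  As an illustrative
 * calculation (not from the source): with 4 journals, the default scale of 1,
 * a limit of 1000 blocks, and a synced value of 900, a local change of 25
 * blocks (4 * 25 = 100) is enough to trigger a sync.  Negative changes
 * (deallocations) never force one.
 */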
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}
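/*
 * gfs2_quota_sync - sync all pending quota changes to the quota file
 *
 * Repeatedly fishes up to gt_quota_simul_sync dirty quota entries off the
 * list and writes them out with do_sync(), until no unsynced entry from the
 * current sync generation remains.
 */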
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}