/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with infinite nodes and infinite bandwidth) to twice the user's limit. (In
 * practice, the maximum overrun you see should be much less.) A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun. Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */
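
/*
 * A rough argument for the "twice the user's limit" bound above: with
 * quota_scale = 1, a node defers syncing only while
 *
 *	cached_value + local_change * nr_nodes < limit
 *
 * (see need_sync() below), so each of N nodes can be holding at most
 * (limit - cached_value) / N unsynced blocks. The total usage the cluster
 * has not yet seen is therefore bounded by limit - cached_value, and since
 * the cached value itself stays below the limit, overall usage cannot
 * exceed roughly 2 * limit, however many nodes take part.
 */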

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
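
/*
 * The layout qd2offset() implies for the quota file (offsets in units of
 * sizeof(struct gfs2_quota)):
 *
 *	index 0:  user ID 0	index 1:    group ID 0
 *	index 2:  user ID 1	index 3:    group ID 1
 *	index 2n: user ID n	index 2n+1: group ID n
 *
 * User and group records for the same numeric ID are adjacent, and an ID
 * maps straight to a file offset with no lookup structure.
 */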

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
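
/*
 * qd_fish() picks the next quota entry with a pending local change
 * (QDF_CHANGE set, not already QDF_LOCKED) whose sync generation is behind
 * the filesystem's, takes the references needed to sync it, and hands it
 * to gfs2_quota_sync(). The references taken here (qd, slot, bh) mirror
 * those dropped later in qd_unlock().
 */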

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
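
/*
 * A minimal sketch of how the external interface below is meant to be
 * called, inferred from the functions themselves rather than copied from
 * a real caller:
 *
 *	error = gfs2_quota_lock(ip, uid, gid);	// takes holds + glocks
 *	if (!error) {
 *		error = gfs2_quota_check(ip, uid, gid);
 *		if (!error)
 *			gfs2_quota_change(ip, nblocks, uid, gid);
 *		gfs2_quota_unlock(ip);		// may sync, drops holds
 *	}
 *
 * gfs2_quota_hold()/gfs2_quota_unhold() can also be used on their own when
 * only the qd references, not enforcement, are needed.
 */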

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
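
/*
 * Note on the QDF_CHANGE accounting in do_qc(): the first nonzero change
 * for an ID takes an extra qd reference and slot reference, pinning the
 * entry until it is synced; once the accumulated change returns to zero
 * the tag is cleared and both references are dropped again, so IDs with no
 * outstanding change cost nothing between syncs.
 */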

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh, *dibh;
	struct page *page;
	void *kaddr;
	struct gfs2_quota *qp;
	s64 value;
	int err = -EIO;
	u64 size;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	qp = kaddr + offset;
	value = (s64)be64_to_cpu(qp->qu_value) + change;
	qp->qu_value = cpu_to_be64(value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
	}
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	err = gfs2_meta_inode_buffer(ip, &dibh);
	if (err)
		goto unlock;

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size) {
		ip->i_disksize = size;
		i_size_write(inode, size);
	}
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(inode);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
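
/*
 * do_sync() below folds a batch of pending changes into the quota file in
 * a single transaction. Note that the array is sorted with sort_qd()
 * before any glocks are taken, so every node acquires the per-ID quota
 * glocks in one global order (users before groups, then by ID), avoiding
 * ABBA deadlocks when two nodes sync overlapping sets of IDs.
 */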

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +1 in the end for block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
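
/*
 * do_glock() above is where the LVB caching described at the top of this
 * file happens: a shared glock normally suffices, because the
 * limit/warn/value triple travels in the lock value block along with the
 * glock itself. Only when the LVB has never been initialised (magic
 * mismatch) or a refresh is forced does it upgrade to an exclusive lock,
 * reread the record from the quota file via update_qd(), and repopulate
 * the LVB for every node to see.
 */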

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
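
/*
 * Worked example for need_sync(), with made-up numbers: limit = 1000
 * blocks, cached qb_value = 900, local unsynced change = 30, quota_scale
 * num/den = 1/1, and 4 journals in the cluster. The projected usage is
 *
 *	900 + 30 * 4 * 1 / 1 = 1020 >= 1000
 *
 * so the change is synced right away. With qb_value = 500 the projection
 * is 620 < 1000 and the change can keep sitting locally until
 * quota_quantum expires.
 */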

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}
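
/*
 * gfs2_quota_sync() below drains dirty quota entries in batches of at most
 * "quota_simul_sync" entries (qd_fish() supplies them): fish up a batch,
 * do_sync() it in one transaction, stamp each entry with the new sync
 * generation, unlock, and repeat while full batches keep turning up.
 */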

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
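
/*
 * Slot bitmap sizing used in gfs2_quota_init() above, with illustrative
 * numbers (the real values depend on the block size and on-disk record
 * sizes): if the quota_change file is 8 blocks and sd_qc_per_block works
 * out to 250, then sd_quota_slots = 8 * 250 = 2000 and sd_quota_chunks =
 * DIV_ROUND_UP(2000, 8 * PAGE_SIZE) = 1 bitmap page (for 4K pages), each
 * bit marking one in-use slot in the per-node file.
 */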

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
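
/*
 * The remaining functions implement the XFS-style quotactl interface.
 * The VFS quota types are translated to the local convention (USRQUOTA ->
 * QUOTA_USER, GRPQUOTA -> QUOTA_GROUP) on the way in, and GFS2's
 * limit/warn/value fields are reported through the fs_disk_quota
 * hard/soft/bcount fields on the way out.
 */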

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;
	if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
	else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
			   struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value);

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD)

static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
			   struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	struct gfs2_alloc *al;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch (type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != XFS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != XFS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;
	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;
	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
					  &alloc_required);
	if (error)
		goto out_i;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (al == NULL) {
			error = -ENOMEM;
			goto out_i;
		}
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;
	}

	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_alloc:
		gfs2_alloc_put(ip);
	}
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}
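
/*
 * The operations table handed to the VFS. This table is expected to be
 * attached to the superblock via sb->s_qcop by the mount code (an
 * assumption here; see ops_fstype.c), after which XFS-style quotactl(2)
 * requests land in the handlers above.
 */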
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_xquota	= gfs2_xquota_get,
	.set_xquota	= gfs2_xquota_set,
};