1 /* 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 4 * 5 * This copyrighted material is made available to anyone wishing to use, 6 * modify, copy, or redistribute it subject to the terms and conditions 7 * of the GNU General Public License version 2. 8 */ 9 10 /* 11 * Quota change tags are associated with each transaction that allocates or 12 * deallocates space. Those changes are accumulated locally to each node (in a 13 * per-node file) and then are periodically synced to the quota file. This 14 * avoids the bottleneck of constantly touching the quota file, but introduces 15 * fuzziness in the current usage value of IDs that are being used on different 16 * nodes in the cluster simultaneously. So, it is possible for a user on 17 * multiple nodes to overrun their quota, but that overrun is controlable. 18 * Since quota tags are part of transactions, there is no need for a quota check 19 * program to be run on node crashes or anything like that. 20 * 21 * There are couple of knobs that let the administrator manage the quota 22 * fuzziness. "quota_quantum" sets the maximum time a quota change can be 23 * sitting on one node before being synced to the quota file. (The default is 24 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency 25 * of quota file syncs increases as the user moves closer to their limit. The 26 * more frequent the syncs, the more accurate the quota enforcement, but that 27 * means that there is more contention between the nodes for the quota file. 28 * The default value is one. This sets the maximum theoretical quota overrun 29 * (with infinite node with infinite bandwidth) to twice the user's limit. (In 30 * practice, the maximum overrun you see should be much less.) A "quota_scale" 31 * number greater than one makes quota syncs more frequent and reduces the 32 * maximum overrun. Numbers less than one (but greater than zero) make quota 33 * syncs less frequent. 34 * 35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of 36 * the quota file, so it is not being constantly read. 37 */ 38 39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 40 41 #include <linux/sched.h> 42 #include <linux/slab.h> 43 #include <linux/mm.h> 44 #include <linux/spinlock.h> 45 #include <linux/completion.h> 46 #include <linux/buffer_head.h> 47 #include <linux/sort.h> 48 #include <linux/fs.h> 49 #include <linux/bio.h> 50 #include <linux/gfs2_ondisk.h> 51 #include <linux/kthread.h> 52 #include <linux/freezer.h> 53 #include <linux/quota.h> 54 #include <linux/dqblk_xfs.h> 55 #include <linux/lockref.h> 56 #include <linux/list_lru.h> 57 #include <linux/rcupdate.h> 58 #include <linux/rculist_bl.h> 59 #include <linux/bit_spinlock.h> 60 #include <linux/jhash.h> 61 #include <linux/vmalloc.h> 62 63 #include "gfs2.h" 64 #include "incore.h" 65 #include "bmap.h" 66 #include "glock.h" 67 #include "glops.h" 68 #include "log.h" 69 #include "meta_io.h" 70 #include "quota.h" 71 #include "rgrp.h" 72 #include "super.h" 73 #include "trans.h" 74 #include "inode.h" 75 #include "util.h" 76 77 #define GFS2_QD_HASH_SHIFT 12 78 #define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT) 79 #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1) 80 81 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */ 82 /* -> sd_bitmap_lock */ 83 static DEFINE_SPINLOCK(qd_lock); 84 struct list_lru gfs2_qd_lru; 85 86 static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE]; 87 88 static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp, 89 const struct kqid qid) 90 { 91 unsigned int h; 92 93 h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0); 94 h = jhash(&qid, sizeof(struct kqid), h); 95 96 return h & GFS2_QD_HASH_MASK; 97 } 98 99 static inline void spin_lock_bucket(unsigned int hash) 100 { 101 hlist_bl_lock(&qd_hash_table[hash]); 102 } 103 104 static inline void spin_unlock_bucket(unsigned int hash) 105 { 106 hlist_bl_unlock(&qd_hash_table[hash]); 107 } 108 109 static void gfs2_qd_dealloc(struct rcu_head *rcu) 110 { 111 struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu); 112 kmem_cache_free(gfs2_quotad_cachep, qd); 113 } 114 115 static void gfs2_qd_dispose(struct list_head *list) 116 { 117 struct gfs2_quota_data *qd; 118 struct gfs2_sbd *sdp; 119 120 while (!list_empty(list)) { 121 qd = list_entry(list->next, struct gfs2_quota_data, qd_lru); 122 sdp = qd->qd_gl->gl_sbd; 123 124 list_del(&qd->qd_lru); 125 126 /* Free from the filesystem-specific list */ 127 spin_lock(&qd_lock); 128 list_del(&qd->qd_list); 129 spin_unlock(&qd_lock); 130 131 spin_lock_bucket(qd->qd_hash); 132 hlist_bl_del_rcu(&qd->qd_hlist); 133 spin_unlock_bucket(qd->qd_hash); 134 135 gfs2_assert_warn(sdp, !qd->qd_change); 136 gfs2_assert_warn(sdp, !qd->qd_slot_count); 137 gfs2_assert_warn(sdp, !qd->qd_bh_count); 138 139 gfs2_glock_put(qd->qd_gl); 140 atomic_dec(&sdp->sd_quota_count); 141 142 /* Delete it from the common reclaim list */ 143 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); 144 } 145 } 146 147 148 static enum lru_status gfs2_qd_isolate(struct list_head *item, 149 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 150 { 151 struct list_head *dispose = arg; 152 struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru); 153 154 if (!spin_trylock(&qd->qd_lockref.lock)) 155 return LRU_SKIP; 156 157 if (qd->qd_lockref.count == 0) { 158 lockref_mark_dead(&qd->qd_lockref); 159 list_lru_isolate_move(lru, &qd->qd_lru, dispose); 160 } 161 162 spin_unlock(&qd->qd_lockref.lock); 163 return LRU_REMOVED; 164 } 165 166 static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, 167 struct shrink_control *sc) 168 { 169 LIST_HEAD(dispose); 170 unsigned long freed; 171 172 if (!(sc->gfp_mask & __GFP_FS)) 173 return SHRINK_STOP; 174 175 freed = list_lru_shrink_walk(&gfs2_qd_lru, sc, 176 gfs2_qd_isolate, &dispose); 177 178 gfs2_qd_dispose(&dispose); 179 180 return freed; 181 } 182 183 static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink, 184 struct shrink_control *sc) 185 { 186 return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc)); 187 } 188 189 struct shrinker gfs2_qd_shrinker = { 190 .count_objects = gfs2_qd_shrink_count, 191 .scan_objects = gfs2_qd_shrink_scan, 192 .seeks = DEFAULT_SEEKS, 193 .flags = SHRINKER_NUMA_AWARE, 194 }; 195 196 197 static u64 qd2index(struct gfs2_quota_data *qd) 198 { 199 struct kqid qid = qd->qd_id; 200 return (2 * (u64)from_kqid(&init_user_ns, qid)) + 201 ((qid.type == USRQUOTA) ? 0 : 1); 202 } 203 204 static u64 qd2offset(struct gfs2_quota_data *qd) 205 { 206 u64 offset; 207 208 offset = qd2index(qd); 209 offset *= sizeof(struct gfs2_quota); 210 211 return offset; 212 } 213 214 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid) 215 { 216 struct gfs2_quota_data *qd; 217 int error; 218 219 qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS); 220 if (!qd) 221 return NULL; 222 223 qd->qd_sbd = sdp; 224 qd->qd_lockref.count = 1; 225 spin_lock_init(&qd->qd_lockref.lock); 226 qd->qd_id = qid; 227 qd->qd_slot = -1; 228 INIT_LIST_HEAD(&qd->qd_lru); 229 qd->qd_hash = hash; 230 231 error = gfs2_glock_get(sdp, qd2index(qd), 232 &gfs2_quota_glops, CREATE, &qd->qd_gl); 233 if (error) 234 goto fail; 235 236 return qd; 237 238 fail: 239 kmem_cache_free(gfs2_quotad_cachep, qd); 240 return NULL; 241 } 242 243 static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash, 244 const struct gfs2_sbd *sdp, 245 struct kqid qid) 246 { 247 struct gfs2_quota_data *qd; 248 struct hlist_bl_node *h; 249 250 hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) { 251 if (!qid_eq(qd->qd_id, qid)) 252 continue; 253 if (qd->qd_sbd != sdp) 254 continue; 255 if (lockref_get_not_dead(&qd->qd_lockref)) { 256 list_lru_del(&gfs2_qd_lru, &qd->qd_lru); 257 return qd; 258 } 259 } 260 261 return NULL; 262 } 263 264 265 static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, 266 struct gfs2_quota_data **qdp) 267 { 268 struct gfs2_quota_data *qd, *new_qd; 269 unsigned int hash = gfs2_qd_hash(sdp, qid); 270 271 rcu_read_lock(); 272 *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid); 273 rcu_read_unlock(); 274 275 if (qd) 276 return 0; 277 278 new_qd = qd_alloc(hash, sdp, qid); 279 if (!new_qd) 280 return -ENOMEM; 281 282 spin_lock(&qd_lock); 283 spin_lock_bucket(hash); 284 *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid); 285 if (qd == NULL) { 286 *qdp = new_qd; 287 list_add(&new_qd->qd_list, &sdp->sd_quota_list); 288 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); 289 atomic_inc(&sdp->sd_quota_count); 290 } 291 spin_unlock_bucket(hash); 292 spin_unlock(&qd_lock); 293 294 if (qd) { 295 gfs2_glock_put(new_qd->qd_gl); 296 kmem_cache_free(gfs2_quotad_cachep, new_qd); 297 } 298 299 return 0; 300 } 301 302 303 static void qd_hold(struct gfs2_quota_data *qd) 304 { 305 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); 307 lockref_get(&qd->qd_lockref); 308 } 309 310 static void qd_put(struct gfs2_quota_data *qd) 311 { 312 if (lockref_put_or_lock(&qd->qd_lockref)) 313 return; 314 315 qd->qd_lockref.count = 0; 316 list_lru_add(&gfs2_qd_lru, &qd->qd_lru); 317 spin_unlock(&qd->qd_lockref.lock); 318 319 } 320 321 static int slot_get(struct gfs2_quota_data *qd) 322 { 323 struct gfs2_sbd *sdp = qd->qd_sbd; 324 unsigned int bit; 325 int error = 0; 326 327 spin_lock(&sdp->sd_bitmap_lock); 328 if (qd->qd_slot_count != 0) 329 goto out; 330 331 error = -ENOSPC; 332 bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots); 333 if (bit < sdp->sd_quota_slots) { 334 set_bit(bit, sdp->sd_quota_bitmap); 335 qd->qd_slot = bit; 336 error = 0; 337 out: 338 qd->qd_slot_count++; 339 } 340 spin_unlock(&sdp->sd_bitmap_lock); 341 342 return error; 343 } 344 345 static void slot_hold(struct gfs2_quota_data *qd) 346 { 347 struct gfs2_sbd *sdp = qd->qd_sbd; 348 349 spin_lock(&sdp->sd_bitmap_lock); 350 gfs2_assert(sdp, qd->qd_slot_count); 351 qd->qd_slot_count++; 352 spin_unlock(&sdp->sd_bitmap_lock); 353 } 354 355 static void slot_put(struct gfs2_quota_data *qd) 356 { 357 struct gfs2_sbd *sdp = qd->qd_sbd; 358 359 spin_lock(&sdp->sd_bitmap_lock); 360 gfs2_assert(sdp, qd->qd_slot_count); 361 if (!--qd->qd_slot_count) { 362 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap)); 363 qd->qd_slot = -1; 364 } 365 spin_unlock(&sdp->sd_bitmap_lock); 366 } 367 368 static int bh_get(struct gfs2_quota_data *qd) 369 { 370 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 372 unsigned int block, offset; 373 struct buffer_head *bh; 374 int error; 375 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; 376 377 mutex_lock(&sdp->sd_quota_mutex); 378 379 if (qd->qd_bh_count++) { 380 mutex_unlock(&sdp->sd_quota_mutex); 381 return 0; 382 } 383 384 block = qd->qd_slot / sdp->sd_qc_per_block; 385 offset = qd->qd_slot % sdp->sd_qc_per_block; 386 387 bh_map.b_size = 1 << ip->i_inode.i_blkbits; 388 error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0); 389 if (error) 390 goto fail; 391 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh); 392 if (error) 393 goto fail; 394 error = -EIO; 395 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) 396 goto fail_brelse; 397 398 qd->qd_bh = bh; 399 qd->qd_bh_qc = (struct gfs2_quota_change *) 400 (bh->b_data + sizeof(struct gfs2_meta_header) + 401 offset * sizeof(struct gfs2_quota_change)); 402 403 mutex_unlock(&sdp->sd_quota_mutex); 404 405 return 0; 406 407 fail_brelse: 408 brelse(bh); 409 fail: 410 qd->qd_bh_count--; 411 mutex_unlock(&sdp->sd_quota_mutex); 412 return error; 413 } 414 415 static void bh_put(struct gfs2_quota_data *qd) 416 { 417 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 418 419 mutex_lock(&sdp->sd_quota_mutex); 420 gfs2_assert(sdp, qd->qd_bh_count); 421 if (!--qd->qd_bh_count) { 422 brelse(qd->qd_bh); 423 qd->qd_bh = NULL; 424 qd->qd_bh_qc = NULL; 425 } 426 mutex_unlock(&sdp->sd_quota_mutex); 427 } 428 429 static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd, 430 u64 *sync_gen) 431 { 432 if (test_bit(QDF_LOCKED, &qd->qd_flags) || 433 !test_bit(QDF_CHANGE, &qd->qd_flags) || 434 (sync_gen && (qd->qd_sync_gen >= *sync_gen))) 435 return 0; 436 437 if (!lockref_get_not_dead(&qd->qd_lockref)) 438 return 0; 439 440 list_move_tail(&qd->qd_list, &sdp->sd_quota_list); 441 set_bit(QDF_LOCKED, &qd->qd_flags); 442 qd->qd_change_sync = qd->qd_change; 443 slot_hold(qd); 444 return 1; 445 } 446 447 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) 448 { 449 struct gfs2_quota_data *qd = NULL; 450 int error; 451 int found = 0; 452 453 *qdp = NULL; 454 455 if (sdp->sd_vfs->s_flags & MS_RDONLY) 456 return 0; 457 458 spin_lock(&qd_lock); 459 460 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { 461 found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen); 462 if (found) 463 break; 464 } 465 466 if (!found) 467 qd = NULL; 468 469 spin_unlock(&qd_lock); 470 471 if (qd) { 472 gfs2_assert_warn(sdp, qd->qd_change_sync); 473 error = bh_get(qd); 474 if (error) { 475 clear_bit(QDF_LOCKED, &qd->qd_flags); 476 slot_put(qd); 477 qd_put(qd); 478 return error; 479 } 480 } 481 482 *qdp = qd; 483 484 return 0; 485 } 486 487 static void qd_unlock(struct gfs2_quota_data *qd) 488 { 489 gfs2_assert_warn(qd->qd_gl->gl_sbd, 490 test_bit(QDF_LOCKED, &qd->qd_flags)); 491 clear_bit(QDF_LOCKED, &qd->qd_flags); 492 bh_put(qd); 493 slot_put(qd); 494 qd_put(qd); 495 } 496 497 static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid, 498 struct gfs2_quota_data **qdp) 499 { 500 int error; 501 502 error = qd_get(sdp, qid, qdp); 503 if (error) 504 return error; 505 506 error = slot_get(*qdp); 507 if (error) 508 goto fail; 509 510 error = bh_get(*qdp); 511 if (error) 512 goto fail_slot; 513 514 return 0; 515 516 fail_slot: 517 slot_put(*qdp); 518 fail: 519 qd_put(*qdp); 520 return error; 521 } 522 523 static void qdsb_put(struct gfs2_quota_data *qd) 524 { 525 bh_put(qd); 526 slot_put(qd); 527 qd_put(qd); 528 } 529 530 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) 531 { 532 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 533 struct gfs2_quota_data **qd; 534 int error; 535 536 if (ip->i_res == NULL) { 537 error = gfs2_rs_alloc(ip); 538 if (error) 539 return error; 540 } 541 542 qd = ip->i_res->rs_qa_qd; 543 544 if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) || 545 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) 546 return -EIO; 547 548 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 549 return 0; 550 551 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); 552 if (error) 553 goto out; 554 ip->i_res->rs_qa_qd_num++; 555 qd++; 556 557 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); 558 if (error) 559 goto out; 560 ip->i_res->rs_qa_qd_num++; 561 qd++; 562 563 if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) && 564 !uid_eq(uid, ip->i_inode.i_uid)) { 565 error = qdsb_get(sdp, make_kqid_uid(uid), qd); 566 if (error) 567 goto out; 568 ip->i_res->rs_qa_qd_num++; 569 qd++; 570 } 571 572 if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) && 573 !gid_eq(gid, ip->i_inode.i_gid)) { 574 error = qdsb_get(sdp, make_kqid_gid(gid), qd); 575 if (error) 576 goto out; 577 ip->i_res->rs_qa_qd_num++; 578 qd++; 579 } 580 581 out: 582 if (error) 583 gfs2_quota_unhold(ip); 584 return error; 585 } 586 587 void gfs2_quota_unhold(struct gfs2_inode *ip) 588 { 589 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 590 unsigned int x; 591 592 if (ip->i_res == NULL) 593 return; 594 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); 595 596 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 597 qdsb_put(ip->i_res->rs_qa_qd[x]); 598 ip->i_res->rs_qa_qd[x] = NULL; 599 } 600 ip->i_res->rs_qa_qd_num = 0; 601 } 602 603 static int sort_qd(const void *a, const void *b) 604 { 605 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a; 606 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b; 607 608 if (qid_lt(qd_a->qd_id, qd_b->qd_id)) 609 return -1; 610 if (qid_lt(qd_b->qd_id, qd_a->qd_id)) 611 return 1; 612 return 0; 613 } 614 615 static void do_qc(struct gfs2_quota_data *qd, s64 change) 616 { 617 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 618 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 619 struct gfs2_quota_change *qc = qd->qd_bh_qc; 620 s64 x; 621 622 mutex_lock(&sdp->sd_quota_mutex); 623 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); 624 625 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) { 626 qc->qc_change = 0; 627 qc->qc_flags = 0; 628 if (qd->qd_id.type == USRQUOTA) 629 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); 630 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); 631 } 632 633 x = be64_to_cpu(qc->qc_change) + change; 634 qc->qc_change = cpu_to_be64(x); 635 636 spin_lock(&qd_lock); 637 qd->qd_change = x; 638 spin_unlock(&qd_lock); 639 640 if (!x) { 641 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); 642 clear_bit(QDF_CHANGE, &qd->qd_flags); 643 qc->qc_flags = 0; 644 qc->qc_id = 0; 645 slot_put(qd); 646 qd_put(qd); 647 } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { 648 qd_hold(qd); 649 slot_hold(qd); 650 } 651 652 mutex_unlock(&sdp->sd_quota_mutex); 653 } 654 655 /** 656 * gfs2_adjust_quota - adjust record of current block usage 657 * @ip: The quota inode 658 * @loc: Offset of the entry in the quota file 659 * @change: The amount of usage change to record 660 * @qd: The quota data 661 * @fdq: The updated limits to record 662 * 663 * This function was mostly borrowed from gfs2_block_truncate_page which was 664 * in turn mostly borrowed from ext3 665 * 666 * Returns: 0 or -ve on error 667 */ 668 669 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 670 s64 change, struct gfs2_quota_data *qd, 671 struct qc_dqblk *fdq) 672 { 673 struct inode *inode = &ip->i_inode; 674 struct gfs2_sbd *sdp = GFS2_SB(inode); 675 struct address_space *mapping = inode->i_mapping; 676 unsigned long index = loc >> PAGE_CACHE_SHIFT; 677 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 678 unsigned blocksize, iblock, pos; 679 struct buffer_head *bh; 680 struct page *page; 681 void *kaddr, *ptr; 682 struct gfs2_quota q; 683 int err, nbytes; 684 u64 size; 685 686 if (gfs2_is_stuffed(ip)) { 687 err = gfs2_unstuff_dinode(ip, NULL); 688 if (err) 689 return err; 690 } 691 692 memset(&q, 0, sizeof(struct gfs2_quota)); 693 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); 694 if (err < 0) 695 return err; 696 697 err = -EIO; 698 be64_add_cpu(&q.qu_value, change); 699 qd->qd_qb.qb_value = q.qu_value; 700 if (fdq) { 701 if (fdq->d_fieldmask & QC_SPC_SOFT) { 702 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); 703 qd->qd_qb.qb_warn = q.qu_warn; 704 } 705 if (fdq->d_fieldmask & QC_SPC_HARD) { 706 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); 707 qd->qd_qb.qb_limit = q.qu_limit; 708 } 709 if (fdq->d_fieldmask & QC_SPACE) { 710 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); 711 qd->qd_qb.qb_value = q.qu_value; 712 } 713 } 714 715 /* Write the quota into the quota file on disk */ 716 ptr = &q; 717 nbytes = sizeof(struct gfs2_quota); 718 get_a_page: 719 page = find_or_create_page(mapping, index, GFP_NOFS); 720 if (!page) 721 return -ENOMEM; 722 723 blocksize = inode->i_sb->s_blocksize; 724 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 725 726 if (!page_has_buffers(page)) 727 create_empty_buffers(page, blocksize, 0); 728 729 bh = page_buffers(page); 730 pos = blocksize; 731 while (offset >= pos) { 732 bh = bh->b_this_page; 733 iblock++; 734 pos += blocksize; 735 } 736 737 if (!buffer_mapped(bh)) { 738 gfs2_block_map(inode, iblock, bh, 1); 739 if (!buffer_mapped(bh)) 740 goto unlock_out; 741 /* If it's a newly allocated disk block for quota, zero it */ 742 if (buffer_new(bh)) 743 zero_user(page, pos - blocksize, bh->b_size); 744 } 745 746 if (PageUptodate(page)) 747 set_buffer_uptodate(bh); 748 749 if (!buffer_uptodate(bh)) { 750 ll_rw_block(READ | REQ_META, 1, &bh); 751 wait_on_buffer(bh); 752 if (!buffer_uptodate(bh)) 753 goto unlock_out; 754 } 755 756 gfs2_trans_add_data(ip->i_gl, bh); 757 758 kaddr = kmap_atomic(page); 759 if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) 760 nbytes = PAGE_CACHE_SIZE - offset; 761 memcpy(kaddr + offset, ptr, nbytes); 762 flush_dcache_page(page); 763 kunmap_atomic(kaddr); 764 unlock_page(page); 765 page_cache_release(page); 766 767 /* If quota straddles page boundary, we need to update the rest of the 768 * quota at the beginning of the next page */ 769 if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) { 770 ptr = ptr + nbytes; 771 nbytes = sizeof(struct gfs2_quota) - nbytes; 772 offset = 0; 773 index++; 774 goto get_a_page; 775 } 776 777 size = loc + sizeof(struct gfs2_quota); 778 if (size > inode->i_size) 779 i_size_write(inode, size); 780 inode->i_mtime = inode->i_atime = CURRENT_TIME; 781 mark_inode_dirty(inode); 782 set_bit(QDF_REFRESH, &qd->qd_flags); 783 return 0; 784 785 unlock_out: 786 unlock_page(page); 787 page_cache_release(page); 788 return err; 789 } 790 791 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) 792 { 793 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd; 794 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 795 struct gfs2_alloc_parms ap = { .aflags = 0, }; 796 unsigned int data_blocks, ind_blocks; 797 struct gfs2_holder *ghs, i_gh; 798 unsigned int qx, x; 799 struct gfs2_quota_data *qd; 800 unsigned reserved; 801 loff_t offset; 802 unsigned int nalloc = 0, blocks; 803 int error; 804 805 error = gfs2_rs_alloc(ip); 806 if (error) 807 return error; 808 809 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), 810 &data_blocks, &ind_blocks); 811 812 ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS); 813 if (!ghs) 814 return -ENOMEM; 815 816 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 817 mutex_lock(&ip->i_inode.i_mutex); 818 for (qx = 0; qx < num_qd; qx++) { 819 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, 820 GL_NOCACHE, &ghs[qx]); 821 if (error) 822 goto out; 823 } 824 825 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); 826 if (error) 827 goto out; 828 829 for (x = 0; x < num_qd; x++) { 830 offset = qd2offset(qda[x]); 831 if (gfs2_write_alloc_required(ip, offset, 832 sizeof(struct gfs2_quota))) 833 nalloc++; 834 } 835 836 /* 837 * 1 blk for unstuffing inode if stuffed. We add this extra 838 * block to the reservation unconditionally. If the inode 839 * doesn't need unstuffing, the block will be released to the 840 * rgrp since it won't be allocated during the transaction 841 */ 842 /* +3 in the end for unstuffing block, inode size update block 843 * and another block in case quota straddles page boundary and 844 * two blocks need to be updated instead of 1 */ 845 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; 846 847 reserved = 1 + (nalloc * (data_blocks + ind_blocks)); 848 ap.target = reserved; 849 error = gfs2_inplace_reserve(ip, &ap); 850 if (error) 851 goto out_alloc; 852 853 if (nalloc) 854 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; 855 856 error = gfs2_trans_begin(sdp, blocks, 0); 857 if (error) 858 goto out_ipres; 859 860 for (x = 0; x < num_qd; x++) { 861 qd = qda[x]; 862 offset = qd2offset(qd); 863 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); 864 if (error) 865 goto out_end_trans; 866 867 do_qc(qd, -qd->qd_change_sync); 868 set_bit(QDF_REFRESH, &qd->qd_flags); 869 } 870 871 error = 0; 872 873 out_end_trans: 874 gfs2_trans_end(sdp); 875 out_ipres: 876 gfs2_inplace_release(ip); 877 out_alloc: 878 gfs2_glock_dq_uninit(&i_gh); 879 out: 880 while (qx--) 881 gfs2_glock_dq_uninit(&ghs[qx]); 882 mutex_unlock(&ip->i_inode.i_mutex); 883 kfree(ghs); 884 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH); 885 return error; 886 } 887 888 static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) 889 { 890 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 891 struct gfs2_quota q; 892 struct gfs2_quota_lvb *qlvb; 893 loff_t pos; 894 int error; 895 896 memset(&q, 0, sizeof(struct gfs2_quota)); 897 pos = qd2offset(qd); 898 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); 899 if (error < 0) 900 return error; 901 902 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 903 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); 904 qlvb->__pad = 0; 905 qlvb->qb_limit = q.qu_limit; 906 qlvb->qb_warn = q.qu_warn; 907 qlvb->qb_value = q.qu_value; 908 qd->qd_qb = *qlvb; 909 910 return 0; 911 } 912 913 static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 914 struct gfs2_holder *q_gh) 915 { 916 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 917 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 918 struct gfs2_holder i_gh; 919 int error; 920 921 restart: 922 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 923 if (error) 924 return error; 925 926 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 927 928 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { 929 gfs2_glock_dq_uninit(q_gh); 930 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 931 GL_NOCACHE, q_gh); 932 if (error) 933 return error; 934 935 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); 936 if (error) 937 goto fail; 938 939 error = update_qd(sdp, qd); 940 if (error) 941 goto fail_gunlock; 942 943 gfs2_glock_dq_uninit(&i_gh); 944 gfs2_glock_dq_uninit(q_gh); 945 force_refresh = 0; 946 goto restart; 947 } 948 949 return 0; 950 951 fail_gunlock: 952 gfs2_glock_dq_uninit(&i_gh); 953 fail: 954 gfs2_glock_dq_uninit(q_gh); 955 return error; 956 } 957 958 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) 959 { 960 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 961 struct gfs2_quota_data *qd; 962 unsigned int x; 963 int error = 0; 964 965 error = gfs2_quota_hold(ip, uid, gid); 966 if (error) 967 return error; 968 969 if (capable(CAP_SYS_RESOURCE) || 970 sdp->sd_args.ar_quota != GFS2_QUOTA_ON) 971 return 0; 972 973 sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num, 974 sizeof(struct gfs2_quota_data *), sort_qd, NULL); 975 976 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 977 int force = NO_FORCE; 978 qd = ip->i_res->rs_qa_qd[x]; 979 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) 980 force = FORCE; 981 error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]); 982 if (error) 983 break; 984 } 985 986 if (!error) 987 set_bit(GIF_QD_LOCKED, &ip->i_flags); 988 else { 989 while (x--) 990 gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]); 991 gfs2_quota_unhold(ip); 992 } 993 994 return error; 995 } 996 997 static int need_sync(struct gfs2_quota_data *qd) 998 { 999 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1000 struct gfs2_tune *gt = &sdp->sd_tune; 1001 s64 value; 1002 unsigned int num, den; 1003 int do_sync = 1; 1004 1005 if (!qd->qd_qb.qb_limit) 1006 return 0; 1007 1008 spin_lock(&qd_lock); 1009 value = qd->qd_change; 1010 spin_unlock(&qd_lock); 1011 1012 spin_lock(>->gt_spin); 1013 num = gt->gt_quota_scale_num; 1014 den = gt->gt_quota_scale_den; 1015 spin_unlock(>->gt_spin); 1016 1017 if (value < 0) 1018 do_sync = 0; 1019 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= 1020 (s64)be64_to_cpu(qd->qd_qb.qb_limit)) 1021 do_sync = 0; 1022 else { 1023 value *= gfs2_jindex_size(sdp) * num; 1024 value = div_s64(value, den); 1025 value += (s64)be64_to_cpu(qd->qd_qb.qb_value); 1026 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) 1027 do_sync = 0; 1028 } 1029 1030 return do_sync; 1031 } 1032 1033 void gfs2_quota_unlock(struct gfs2_inode *ip) 1034 { 1035 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1036 struct gfs2_quota_data *qda[4]; 1037 unsigned int count = 0; 1038 unsigned int x; 1039 int found; 1040 1041 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) 1042 goto out; 1043 1044 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 1045 struct gfs2_quota_data *qd; 1046 int sync; 1047 1048 qd = ip->i_res->rs_qa_qd[x]; 1049 sync = need_sync(qd); 1050 1051 gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]); 1052 if (!sync) 1053 continue; 1054 1055 spin_lock(&qd_lock); 1056 found = qd_check_sync(sdp, qd, NULL); 1057 spin_unlock(&qd_lock); 1058 1059 if (!found) 1060 continue; 1061 1062 gfs2_assert_warn(sdp, qd->qd_change_sync); 1063 if (bh_get(qd)) { 1064 clear_bit(QDF_LOCKED, &qd->qd_flags); 1065 slot_put(qd); 1066 qd_put(qd); 1067 continue; 1068 } 1069 1070 qda[count++] = qd; 1071 } 1072 1073 if (count) { 1074 do_sync(count, qda); 1075 for (x = 0; x < count; x++) 1076 qd_unlock(qda[x]); 1077 } 1078 1079 out: 1080 gfs2_quota_unhold(ip); 1081 } 1082 1083 #define MAX_LINE 256 1084 1085 static int print_message(struct gfs2_quota_data *qd, char *type) 1086 { 1087 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1088 1089 fs_info(sdp, "quota %s for %s %u\n", 1090 type, 1091 (qd->qd_id.type == USRQUOTA) ? "user" : "group", 1092 from_kqid(&init_user_ns, qd->qd_id)); 1093 1094 return 0; 1095 } 1096 1097 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) 1098 { 1099 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1100 struct gfs2_quota_data *qd; 1101 s64 value; 1102 unsigned int x; 1103 int error = 0; 1104 1105 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) 1106 return 0; 1107 1108 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) 1109 return 0; 1110 1111 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 1112 qd = ip->i_res->rs_qa_qd[x]; 1113 1114 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) || 1115 qid_eq(qd->qd_id, make_kqid_gid(gid)))) 1116 continue; 1117 1118 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); 1119 spin_lock(&qd_lock); 1120 value += qd->qd_change; 1121 spin_unlock(&qd_lock); 1122 1123 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1124 print_message(qd, "exceeded"); 1125 quota_send_warning(qd->qd_id, 1126 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN); 1127 1128 error = -EDQUOT; 1129 break; 1130 } else if (be64_to_cpu(qd->qd_qb.qb_warn) && 1131 (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value && 1132 time_after_eq(jiffies, qd->qd_last_warn + 1133 gfs2_tune_get(sdp, 1134 gt_quota_warn_period) * HZ)) { 1135 quota_send_warning(qd->qd_id, 1136 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); 1137 error = print_message(qd, "warning"); 1138 qd->qd_last_warn = jiffies; 1139 } 1140 } 1141 1142 return error; 1143 } 1144 1145 void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 1146 kuid_t uid, kgid_t gid) 1147 { 1148 struct gfs2_quota_data *qd; 1149 unsigned int x; 1150 1151 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change)) 1152 return; 1153 if (ip->i_diskflags & GFS2_DIF_SYSTEM) 1154 return; 1155 1156 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 1157 qd = ip->i_res->rs_qa_qd[x]; 1158 1159 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) || 1160 qid_eq(qd->qd_id, make_kqid_gid(gid))) { 1161 do_qc(qd, change); 1162 } 1163 } 1164 } 1165 1166 int gfs2_quota_sync(struct super_block *sb, int type) 1167 { 1168 struct gfs2_sbd *sdp = sb->s_fs_info; 1169 struct gfs2_quota_data **qda; 1170 unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder); 1171 unsigned int num_qd; 1172 unsigned int x; 1173 int error = 0; 1174 1175 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL); 1176 if (!qda) 1177 return -ENOMEM; 1178 1179 mutex_lock(&sdp->sd_quota_sync_mutex); 1180 sdp->sd_quota_sync_gen++; 1181 1182 do { 1183 num_qd = 0; 1184 1185 for (;;) { 1186 error = qd_fish(sdp, qda + num_qd); 1187 if (error || !qda[num_qd]) 1188 break; 1189 if (++num_qd == max_qd) 1190 break; 1191 } 1192 1193 if (num_qd) { 1194 if (!error) 1195 error = do_sync(num_qd, qda); 1196 if (!error) 1197 for (x = 0; x < num_qd; x++) 1198 qda[x]->qd_sync_gen = 1199 sdp->sd_quota_sync_gen; 1200 1201 for (x = 0; x < num_qd; x++) 1202 qd_unlock(qda[x]); 1203 } 1204 } while (!error && num_qd == max_qd); 1205 1206 mutex_unlock(&sdp->sd_quota_sync_mutex); 1207 kfree(qda); 1208 1209 return error; 1210 } 1211 1212 int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid) 1213 { 1214 struct gfs2_quota_data *qd; 1215 struct gfs2_holder q_gh; 1216 int error; 1217 1218 error = qd_get(sdp, qid, &qd); 1219 if (error) 1220 return error; 1221 1222 error = do_glock(qd, FORCE, &q_gh); 1223 if (!error) 1224 gfs2_glock_dq_uninit(&q_gh); 1225 1226 qd_put(qd); 1227 return error; 1228 } 1229 1230 int gfs2_quota_init(struct gfs2_sbd *sdp) 1231 { 1232 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 1233 u64 size = i_size_read(sdp->sd_qc_inode); 1234 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift; 1235 unsigned int x, slot = 0; 1236 unsigned int found = 0; 1237 unsigned int hash; 1238 unsigned int bm_size; 1239 u64 dblock; 1240 u32 extlen = 0; 1241 int error; 1242 1243 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20)) 1244 return -EIO; 1245 1246 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; 1247 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long)); 1248 bm_size *= sizeof(unsigned long); 1249 error = -ENOMEM; 1250 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN); 1251 if (sdp->sd_quota_bitmap == NULL) 1252 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS | 1253 __GFP_ZERO, PAGE_KERNEL); 1254 if (!sdp->sd_quota_bitmap) 1255 return error; 1256 1257 for (x = 0; x < blocks; x++) { 1258 struct buffer_head *bh; 1259 const struct gfs2_quota_change *qc; 1260 unsigned int y; 1261 1262 if (!extlen) { 1263 int new = 0; 1264 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen); 1265 if (error) 1266 goto fail; 1267 } 1268 error = -EIO; 1269 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); 1270 if (!bh) 1271 goto fail; 1272 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) { 1273 brelse(bh); 1274 goto fail; 1275 } 1276 1277 qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); 1278 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; 1279 y++, slot++) { 1280 struct gfs2_quota_data *qd; 1281 s64 qc_change = be64_to_cpu(qc->qc_change); 1282 u32 qc_flags = be32_to_cpu(qc->qc_flags); 1283 enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ? 1284 USRQUOTA : GRPQUOTA; 1285 struct kqid qc_id = make_kqid(&init_user_ns, qtype, 1286 be32_to_cpu(qc->qc_id)); 1287 qc++; 1288 if (!qc_change) 1289 continue; 1290 1291 hash = gfs2_qd_hash(sdp, qc_id); 1292 qd = qd_alloc(hash, sdp, qc_id); 1293 if (qd == NULL) { 1294 brelse(bh); 1295 goto fail; 1296 } 1297 1298 set_bit(QDF_CHANGE, &qd->qd_flags); 1299 qd->qd_change = qc_change; 1300 qd->qd_slot = slot; 1301 qd->qd_slot_count = 1; 1302 1303 spin_lock(&qd_lock); 1304 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); 1305 list_add(&qd->qd_list, &sdp->sd_quota_list); 1306 atomic_inc(&sdp->sd_quota_count); 1307 spin_unlock(&qd_lock); 1308 1309 spin_lock_bucket(hash); 1310 hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]); 1311 spin_unlock_bucket(hash); 1312 1313 found++; 1314 } 1315 1316 brelse(bh); 1317 dblock++; 1318 extlen--; 1319 } 1320 1321 if (found) 1322 fs_info(sdp, "found %u quota changes\n", found); 1323 1324 return 0; 1325 1326 fail: 1327 gfs2_quota_cleanup(sdp); 1328 return error; 1329 } 1330 1331 void gfs2_quota_cleanup(struct gfs2_sbd *sdp) 1332 { 1333 struct list_head *head = &sdp->sd_quota_list; 1334 struct gfs2_quota_data *qd; 1335 1336 spin_lock(&qd_lock); 1337 while (!list_empty(head)) { 1338 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); 1339 1340 list_del(&qd->qd_list); 1341 1342 /* Also remove if this qd exists in the reclaim list */ 1343 list_lru_del(&gfs2_qd_lru, &qd->qd_lru); 1344 atomic_dec(&sdp->sd_quota_count); 1345 spin_unlock(&qd_lock); 1346 1347 spin_lock_bucket(qd->qd_hash); 1348 hlist_bl_del_rcu(&qd->qd_hlist); 1349 spin_unlock_bucket(qd->qd_hash); 1350 1351 gfs2_assert_warn(sdp, !qd->qd_change); 1352 gfs2_assert_warn(sdp, !qd->qd_slot_count); 1353 gfs2_assert_warn(sdp, !qd->qd_bh_count); 1354 1355 gfs2_glock_put(qd->qd_gl); 1356 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); 1357 1358 spin_lock(&qd_lock); 1359 } 1360 spin_unlock(&qd_lock); 1361 1362 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); 1363 1364 kvfree(sdp->sd_quota_bitmap); 1365 sdp->sd_quota_bitmap = NULL; 1366 } 1367 1368 static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error) 1369 { 1370 if (error == 0 || error == -EROFS) 1371 return; 1372 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) 1373 fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error); 1374 } 1375 1376 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1377 int (*fxn)(struct super_block *sb, int type), 1378 unsigned long t, unsigned long *timeo, 1379 unsigned int *new_timeo) 1380 { 1381 if (t >= *timeo) { 1382 int error = fxn(sdp->sd_vfs, 0); 1383 quotad_error(sdp, msg, error); 1384 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1385 } else { 1386 *timeo -= t; 1387 } 1388 } 1389 1390 static void quotad_check_trunc_list(struct gfs2_sbd *sdp) 1391 { 1392 struct gfs2_inode *ip; 1393 1394 while(1) { 1395 ip = NULL; 1396 spin_lock(&sdp->sd_trunc_lock); 1397 if (!list_empty(&sdp->sd_trunc_list)) { 1398 ip = list_entry(sdp->sd_trunc_list.next, 1399 struct gfs2_inode, i_trunc_list); 1400 list_del_init(&ip->i_trunc_list); 1401 } 1402 spin_unlock(&sdp->sd_trunc_lock); 1403 if (ip == NULL) 1404 return; 1405 gfs2_glock_finish_truncate(ip); 1406 } 1407 } 1408 1409 void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) { 1410 if (!sdp->sd_statfs_force_sync) { 1411 sdp->sd_statfs_force_sync = 1; 1412 wake_up(&sdp->sd_quota_wait); 1413 } 1414 } 1415 1416 1417 /** 1418 * gfs2_quotad - Write cached quota changes into the quota file 1419 * @sdp: Pointer to GFS2 superblock 1420 * 1421 */ 1422 1423 int gfs2_quotad(void *data) 1424 { 1425 struct gfs2_sbd *sdp = data; 1426 struct gfs2_tune *tune = &sdp->sd_tune; 1427 unsigned long statfs_timeo = 0; 1428 unsigned long quotad_timeo = 0; 1429 unsigned long t = 0; 1430 DEFINE_WAIT(wait); 1431 int empty; 1432 1433 while (!kthread_should_stop()) { 1434 1435 /* Update the master statfs file */ 1436 if (sdp->sd_statfs_force_sync) { 1437 int error = gfs2_statfs_sync(sdp->sd_vfs, 0); 1438 quotad_error(sdp, "statfs", error); 1439 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ; 1440 } 1441 else 1442 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1443 &statfs_timeo, 1444 &tune->gt_statfs_quantum); 1445 1446 /* Update quota file */ 1447 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1448 "ad_timeo, &tune->gt_quota_quantum); 1449 1450 /* Check for & recover partially truncated inodes */ 1451 quotad_check_trunc_list(sdp); 1452 1453 try_to_freeze(); 1454 1455 t = min(quotad_timeo, statfs_timeo); 1456 1457 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); 1458 spin_lock(&sdp->sd_trunc_lock); 1459 empty = list_empty(&sdp->sd_trunc_list); 1460 spin_unlock(&sdp->sd_trunc_lock); 1461 if (empty && !sdp->sd_statfs_force_sync) 1462 t -= schedule_timeout(t); 1463 else 1464 t = 0; 1465 finish_wait(&sdp->sd_quota_wait, &wait); 1466 } 1467 1468 return 0; 1469 } 1470 1471 static int gfs2_quota_get_xstate(struct super_block *sb, 1472 struct fs_quota_stat *fqs) 1473 { 1474 struct gfs2_sbd *sdp = sb->s_fs_info; 1475 1476 memset(fqs, 0, sizeof(struct fs_quota_stat)); 1477 fqs->qs_version = FS_QSTAT_VERSION; 1478 1479 switch (sdp->sd_args.ar_quota) { 1480 case GFS2_QUOTA_ON: 1481 fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD); 1482 /*FALLTHRU*/ 1483 case GFS2_QUOTA_ACCOUNT: 1484 fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT); 1485 break; 1486 case GFS2_QUOTA_OFF: 1487 break; 1488 } 1489 1490 if (sdp->sd_quota_inode) { 1491 fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr; 1492 fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks; 1493 } 1494 fqs->qs_uquota.qfs_nextents = 1; /* unsupported */ 1495 fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */ 1496 fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru); 1497 return 0; 1498 } 1499 1500 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, 1501 struct qc_dqblk *fdq) 1502 { 1503 struct gfs2_sbd *sdp = sb->s_fs_info; 1504 struct gfs2_quota_lvb *qlvb; 1505 struct gfs2_quota_data *qd; 1506 struct gfs2_holder q_gh; 1507 int error; 1508 1509 memset(fdq, 0, sizeof(*fdq)); 1510 1511 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1512 return -ESRCH; /* Crazy XFS error code */ 1513 1514 if ((qid.type != USRQUOTA) && 1515 (qid.type != GRPQUOTA)) 1516 return -EINVAL; 1517 1518 error = qd_get(sdp, qid, &qd); 1519 if (error) 1520 return error; 1521 error = do_glock(qd, FORCE, &q_gh); 1522 if (error) 1523 goto out; 1524 1525 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 1526 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; 1527 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; 1528 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; 1529 1530 gfs2_glock_dq_uninit(&q_gh); 1531 out: 1532 qd_put(qd); 1533 return error; 1534 } 1535 1536 /* GFS2 only supports a subset of the XFS fields */ 1537 #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE) 1538 1539 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, 1540 struct qc_dqblk *fdq) 1541 { 1542 struct gfs2_sbd *sdp = sb->s_fs_info; 1543 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 1544 struct gfs2_quota_data *qd; 1545 struct gfs2_holder q_gh, i_gh; 1546 unsigned int data_blocks, ind_blocks; 1547 unsigned int blocks = 0; 1548 int alloc_required; 1549 loff_t offset; 1550 int error; 1551 1552 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1553 return -ESRCH; /* Crazy XFS error code */ 1554 1555 if ((qid.type != USRQUOTA) && 1556 (qid.type != GRPQUOTA)) 1557 return -EINVAL; 1558 1559 if (fdq->d_fieldmask & ~GFS2_FIELDMASK) 1560 return -EINVAL; 1561 1562 error = qd_get(sdp, qid, &qd); 1563 if (error) 1564 return error; 1565 1566 error = gfs2_rs_alloc(ip); 1567 if (error) 1568 goto out_put; 1569 1570 mutex_lock(&ip->i_inode.i_mutex); 1571 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh); 1572 if (error) 1573 goto out_unlockput; 1574 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); 1575 if (error) 1576 goto out_q; 1577 1578 /* Check for existing entry, if none then alloc new blocks */ 1579 error = update_qd(sdp, qd); 1580 if (error) 1581 goto out_i; 1582 1583 /* If nothing has changed, this is a no-op */ 1584 if ((fdq->d_fieldmask & QC_SPC_SOFT) && 1585 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) 1586 fdq->d_fieldmask ^= QC_SPC_SOFT; 1587 1588 if ((fdq->d_fieldmask & QC_SPC_HARD) && 1589 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) 1590 fdq->d_fieldmask ^= QC_SPC_HARD; 1591 1592 if ((fdq->d_fieldmask & QC_SPACE) && 1593 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) 1594 fdq->d_fieldmask ^= QC_SPACE; 1595 1596 if (fdq->d_fieldmask == 0) 1597 goto out_i; 1598 1599 offset = qd2offset(qd); 1600 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); 1601 if (gfs2_is_stuffed(ip)) 1602 alloc_required = 1; 1603 if (alloc_required) { 1604 struct gfs2_alloc_parms ap = { .aflags = 0, }; 1605 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), 1606 &data_blocks, &ind_blocks); 1607 blocks = 1 + data_blocks + ind_blocks; 1608 ap.target = blocks; 1609 error = gfs2_inplace_reserve(ip, &ap); 1610 if (error) 1611 goto out_i; 1612 blocks += gfs2_rg_blocks(ip, blocks); 1613 } 1614 1615 /* Some quotas span block boundaries and can update two blocks, 1616 adding an extra block to the transaction to handle such quotas */ 1617 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0); 1618 if (error) 1619 goto out_release; 1620 1621 /* Apply changes */ 1622 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); 1623 1624 gfs2_trans_end(sdp); 1625 out_release: 1626 if (alloc_required) 1627 gfs2_inplace_release(ip); 1628 out_i: 1629 gfs2_glock_dq_uninit(&i_gh); 1630 out_q: 1631 gfs2_glock_dq_uninit(&q_gh); 1632 out_unlockput: 1633 mutex_unlock(&ip->i_inode.i_mutex); 1634 out_put: 1635 qd_put(qd); 1636 return error; 1637 } 1638 1639 const struct quotactl_ops gfs2_quotactl_ops = { 1640 .quota_sync = gfs2_quota_sync, 1641 .get_xstate = gfs2_quota_get_xstate, 1642 .get_dqblk = gfs2_get_dqblk, 1643 .set_dqblk = gfs2_set_dqblk, 1644 }; 1645 1646 void __init gfs2_quota_hash_init(void) 1647 { 1648 unsigned i; 1649 1650 for(i = 0; i < GFS2_QD_HASH_SIZE; i++) 1651 INIT_HLIST_BL_HEAD(&qd_hash_table[i]); 1652 } 1653